diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b724e4d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/upload/
\ No newline at end of file
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/Flask_SQLAlchemy.iml b/.idea/Flask_SQLAlchemy.iml
new file mode 100644
index 0000000..37901ac
--- /dev/null
+++ b/.idea/Flask_SQLAlchemy.iml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$">
+      <excludeFolder url="file://$MODULE_DIR$/venv" />
+    </content>
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forSourceFolders="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="PLAIN" />
+    <option name="myDocStringFormat" value="Plain" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..d56657a
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
+</project>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..7c3dddc
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/Flask_SQLAlchemy.iml" filepath="$PROJECT_DIR$/.idea/Flask_SQLAlchemy.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/Predict.py b/Predict.py
new file mode 100644
index 0000000..c9511ef
--- /dev/null
+++ b/Predict.py
@@ -0,0 +1,469 @@
+import pickle
+
+import joblib
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+from hydra import compose, initialize
+from omegaconf import OmegaConf
+from scipy import signal
+from scipy.signal import butter, filtfilt, find_peaks
+from sklearn.metrics import accuracy_score
+from sklearn.preprocessing import StandardScaler
+
+plt.rcParams["figure.figsize"] = (30, 20)  # adjust the default figure size here if needed
+matplotlib.rc('xtick', labelsize=15)
+matplotlib.rc('ytick', labelsize=15)
+plt.rc('axes', labelsize=15)
+plt.rc('legend', fontsize=15, loc='lower right')
+
+with initialize(config_path="configs"):
+ data_cfg = compose(config_name="data_path")
+data_cfg = OmegaConf.create(data_cfg)
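+# data_cfg mirrors configs/data_path.yaml, e.g.
+# data_cfg.processed_data.data_30_7.model == "processed_data/data_30_7/svm4.model"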
+
+
+class Signal_new:
+
+    def __init__(self):
+        self.x = None
+        self.y = None
+        self.z = None
+        self.a = None          # acceleration magnitude sqrt(x^2 + y^2 + z^2)
+        self.all_index_peaks = 0
+        self.fs = 100          # sampling rate in Hz
+        self.a_lpf = None      # low-pass-filtered magnitude
+        self.thr = None        # amplitude threshold (mean of a_lpf)
+        self.a_pulse = None    # binarised (square-pulse) magnitude
+        self.x_f = None        # filtered axes
+        self.y_f = None
+        self.z_f = None
+
+
+ def set_signal(self, signal, isFilter=True, cutoff=5.0, order=5, fs=100):
+ # FOR DATA FROM ANDROID LOADED FILE
+ # timestamp, x, y, z = np.genfromtxt(str(path), delimiter=";", dtype='str',unpack=True)
+ # self.x = np.array([float(item) for item in x])
+ # self.y = np.array([float(item) for item in y])
+ # self.z = np.array([float(item) for item in z])
+
+        # FOR DATA FROM AN IPHONE
+ # _,x,y,z = 10*np.loadtxt(path, delimiter=";", skiprows=1, unpack=True)
+ # self.x = np.array(x)
+ # self.y = np.array(y)
+ # self.z = np.array(z)
+
+ self.x = np.array([item[0] for item in signal])
+ self.y = np.array([item[1] for item in signal])
+ self.z = np.array([item[2] for item in signal])
+
+
+ self.a = np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
+ if isFilter:
+ self.low_pass_filter(cutoff=cutoff, order=order)
+
+ def butter(self, cutoff, order):
+ f_nyq = 0.5 * self.fs
+ f_normal = cutoff / f_nyq
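+        # e.g. with the defaults fs=100 Hz and cutoff=5.0 Hz: f_nyq = 50 Hz and
+        # f_normal = 5.0/50 = 0.1 (the cutoff expressed as a fraction of Nyquist)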
+ numerator, denominator = butter(
+ order, f_normal, btype='low', analog=False)
+ return numerator, denominator
+
+ def butter_lowpass_filter(self, data, cutoff=5.0, order=5):
+ b, a = self.butter(cutoff, order=order)
+ y = filtfilt(b, a, data)
+ return y
+
+ def low_pass_filter(self, cutoff=5.0, order=5):
+
+ self.a_lpf = self.butter_lowpass_filter(
+ self.a, cutoff=cutoff, order=order)
+ self.x_f = self.butter_lowpass_filter(
+ self.x, cutoff=cutoff, order=order)
+ self.y_f = self.butter_lowpass_filter(
+ self.y, cutoff=cutoff, order=order)
+ self.z_f = self.butter_lowpass_filter(
+ self.z, cutoff=cutoff, order=order)
+ self.thr = np.average(self.a_lpf)
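+        # thr is the mean of the filtered magnitude; get_all_peaks() keeps only
+        # samples above ratio*thr when building the square pulse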
+
+    def collect_index_peaks(self, sig_pulse, num_thr=10):
+        # For every detected peak, walk left and right to the edges of its
+        # square pulse; keep the pulse only if it spans more than num_thr samples.
+        list_index = []
+        peaks, _ = find_peaks(sig_pulse)
+        for peak in peaks:
+            indices = []
+            for e in reversed(range(0, peak, 1)):
+                if sig_pulse[e] == 0:
+                    break
+                indices.append(e)
+            indices.reverse()
+            for e in range(peak, len(sig_pulse), 1):
+                if sig_pulse[e] == 0:
+                    break
+                indices.append(e)
+            if len(indices) > num_thr:
+                list_index.append(indices)
+        return list_index
+
+ def create_square_pulse(self, sig, thr):
+ # create a numpy array to store binary pulses
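+        # e.g. thresholding [0.9, 1.2, 1.1, 0.8] at thr=1.0 gives [0, 1, 1, 0]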
+ sig_pulse = np.zeros_like(sig)
+
+ for i, e in enumerate(sig):
+ if e >= thr:
+ sig_pulse[i] = 1
+ return sig_pulse
+
+ def get_all_peaks(self, num_thr=5, ratio=1.03):
+ self.a_pulse = self.create_square_pulse(self.a_lpf, ratio*self.thr)
+ # print(self.a_pulse[:200])
+ list_indexs = self.collect_index_peaks(self.a_pulse, num_thr=num_thr)
+
+ features = []
+ for i in list_indexs:
+ features.append(self.a_lpf[i])
+ features = np.array(features)
+
+ index_peaks = []
+ for i, e in enumerate(features):
+            idx = np.argmax(e)  # first index of the maximum inside the pulse
+ index_peaks.append(list_indexs[i][idx])
+
+ self.all_index_peaks = np.array(index_peaks)
+ return self.all_index_peaks
+
+
+    # Cut a window of n_point samples centred on `peak`, shifting the window
+    # when the peak lies within n_point//2 samples of either end of the signal.
+    def scale_data_point(self, peak, sig, n_point):
+        n = n_point//2
+        if peak <= n:
+            left_peaks = sig[:peak]
+            right_peaks = sig[peak:n_point]
+            index_peaks = range(n_point)
+        elif peak+n > len(sig):
+            right_peaks = sig[peak:len(sig)]
+            num = n_point-(len(sig)-peak)
+            left_peaks = sig[peak-num:peak]
+            index_peaks = range(peak-num, len(sig), 1)
+        else:
+            left_peaks = sig[peak-n:peak]
+            right_peaks = sig[peak:peak+n]
+            index_peaks = range(peak-n, peak+n, 1)
+
+        data_points = np.concatenate((left_peaks, right_peaks))
+        return data_points, index_peaks
+
+ def get_window_data(self, peak, x, y, z, a, a_lpf, num_point):
+ data_points_a_lpf, index_window = self.scale_data_point(
+ peak, a_lpf, num_point)
+ data_points_x = x[index_window]
+ data_points_y = y[index_window]
+ data_points_z = z[index_window]
+ data_points_a = a[index_window]
+ # print("data point x: " + str(len(data_points_x)))
+
+ h = a_lpf[peak]
+ data = []
+ data.append(data_points_x)
+ data.append(data_points_y)
+ data.append(data_points_z)
+ data.append(data_points_a)
+
+ data.append(data_points_a_lpf)
+ data.append(h)
+
+ return data
+
+ def resample(self, sig, num):
+ return signal.resample(sig, num)
+
+ def fft_transform(self, sig):
+ dt = 1.0/self.fs
+ n = len(sig)
+ fhat = np.fft.fft(sig, n)
+        PSD = fhat*np.conj(fhat)/n  # power spectrum
+ freq = (1/(dt*n)) * np.arange(n)
+ L = np.arange(1, np.floor(n/2), dtype='int')
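+        # L indexes the positive-frequency half of the spectrum (bins 1..n/2-1)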
+ return fhat, PSD, freq, L
+
+ def get_psd_values(self, sig):
+ _, PSD, freq, L = self.fft_transform(sig)
+ psd = np.abs(PSD[L])
+ return psd
+
+    def create_data_point(self, data, thr, num_feature_psd=15):
+        # time-domain features of the raw magnitude window
+        dt = []
+        dt.append(np.mean(data[3]))
+        dt.append(np.std(data[3]))
+        dt.append(max(data[3]))
+
+        # frequency-domain features
+ psd_x = self.get_psd_values(data[0])
+ psd_y = self.get_psd_values(data[1])
+ psd_z = self.get_psd_values(data[2])
+ psd_a = self.get_psd_values(data[4])
+
+ data_points = []
+ data_points.extend(dt)
+ data_points.extend(data[4])
+ data_points.extend(psd_x[:num_feature_psd])
+ data_points.extend(psd_y[:num_feature_psd])
+ data_points.extend(psd_z[:num_feature_psd])
+ data_points.extend(psd_a[:num_feature_psd])
+ return data_points
+
+ def get_data_point(self, peak, num_point, num_resample=64):
+ data = self.get_window_data(
+ peak, self.x_f, self.y_f, self.z_f, self.a, self.a_lpf, num_point)
+ # print(data)
+ if num_point != num_resample:
+ data[0] = self.resample(data[0], num_resample)
+ data[1] = self.resample(data[1], num_resample)
+ data[2] = self.resample(data[2], num_resample)
+ data[3] = self.resample(data[3], num_resample)
+ data[4] = self.resample(data[4], num_resample)
+ # print(len(data[0]))
+ data_point = self.create_data_point(data, self.thr)
+ return data_point
+
+ def visual_predict(self, y_predict, isSave=False, filename='default.eps'):
+ y_true = []
+ y_false = []
+ for i, e in enumerate(y_predict):
+ if e != 0:
+ y_true.append(self.all_index_peaks[i])
+ else:
+ y_false.append(self.all_index_peaks[i])
+
+ y_true = np.array(y_true)
+ y_false = np.array(y_false)
+
+ text = "Total peaks: " + str(y_true.shape[0])
+ # x_axis = (len(self.a_lpf) / 2 - 30*5)
+
+ # y_axis = np.max(self.a_lpf) + 0.5
+ # print(x_axis)
+
+ plt.figure(1, figsize=(30, 10))
+ plt.plot(self.a_lpf, 'olive')
+
+ if y_true.shape[0] != 0:
+ plt.plot(y_true, self.a_lpf[y_true], 'o',
+ color="forestgreen", label='Step Detect')
+ if y_false.shape[0] != 0:
+ plt.plot(y_false, self.a_lpf[y_false], 'xr', label='Fake Peak')
+
+ plt.legend()
+ plt.title(text, fontsize=16, color='purple')
+ plt.xlabel("Samples by time")
+ plt.ylabel("Amplitude")
+ # plt.text(x_axis, y_axis, text, fontsize=16, color='r')
+        if isSave:
+            # save before show(): show() flushes and clears the current figure
+            plt.savefig(filename)
+        plt.show()
+        print(y_true.shape[0])
+
+
+class StepPredict:
+ def __init__(self, window_size):
+ self.model = None
+ self.scaler = None
+ self.x_test = None
+ self.y_test = None
+ self.window_size = window_size
+
+ def load_model(self, path_file):
+ self.model = pickle.load(open(path_file, 'rb'))
+
+    def load_scaler(self, path_file):
+ self.scaler = pickle.load(open(path_file, 'rb'))
+
+ def predict_peak(self, data_point):
+ data_point = np.array(data_point)
+ # print(data_point.shape)
+ data_point = data_point.reshape(1, -1)
+ data_point_norm = self.scaler.transform(data_point)
+ y_predict = self.model.predict(data_point_norm)
+ return y_predict[0]
+
+    def process(self, sig):
+        peaks = sig.all_index_peaks
+ pre_window = self.window_size[0]
+ y_predicts = []
+ y_window = []
+ pre_peak = 0
+ distance_peak = 10000000
+ T_thr1 = 32
+ T_thr2 = 21
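+        # minimum allowed gap (in samples) between consecutive accepted peaks;
+        # the larger gap T_thr1 applies when the widest window matched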
+
+ for peak in peaks:
+ num_size = pre_window
+            data_point = sig.get_data_point(peak, num_size)
+ y_pre = self.predict_peak(data_point)
+ if y_pre == 0:
+ for size in self.window_size:
+ if size != pre_window:
+                        data_point = sig.get_data_point(peak, size)
+ y_pre = self.predict_peak(data_point)
+ if y_pre != 0:
+ pre_window = size
+ break
+
+        # reject candidate peaks that fall too close to the previously accepted peak
+ if (y_pre != 0):
+ distance_peak = peak-pre_peak
+ if (pre_window == max(self.window_size)):
+ if distance_peak < T_thr1:
+ y_pre = 0
+ else:
+ if distance_peak <= T_thr2:
+ y_pre = 0
+ pre_peak = peak
+ y_predicts.append(y_pre)
+ y_window.append(pre_window)
+ return y_predicts, y_window
+
+def scale_data(X_train, X_test):
+ sc = StandardScaler()
+ sc_X_train = sc.fit_transform(X_train)
+ sc_X_test = sc.transform(X_test)
+ return (sc_X_train, sc_X_test)
+
+def train_test_model(model, train_X, train_y, test_X, test_y):
+ model.fit(train_X, train_y)
+ prediction = model.predict(test_X)
+ acc = accuracy_score(test_y, prediction)
+ return prediction, acc, model
+
+def classifiers_trials(cls, train_X, train_y, test_X, test_y):
+    log_cols = ["Classifier", "Accuracy"]
+    log = pd.DataFrame(columns=log_cols)
+    for cl in cls:
+        pred, accuracy, model = train_test_model(cl, train_X, train_y, test_X, test_y)
+        name = cl.__class__.__name__
+        print("="*30)
+        print(name)
+        print('****Results****')
+ print("Accuracy: {:.4%}".format(accuracy))
+        log_entry = pd.DataFrame([[name, accuracy*100]], columns=log_cols)
+        log = pd.concat([log, log_entry], ignore_index=True)  # DataFrame.append is deprecated
+        joblib.dump(model, "/home/hatran_ame/DATN/step_detection_upgrade/data/data_ha/processed_data/data_20_7/trials/" + name + ".model")
+ print("="*30)
+ return log
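+
+# Illustrative usage only (classifier choice and variable names are assumptions):
+#   from sklearn.svm import SVC
+#   from sklearn.ensemble import RandomForestClassifier
+#   log = classifiers_trials([SVC(), RandomForestClassifier()],
+#                            X_train, y_train, X_test, y_test)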
+
+
+def predict(data):
+    # `data` is an iterable of (x, y, z) accelerometer samples, not a file path.
+    sig = Signal_new()
+
+    sig.set_signal(data)
+
+    # initialise the step-detection module with the list of window sizes
+    stepDt = StepPredict(window_size=[64, 48, 32])
+
+ model_path = data_cfg.processed_data.data_30_7.model
+ scaler_path = data_cfg.processed_data.data_30_7.scaler
+
+ peaks = sig.get_all_peaks()
+
+ stepDt.load_model(model_path)
+    stepDt.load_scaler(scaler_path)
+
+    # adaptive window resampling
+    y, w = stepDt.process(sig)
+
+ true_peak = []
+ for i, e in enumerate(peaks):
+ if y[i] != 0:
+ true_peak.append(e)
+ true_peak = np.array(true_peak)
+ return true_peak.shape[0]
+
+
+def convert_data(data: list) -> list:
+    # Map raw JSON records ({'valueX': ..., 'valueY': ..., 'valueZ': ...}) to [x, y, z] triples.
+    return [[item['valueX'], item['valueY'], item['valueZ']] for item in data]
+
+# Test import lib and package
+# import warnings
+# import numpy as np
+# from modules.collect_data import read_csv
+# import hydra
+# from hydra import utils
+# import random
+
+# def get_abs_path(file_name):
+# return utils.to_absolute_path(file_name)
+
+# # Check hydra path
+# @hydra.main(config_path='../../configs',
+# config_name='data_path')
+
+
+def main():
+    # Load a CSV recorded by the app ("timestamp;x;y;z" per line) and count its steps.
+    path = "/home/hatran_ame/DB_PY/Flask_SQLAlchemy/upload/by_hour/2022-07-27-13_22.csv"
+    _, x, y, z = np.genfromtxt(path, delimiter=";", dtype='str', unpack=True)
+    data = [[float(a), float(b), float(c)] for a, b, c in zip(x, y, z)]
+
+    print(predict(data))
+
+if __name__ == "__main__":
+ main()
diff --git a/README.md b/README.md
index e17073a..02c21c9 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,5 @@
-# Flask_SQLAlchemy# Flask_SQL
+# Flask_SQLAlchemy
+
+Functions:
+ - Receive accelerometer data from the user
+ - Return step-count data to the user
+
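+A minimal client sketch (illustrative: endpoint paths come from the controllers
+in this repo, host/port from `config.py`, the credentials are placeholders, and
+the `requests` package is assumed to be installed):
+
+```python
+import requests
+
+BASE = "http://127.0.0.1:8080"
+
+# Log in to obtain a JWT.
+token = requests.post(f"{BASE}/api/login", json={
+    "user_data": {"userName": "alice", "password": "secret"}
+}).json()["token"]
+
+# Post a batch of accelerometer samples; the response carries step counts.
+r = requests.post(f"{BASE}/api/accelerometer",
+                  headers={"x-access-token": token},
+                  json={"data": [{"x": 0.1, "y": 9.8, "z": 0.3}]})
+print(r.json())  # {"steps": ..., "total": ...}
+```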
\ No newline at end of file
diff --git a/__pycache__/Predict.cpython-37.pyc b/__pycache__/Predict.cpython-37.pyc
new file mode 100644
index 0000000..8680537
Binary files /dev/null and b/__pycache__/Predict.cpython-37.pyc differ
diff --git a/__pycache__/config.cpython-37.pyc b/__pycache__/config.cpython-37.pyc
index b741ee0..8c911a3 100644
Binary files a/__pycache__/config.cpython-37.pyc and b/__pycache__/config.cpython-37.pyc differ
diff --git a/__pycache__/model.cpython-37.pyc b/__pycache__/model.cpython-37.pyc
index 2397a26..0015fde 100644
Binary files a/__pycache__/model.cpython-37.pyc and b/__pycache__/model.cpython-37.pyc differ
diff --git a/__pycache__/run_server.cpython-39.pyc b/__pycache__/run_server.cpython-39.pyc
new file mode 100644
index 0000000..f5d080d
Binary files /dev/null and b/__pycache__/run_server.cpython-39.pyc differ
diff --git a/acc.db b/acc.db
deleted file mode 100644
index 5cecb08..0000000
Binary files a/acc.db and /dev/null differ
diff --git a/config.py b/config.py
index dbfafda..eecd4f0 100644
--- a/config.py
+++ b/config.py
@@ -6,9 +6,9 @@
APP_PORT = 8080
APP_SELF_REF = 'accelerometer'
SQLALCHEMY_DATABASE_URI = 'mysql://root:Long.0311@127.0.0.1:3306/acc_data'
+
SQLALCHEMY_TRACK_MODIFICATIONS = False
API_URI = f'http://{APP_HOST}:{APP_PORT}'
URL = f'http://{APP_HOST}:{APP_PORT}'
API_URI_SR = f'http://{APP_SELF_REF}:{APP_PORT}'
-
-
+UPLOAD_FOLDER = 'upload/data/'
diff --git a/configs/__pycache__/constants.cpython-37.pyc b/configs/__pycache__/constants.cpython-37.pyc
new file mode 100644
index 0000000..d3facca
Binary files /dev/null and b/configs/__pycache__/constants.cpython-37.pyc differ
diff --git a/configs/__pycache__/utils.cpython-37.pyc b/configs/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000..616774d
Binary files /dev/null and b/configs/__pycache__/utils.cpython-37.pyc differ
diff --git a/configs/constants.py b/configs/constants.py
new file mode 100644
index 0000000..442e713
--- /dev/null
+++ b/configs/constants.py
@@ -0,0 +1 @@
+ALLOWED_EXTENSIONS = {'txt', 'csv'}
\ No newline at end of file
diff --git a/configs/data_path.yaml b/configs/data_path.yaml
new file mode 100644
index 0000000..593468b
--- /dev/null
+++ b/configs/data_path.yaml
@@ -0,0 +1,6 @@
+processed_data:
+ data_30_7:
+ data: processed_data/data_30_7/data.npz
+ model: processed_data/data_30_7/svm4.model
+ scaler: processed_data/data_30_7/scaler.sav
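+
+# Consumed in Predict.py via hydra's compose(config_name="data_path"), e.g.
+# data_cfg.processed_data.data_30_7.model -> processed_data/data_30_7/svm4.model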
+
\ No newline at end of file
diff --git a/configs/utils.py b/configs/utils.py
new file mode 100644
index 0000000..9f07dd4
--- /dev/null
+++ b/configs/utils.py
@@ -0,0 +1,34 @@
+from flask import request, jsonify
+import jwt
+from functools import wraps
+from model import User
+from config import SECRET_KEY
+from configs.constants import ALLOWED_EXTENSIONS
+
+def token_required(f):
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ token = None
+
+ if 'x-access-token' in request.headers:
+ token = request.headers['x-access-token']
+
+ if not token:
+ return jsonify({'message': 'Token is missing !!'}), 401
+
+        try:
+            # the token was signed with HS256 (PyJWT's default) in login_func
+            data = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])
+            current_user = User.query\
+                .filter_by(username=data['username'])\
+                .first()
+        except Exception:
+            return jsonify({
+                'message': 'Token is invalid !!'
+            }), 401
+
+ return f(current_user, *args, **kwargs)
+
+ return decorated
+
+def allowed_filename(filename):
+    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
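+
+# e.g. allowed_filename("walk.csv") -> True; allowed_filename("walk.exe") -> False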
\ No newline at end of file
diff --git a/controllers/__pycache__/acc_load_controller.cpython-37.pyc b/controllers/__pycache__/acc_load_controller.cpython-37.pyc
new file mode 100644
index 0000000..093598c
Binary files /dev/null and b/controllers/__pycache__/acc_load_controller.cpython-37.pyc differ
diff --git a/controllers/__pycache__/accelerometer_controllers.cpython-37.pyc b/controllers/__pycache__/accelerometer_controllers.cpython-37.pyc
new file mode 100644
index 0000000..305ffe1
Binary files /dev/null and b/controllers/__pycache__/accelerometer_controllers.cpython-37.pyc differ
diff --git a/controllers/__pycache__/response_data_controller.cpython-37.pyc b/controllers/__pycache__/response_data_controller.cpython-37.pyc
new file mode 100644
index 0000000..8e04e5e
Binary files /dev/null and b/controllers/__pycache__/response_data_controller.cpython-37.pyc differ
diff --git a/controllers/__pycache__/user_controllers.cpython-37.pyc b/controllers/__pycache__/user_controllers.cpython-37.pyc
new file mode 100644
index 0000000..b14e6a0
Binary files /dev/null and b/controllers/__pycache__/user_controllers.cpython-37.pyc differ
diff --git a/controllers/acc_load_controller.py b/controllers/acc_load_controller.py
new file mode 100644
index 0000000..1718ec9
--- /dev/null
+++ b/controllers/acc_load_controller.py
@@ -0,0 +1,86 @@
+import os
+
+import numpy as np
+from flask import request, jsonify, Blueprint
+
+from configs.utils import token_required
+
+acc_load = Blueprint('acc_load', __name__)
+HOME = "upload/by_hour/"
+
+write_file = False
+
+
+@acc_load.route('/api/acc_load', methods=['GET', 'POST'])
+@token_required
+def accelerometer_handler(current_user):
+ global write_file
+ if request.method == 'POST':
+ if request.is_json:
+            datas = request.json['data']
+            # 'writting' [sic] is the client's flag asking the server to persist this batch
+            write_file = request.json['writting']
+
+ if write_file:
+ data_ = []
+ hour = ""
+
+ for item in datas:
+ data_.append(
+ [item['timestamp'], item['x'], item['y'], item['z']])
+
+ file_path = os.path.join(HOME, str(current_user.id))
+ if not os.path.exists(file_path):
+ os.makedirs(file_path)
+
+            if len(data_) > 0:
+                first_sample = data_[0]
+                hour = first_sample[0].split(
+                    ":")[0] + "_" + first_sample[0].split(":")[1]
+
+ formated_file = hour.replace(" ", "-")
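+                # e.g. timestamp "2022-07-27 13:22:05" gives "2022-07-27-13_22.csv",
+                # stored under upload/by_hour/<user_id>/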
+
+                out_path = os.path.join(file_path, formated_file + ".csv")
+
+                # append the samples as "timestamp;x;y;z" lines
+                with open(out_path, 'a') as f:
+                    for item in data_:
+                        f.write(item[0] + ";" + str(item[1]) + ";" +
+                                str(item[2]) + ";" + str(item[3]) + "\n")
+
+
+        return jsonify({
+            "message": "success"
+        }), 200
+
+    # the route also accepts GET, but there is nothing to serve here
+    return jsonify({"message": "method not supported"}), 405
+
+def convert_data(data: list) -> np.ndarray:
+    # Map raw JSON records to an (n, 3) array of [x, y, z] samples.
+    return np.array([[item['x'], item['y'], item['z']] for item in data])
\ No newline at end of file
diff --git a/controllers/accelerometer_controllers.py b/controllers/accelerometer_controllers.py
new file mode 100644
index 0000000..71275d5
--- /dev/null
+++ b/controllers/accelerometer_controllers.py
@@ -0,0 +1,196 @@
+from datetime import datetime
+
+import numpy as np
+from flask import request, jsonify, Blueprint
+
+from Predict import predict
+from model import ResponseData, db
+from configs.utils import token_required
+
+accelerometer_router = Blueprint('accelerometer_router', __name__)
+HOME = "upload/by_hour/"
+
+
+@accelerometer_router.route('/api/accelerometer', methods=['GET', 'POST'])
+@token_required
+def accelerometer_handler(current_user):
+ if request.method == 'POST':
+        if request.is_json:
+            datas = request.json['data']
+            sign = convert_data(datas)
+            now = datetime.now()
+
+ data = ResponseData.query.filter(
+ ResponseData.user_id == current_user.id).all()
+
+ steps = predict(sign)
+
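+            # Keep one ResponseData row per user per hour: add the new steps to
+            # the row for the current hour if it exists, otherwise create it.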
+ resp = None
+ for item in data:
+ ts = item.timestamp
+ if now.year == ts.year and now.month == ts.month and now.day == ts.day and now.hour == ts.hour:
+ resp = item
+ break
+
+            if resp is None:
+ response_data = ResponseData(
+ steps=steps, timestamp=now, user_id=current_user.id)
+ db.session.add(response_data)
+ db.session.commit()
+ else:
+ resp.steps += steps
+ db.session.commit()
+
+ data = ResponseData.query.filter(
+ ResponseData.user_id == current_user.id).all()
+
+ res = 0
+
+ for acc in data:
+ ts = acc.timestamp
+
+ if now.year == ts.year and now.month == ts.month and now.day == ts.day:
+ res += acc.steps
+
+ return jsonify({
+ "steps": steps,
+ "total": res
+ }), 200
+
+
+ # ############## EDIT HERE BY HA 30/7 -- START
+ # sign = convert_data(datas)
+
+ # predict(sign)
+
+ # ####### EDIT BY HA 30/7 ---- STOP
+
+ # # print(datas)
+ # data_ = []
+ # hour = ""
+ # current_hour = ""
+
+ # for item in datas:
+ # data_.append([item['timestamp'],item['x'], item['y'], item['z']])
+
+ # # list all files in upload/by_hour
+ # # get lastest file => extract file_name
+ # # compare
+ # # glob
+
+ # # path of the directory
+ # path = "upload/by_hour"
+
+ # # Getting the list of directories
+ # dir = os.listdir(path)
+
+ # # Checking if the list is empty or not
+ # if len(dir) != 0:
+
+ # list_of_files = glob.glob('upload/by_hour/*') # * means all if need specific format then *.csv
+ # latest_file = max(list_of_files, key=os.path.getctime)
+
+ # now = datetime.now()
+ # current_time = now.strftime("%Y-%m-%d-%H")
+
+ # latest_info = latest_file.split(".csv")[0].split(HOME)[1]
+ # latest_hour = latest_info.split("_")[0]
+ # print(latest_hour)
+ # id_user = current_user.id
+
+ # if latest_info != str(current_time) + "_" + str(id_user):
+
+ # data = ResponseData.query.filter(ResponseData.user_id==current_user.id).all()
+ # print("latest_info", latest_info)
+ # print("current time", str(current_time) + "_" + str(id_user))
+ # print("check: ", latest_info == str(current_time) +"_"+ str(id_user))
+ # check = False
+ # for i in data:
+ # temp = str(i.timestamp).split(":")[0]
+ # if(temp == latest_hour):
+ # check = True
+ # break
+ # if(check == False):
+
+ # timestamp, x, y, z = np.genfromtxt(str(latest_file), delimiter=";", dtype='str',unpack=True)
+ # x_d = x[1:]
+ # y_d = y[1:]
+ # z_d = z[1:]
+ # x = np.array([float(item) for item in x_d])
+ # y = np.array([float(item) for item in y_d])
+ # z = np.array([float(item) for item in z_d])
+
+ # value = x.shape[0]
+
+ # steps = predict(latest_file)
+
+ # print("step: ", steps)
+
+ # response_data = ResponseData(steps=steps, timestamp=latest_hour, user_id=id_user)
+ # db.session.add(response_data)
+ # db.session.commit()
+
+
+ # print("[Run here]")
+ # if(len(data_) > 0):
+ # filename = data_[0]
+ # # fix here
+ # hour = filename[0].split(":")[0] + "_" + filename[0].split(":")[1]
+ # print("hour ", hour)
+ # # print("Type hour: ", type(hour))
+ # formated_file = hour.replace(" ", "-") + "_" + str(id_user)
+
+ # # print("formated hour: ",formated_hour)
+ # dir = os.path.join(HOME, formated_file + ".csv")
+
+ # file_object = open(dir, 'a')
+ # for item in data_:
+ # text = item[0] + ";" + str(item[1]) + ";" + str(item[2]) + ";" + str(item[3]) + "\n"
+ # file_object.write(text)
+ # file_object.close()
+
+ # return str("Uploadeds")
+
+# def process_data(data, iter):
+# lst = []
+# for item in data:
+# lst.extend(item)
+
+# df = pd.DataFrame(lst, columns=(["timestamp", "x", "y", "z"]))
+
+# now = datetime.now()
+# current_time = now.strftime("%H:%M:%S")
+# df.to_csv("/home/hatran_ame/DB_PY/Flask_SQLAlchemy/upload/by_hour/" + str(current_time) + ".csv", sep=";",index=False)
+
+def convert_data(data: list) -> np.ndarray:
+    # Map raw JSON records to an (n, 3) array of [x, y, z] samples.
+    return np.array([[item['x'], item['y'], item['z']] for item in data])
\ No newline at end of file
diff --git a/controllers/response_data_controller.py b/controllers/response_data_controller.py
new file mode 100644
index 0000000..aab0a85
--- /dev/null
+++ b/controllers/response_data_controller.py
@@ -0,0 +1,72 @@
+from datetime import datetime, date
+
+from flask import request, jsonify, Blueprint
+
+from model import ResponseData, db
+from configs.utils import token_required
+
+response_router = Blueprint('response_router', __name__)
+
+
+@response_router.route('/api/response', methods=['GET', 'POST'])
+@token_required
+def response_handler(current_user):
+ if request.method == 'POST':
+ try:
+ steps = request.form["steps"]
+ timestamp = request.form["timestamp"]
+ id_user = current_user.id
+
+ response_data = ResponseData(
+ steps=steps, timestamp=timestamp, user_id=id_user)
+ db.session.add(response_data)
+ db.session.commit()
+ res = {
+ 'msg': response_data.toDict()
+ }
+ except Exception as e:
+ return jsonify({"error": "Exception: {}".format(e)}), 400
+ return jsonify(res), 200
+ elif request.method == 'GET':
+ try:
+ query_date = request.args.get('date')
+            if query_date is None:
+ return jsonify({
+ "msg": "No date found"
+ }), 404
+
+            query_date = datetime.strptime(query_date, '%Y-%m-%d').date()
+
+ data = ResponseData.query.filter(ResponseData.user_id==current_user.id).all()
+
+ res = 0
+ step_arr = [0] * 24
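+            # hourly buckets: step_arr[h] holds the steps recorded in hour h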
+
+            for acc in data:
+                ts = acc.timestamp
+                if query_date.year == ts.year and query_date.month == ts.month and query_date.day == ts.day:
+                    res += acc.steps
+                    step_arr[ts.hour] = acc.steps
+
+ except Exception as e:
+ return jsonify({"error": "Exception: {}".format(e)}), 400
+ return jsonify({
+ "steps": res,
+ "step_arr": step_arr
+ }), 200
diff --git a/controllers/user_controllers.py b/controllers/user_controllers.py
new file mode 100644
index 0000000..51c0876
--- /dev/null
+++ b/controllers/user_controllers.py
@@ -0,0 +1,91 @@
+from datetime import datetime, timedelta
+
+import jwt
+from flask import request, jsonify, Blueprint, make_response
+from werkzeug.security import generate_password_hash, check_password_hash
+
+from model import User, db
+from config import SECRET_KEY
+from configs.utils import token_required
+
+auth_router = Blueprint('auth_router', __name__)
+
+
+@auth_router.route('/api/register', methods=["POST"])
+def register_func():
+ try:
+        data = request.json['user_data']
+
+ username = data['username']
+ password = data['password']
+ password = generate_password_hash(password)
+ gender = data['gender']
+ birthday = data['birthday']
+
+ user = User.query\
+ .filter_by(username=username)\
+ .first()
+ if not user:
+
+ user = User(username=username, password=password, gender=gender,
+ birthday=birthday)
+
+ db.session.add(user)
+ db.session.commit()
+ return jsonify(user.toDict()), 200
+ else:
+ return make_response('User already exists. Please Log in.', 202)
+
+ except Exception as e:
+ return jsonify({"error": "Exception: {}".format(e)}), 500
+
+
+# Get the username and password from the client, look the user up in the DB and
+# verify the password hash; on success issue a JWT valid for 100000 minutes.
+@auth_router.route('/api/login', methods=["POST"])
+def login_func():
+ try:
+        data = request.json['user_data']
+
+ username = data['userName']
+ password = data['password']
+
+ user = User.query.filter_by(username=username).first()
+ if not user:
+ return make_response(
+ 'Could not verify',
+ 401
+ )
+
+ if check_password_hash(user.password, password):
+ token = jwt.encode({
+ 'username': user.username,
+ 'exp': datetime.utcnow() + timedelta(minutes=100000)
+ }, SECRET_KEY)
+
+            # PyJWT 1.x returns bytes, 2.x returns str
+            if isinstance(token, bytes):
+                token = token.decode('UTF-8')
+            return make_response(jsonify({'token': token, 'username': user.username, 'birthday': user.birthday}), 201)
+ else:
+ return jsonify({
+ "msg": "Something went wrong"
+ }), 400
+ except Exception as e:
+ return jsonify({'Error': "{}".format(e)}), 500
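+
+# Example client call (illustrative: host/port come from config.py, the
+# credentials are placeholders, and the `requests` package is assumed):
+#   import requests
+#   r = requests.post("http://127.0.0.1:8080/api/login",
+#                     json={"user_data": {"userName": "alice", "password": "secret"}})
+#   token = r.json()["token"]  # send as the "x-access-token" header afterwards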
+
+@auth_router.route('/api/me', methods=['GET', 'PUT'])
+@token_required
+def get_user(current_user):
+ if request.method == 'GET':
+ return jsonify({'users': current_user.toDict()})
+    elif request.method == 'PUT':
+        if request.form.get('username'):
+            return jsonify({
+                "msg": "Cannot change username"
+            }), 400
+        elif request.form.get('password'):
+            # store only the salted hash, never the plain password
+            current_user.password = generate_password_hash(request.form['password'])
+            db.session.commit()
+        return jsonify({'users': current_user.toDict()})
\ No newline at end of file
diff --git a/create_db.py b/create_db.py
deleted file mode 100644
index 90308b7..0000000
--- a/create_db.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from database import Acc, engine, Base
-
-Base.metadata.create_all(engine)
\ No newline at end of file
diff --git a/create_users.py b/create_users.py
deleted file mode 100644
index 4b6f7c0..0000000
--- a/create_users.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from database import Accelerometer, Session, engine
-
-local_session = Session(bind=engine)
diff --git a/data/processed_data/data_14_7/data.npz b/data/processed_data/data_14_7/data.npz
new file mode 100644
index 0000000..b37de78
Binary files /dev/null and b/data/processed_data/data_14_7/data.npz differ
diff --git a/data/processed_data/data_14_7/scaler.sav b/data/processed_data/data_14_7/scaler.sav
new file mode 100644
index 0000000..3391d0d
Binary files /dev/null and b/data/processed_data/data_14_7/scaler.sav differ
diff --git a/data/processed_data/data_14_7/svm.model b/data/processed_data/data_14_7/svm.model
new file mode 100644
index 0000000..dd27faf
Binary files /dev/null and b/data/processed_data/data_14_7/svm.model differ
diff --git a/data/processed_data/data_20_7/data.npz b/data/processed_data/data_20_7/data.npz
new file mode 100644
index 0000000..3334f9d
Binary files /dev/null and b/data/processed_data/data_20_7/data.npz differ
diff --git a/data/processed_data/data_20_7/scaler.sav b/data/processed_data/data_20_7/scaler.sav
new file mode 100644
index 0000000..5d9771f
Binary files /dev/null and b/data/processed_data/data_20_7/scaler.sav differ
diff --git a/data/processed_data/data_20_7/svm.model b/data/processed_data/data_20_7/svm.model
new file mode 100644
index 0000000..94e953b
Binary files /dev/null and b/data/processed_data/data_20_7/svm.model differ
diff --git a/data/processed_data/data_20_7/trials/rdFs.model b/data/processed_data/data_20_7/trials/rdFs.model
new file mode 100644
index 0000000..d5e2fdc
Binary files /dev/null and b/data/processed_data/data_20_7/trials/rdFs.model differ
diff --git a/data/processed_data/data_26_6/data.npz b/data/processed_data/data_26_6/data.npz
new file mode 100644
index 0000000..ebdc52a
Binary files /dev/null and b/data/processed_data/data_26_6/data.npz differ
diff --git a/data/processed_data/data_6_7/data.npz b/data/processed_data/data_6_7/data.npz
new file mode 100644
index 0000000..efbf2b4
Binary files /dev/null and b/data/processed_data/data_6_7/data.npz differ
diff --git a/data/processed_data/data_6_7/scaler.sav b/data/processed_data/data_6_7/scaler.sav
new file mode 100644
index 0000000..d64dbe1
Binary files /dev/null and b/data/processed_data/data_6_7/scaler.sav differ
diff --git a/data/processed_data/data_6_7/svm.model b/data/processed_data/data_6_7/svm.model
new file mode 100644
index 0000000..8b4d043
Binary files /dev/null and b/data/processed_data/data_6_7/svm.model differ
diff --git a/database/__pycache__/database.cpython-37.pyc b/database/__pycache__/database.cpython-37.pyc
deleted file mode 100644
index 5099262..0000000
Binary files a/database/__pycache__/database.cpython-37.pyc and /dev/null differ
diff --git a/database/database.py b/database/database.py
deleted file mode 100644
index 85486e5..0000000
--- a/database/database.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.ext.declarative import declarative_base
-from datetime import datetime
-
-import os
-
-SQLALCHEMY_DATABASE_URL = "mysql+pymysql://root:Long.0311@localhost:3306/acc_data"
-
-engine = create_engine(SQLALCHEMY_DATABASE_URL)
-
-Session = sessionmaker()
-Base = declarative_base()
\ No newline at end of file
diff --git a/model.py b/model.py
index e0566b3..53d6300 100644
--- a/model.py
+++ b/model.py
@@ -15,8 +15,14 @@ class Accelerometer(db.Model):
z = db.Column(db.Float)
timestamp = db.Column(db.DateTime)
+    # a user has many accelerometer rows, so the backref must stay a list
+    FK_acc_user = db.relationship("User", backref=db.backref("accelerometers"), lazy=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
+
def __repr__(self) -> str:
- return f" {self.timestamp}"
+ return f" {self.timestamp}"
+
+ def toDict(self):
+ return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}
class ResponseData(db.Model):
@@ -26,5 +32,21 @@ class ResponseData(db.Model):
steps = db.Column(db.Integer)
timestamp = db.Column(db.DateTime)
+    # a user has many response rows, so the backref must stay a list
+    FK_response_user = db.relationship("User", backref=db.backref("response_datas"), lazy=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
+
+ def toDict(self):
+ return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}
+
+
+class User(db.Model):
+ __tablename__ = "users"
+
+ id = db.Column(db.Integer, primary_key=True)
+ username = db.Column(db.String(50), unique=True, nullable=False)
+ password = db.Column(db.String(200))
+ gender = db.Column(db.Integer)
+ birthday = db.Column(db.DateTime)
+
def toDict(self):
- return { c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs }
+ return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}
diff --git a/processed_data/data_30_7/concatenated_data.npz b/processed_data/data_30_7/concatenated_data.npz
new file mode 100644
index 0000000..0c681d3
Binary files /dev/null and b/processed_data/data_30_7/concatenated_data.npz differ
diff --git a/processed_data/data_30_7/data.npz b/processed_data/data_30_7/data.npz
new file mode 100644
index 0000000..c4fec55
Binary files /dev/null and b/processed_data/data_30_7/data.npz differ
diff --git a/processed_data/data_30_7/scaler.sav b/processed_data/data_30_7/scaler.sav
new file mode 100644
index 0000000..eb1b7c2
Binary files /dev/null and b/processed_data/data_30_7/scaler.sav differ
diff --git a/processed_data/data_30_7/svm.model b/processed_data/data_30_7/svm.model
new file mode 100644
index 0000000..90f8369
Binary files /dev/null and b/processed_data/data_30_7/svm.model differ
diff --git a/processed_data/data_30_7/svm2.model b/processed_data/data_30_7/svm2.model
new file mode 100644
index 0000000..ad6243e
Binary files /dev/null and b/processed_data/data_30_7/svm2.model differ
diff --git a/processed_data/data_30_7/svm3.model b/processed_data/data_30_7/svm3.model
new file mode 100644
index 0000000..83c4f7f
Binary files /dev/null and b/processed_data/data_30_7/svm3.model differ
diff --git a/processed_data/data_30_7/svm4.model b/processed_data/data_30_7/svm4.model
new file mode 100644
index 0000000..f0457bb
Binary files /dev/null and b/processed_data/data_30_7/svm4.model differ
diff --git a/processed_data/data_30_7/trials/AdaBoostClassifier.model b/processed_data/data_30_7/trials/AdaBoostClassifier.model
new file mode 100644
index 0000000..6d85028
Binary files /dev/null and b/processed_data/data_30_7/trials/AdaBoostClassifier.model differ
diff --git a/processed_data/data_30_7/trials/DecisionTreeClassifier.model b/processed_data/data_30_7/trials/DecisionTreeClassifier.model
new file mode 100644
index 0000000..1ad9e1a
Binary files /dev/null and b/processed_data/data_30_7/trials/DecisionTreeClassifier.model differ
diff --git a/processed_data/data_30_7/trials/GaussianNB.model b/processed_data/data_30_7/trials/GaussianNB.model
new file mode 100644
index 0000000..da9097b
Binary files /dev/null and b/processed_data/data_30_7/trials/GaussianNB.model differ
diff --git a/processed_data/data_30_7/trials/GradientBoostingClassifier.model b/processed_data/data_30_7/trials/GradientBoostingClassifier.model
new file mode 100644
index 0000000..1722bce
Binary files /dev/null and b/processed_data/data_30_7/trials/GradientBoostingClassifier.model differ
diff --git a/processed_data/data_30_7/trials/KNeighborsClassifier.model b/processed_data/data_30_7/trials/KNeighborsClassifier.model
new file mode 100644
index 0000000..c18dc20
Binary files /dev/null and b/processed_data/data_30_7/trials/KNeighborsClassifier.model differ
diff --git a/processed_data/data_30_7/trials/LinearDiscriminantAnalysis.model b/processed_data/data_30_7/trials/LinearDiscriminantAnalysis.model
new file mode 100644
index 0000000..7884acb
Binary files /dev/null and b/processed_data/data_30_7/trials/LinearDiscriminantAnalysis.model differ
diff --git a/processed_data/data_30_7/trials/QuadraticDiscriminantAnalysis.model b/processed_data/data_30_7/trials/QuadraticDiscriminantAnalysis.model
new file mode 100644
index 0000000..26cfc68
Binary files /dev/null and b/processed_data/data_30_7/trials/QuadraticDiscriminantAnalysis.model differ
diff --git a/processed_data/data_30_7/trials/RandomForestClassifier.model b/processed_data/data_30_7/trials/RandomForestClassifier.model
new file mode 100644
index 0000000..7d514e1
Binary files /dev/null and b/processed_data/data_30_7/trials/RandomForestClassifier.model differ
diff --git a/processed_data/data_30_7/trials/SVC.model b/processed_data/data_30_7/trials/SVC.model
new file mode 100644
index 0000000..596ba28
Binary files /dev/null and b/processed_data/data_30_7/trials/SVC.model differ
diff --git a/run_server.py b/run_server.py
index 3a55f29..662a351 100644
--- a/run_server.py
+++ b/run_server.py
@@ -1,4 +1,3 @@
-from crypt import methods
from distutils.log import debug
from flask import Flask, request, jsonify, make_response
import json
@@ -8,50 +7,23 @@
from model import ResponseData, db
import config
from model import Accelerometer
+
+from controllers.user_controllers import auth_router
+from controllers.accelerometer_controllers import accelerometer_router
+from controllers.response_data_controller import response_router
+from controllers.acc_load_controller import acc_load
+
app = Flask(__name__)
-# config = Config()
app.config.from_object('config')
db.init_app(app=app)
+api = Api(app)
-
-@app.route('/', methods=['GET'])
-def get():
- try:
- data = ResponseData.query.all()
- res = []
- for step in data:
- res.append(step.toDict())
-
- except Exception as e:
- return jsonify({"error": "Exception: {}".format(e)}), 400
-
- return jsonify(res)
-
-
-@app.route('/', methods=['POST'])
-def post():
- try:
- x = request.form['x']
- y = request.form['y']
- z = request.form['z']
- timestamp = request.form['timestamp']
-
- acc = Accelerometer(x=x, y=y, z=z, timestamp=timestamp)
-
- db.session.add(acc)
- db.session.commit()
- res = {
- 'x': acc.x,
- 'y': acc.y,
- 'z': acc.z,
- 'timestamp': acc.timestamp
- }
- except Exception as e:
- return jsonify({"error": "Exception: {}".format(e)}), 400
- return jsonify(res), 200
-
+app.register_blueprint(auth_router)
+app.register_blueprint(accelerometer_router)
+app.register_blueprint(response_router)
+app.register_blueprint(acc_load)
if __name__ == "__main__":
app.run(host=config.APP_HOST, port=config.APP_PORT,
diff --git a/upload/data/test_1657798218.3466141.txt b/upload/data/test_1657798218.3466141.txt
new file mode 100644
index 0000000..25df680
--- /dev/null
+++ b/upload/data/test_1657798218.3466141.txt
@@ -0,0 +1,20 @@
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
diff --git a/upload/data/test_1657798224.3352017.txt b/upload/data/test_1657798224.3352017.txt
new file mode 100644
index 0000000..25df680
--- /dev/null
+++ b/upload/data/test_1657798224.3352017.txt
@@ -0,0 +1,20 @@
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
diff --git a/upload/data/test_1657798379.2496636.txt b/upload/data/test_1657798379.2496636.txt
new file mode 100644
index 0000000..25df680
--- /dev/null
+++ b/upload/data/test_1657798379.2496636.txt
@@ -0,0 +1,20 @@
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
diff --git a/upload/data/test_1657798382.4611232.txt b/upload/data/test_1657798382.4611232.txt
new file mode 100644
index 0000000..25df680
--- /dev/null
+++ b/upload/data/test_1657798382.4611232.txt
@@ -0,0 +1,20 @@
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
diff --git a/upload/data/test_1657798476.617716.txt b/upload/data/test_1657798476.617716.txt
new file mode 100644
index 0000000..25df680
--- /dev/null
+++ b/upload/data/test_1657798476.617716.txt
@@ -0,0 +1,20 @@
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
diff --git a/upload/data/test_1657814912.6784687.txt b/upload/data/test_1657814912.6784687.txt
new file mode 100644
index 0000000..25df680
--- /dev/null
+++ b/upload/data/test_1657814912.6784687.txt
@@ -0,0 +1,20 @@
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09
+1 1 1 2022-07-09