diff --git a/.DS_Store b/.DS_Store index 7951bdd..7267f8b 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/.gitignore b/.gitignore index 6005128..953e83c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ merged.csv *.csv *.csv +all_data.dat diff --git a/CSV_fall_data/.DS_Store b/CSV_fall_data/.DS_Store index f3a3342..b93924a 100644 Binary files a/CSV_fall_data/.DS_Store and b/CSV_fall_data/.DS_Store differ diff --git a/__pycache__/merger.cpython-35.pyc b/__pycache__/merger.cpython-35.pyc index e8758bc..9a08865 100644 Binary files a/__pycache__/merger.cpython-35.pyc and b/__pycache__/merger.cpython-35.pyc differ diff --git a/__pycache__/merger.cpython-37.pyc b/__pycache__/merger.cpython-37.pyc index c15f9cc..ce3e506 100644 Binary files a/__pycache__/merger.cpython-37.pyc and b/__pycache__/merger.cpython-37.pyc differ diff --git a/a.h5 b/a.h5 index 7342b8e..15f0dd5 100644 Binary files a/a.h5 and b/a.h5 differ diff --git a/abcdeg_l.h5 b/abcdeg_l.h5 deleted file mode 100644 index 9e957f4..0000000 Binary files a/abcdeg_l.h5 and /dev/null differ diff --git a/derivative_generator.py b/derivative_generator.py index 5dbd71e..1ba2fbd 100644 --- a/derivative_generator.py +++ b/derivative_generator.py @@ -19,7 +19,7 @@ def nth_derivative (N, order = 1): def derivative(n): if type(n) is not np.ndarray: n = np.array(n) - return np.gradient(n)[1] + return np.gradient(n)[0] def gradient(y, dx=1): """Returns second order accurate derivative of y using constant step size dx.""" diff --git a/fall_detection_1.h5 b/fall_detection_1.h5 index 3a5421a..cdb8785 100644 Binary files a/fall_detection_1.h5 and b/fall_detection_1.h5 differ diff --git a/main.py b/main.py deleted file mode 100644 index 3002246..0000000 --- a/main.py +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sun Jul 1 00:41:01 2018 - -@author: davitisoselia -""" -import merger - - -modeln='fall_detection_1.h5' # model name -merged_path = 'merged.csv' -import os.path - -if not os.path.isfile(merged_path): - merger.merge() - -falls=[] #saves fall start-end moments - -with open('merged.csv') as csv: - content = csv.readlines() -for i in range(len(content)): - if('tart' in content[i]): - falls.append([i]) - if('nd' in content[i]): - falls[-1].append(i) - content[i] = content[i].split(',') - - -#content = content[::10] - -''' - - if (len(content[i][-1]) > 2): - print(i) - ''' -import numpy as np -import sys - - - -def generate_numpy(point, length = 500): - segment = [] - falls = 0; - fell = [[0,1]] - for i in range(point, point + length): - if ('all' in content[i][-1]): - falls+=1 - if i%10==0: - segment.append(content[i][:-2]) - if (falls == 1): - return - elif(falls>1): - fell = [[1,0]] - for i in range(len(segment)): - for j in range(len(segment[i])): - segment[i][j] = float(segment[i][j]) - segment = np.array(segment) - return segment, fell - -ml,mk = generate_numpy(5) - - - - - - - -sensorNum = ml.shape[1] - - - -print(len((content[35232]))) -print(len(content[0])) - - - - - - - -from keras.models import Sequential -from keras.layers import LSTM -#from keras.layers import CuDNNLSTM as LSTM - -from keras.layers import Dense -from keras.layers import Conv1D -import numpy as np -from keras.models import load_model - - -if not os.path.isfile(modeln): - model = Sequential() - model.add(LSTM(25, return_sequences=True, stateful=True, input_shape=(None, sensorNum), - batch_input_shape=(1, None, sensorNum))) - model.add(LSTM(20, recurrent_dropout=0.2)) - #model.add(Dense(30, 
activation='relu')) - #model.add(Dense(10, activation='relu')) - model.add(Dense(2, activation='sigmoid')) - model.compile(loss='binary_crossentropy', - optimizer='rmsprop', - metrics=['accuracy']) -else: - model = load_model(modeln) - - -import random - - -def get_fall(point = 0, length = random.randint(300, 1500)): - if point == 0: - point = falls[random.randint(0, len(falls))][0] - random.randint(100, 500) - segment , fell = generate_numpy(point, length) - return segment , fell - -def checkresult_confusion(point = random.randint(1, len(content)-50), length = random.randint(300, 1500), check_fall = False, confusion_matrix = [[0,0],[0,0]]): - np_arr, y = get_fall() if check_fall else generate_numpy(point, length) - np_arr = np_arr / temp_storage - x_train = np.transpose(np_arr).reshape(1,np_arr.shape[0],np_arr.shape[1]) - #x_train = temp_storage / 50 - y_train = np.array(y) - prediction = model.predict(x_train) - if (np.argmax(y_train)==np.argmax(prediction) and np.argmax(y_train) == 0): - confusion_matrix[0][0] += 1 - elif (np.argmax(y_train)==np.argmax(prediction) and np.argmax(y_train) == 1): - confusion_matrix[1][1] += 1 - elif (np.argmax(y_train)!=np.argmax(prediction) and np.argmax(y_train) == 1): - confusion_matrix[1][0] += 1 - elif (np.argmax(y_train)!=np.argmax(prediction) and np.argmax(y_train) == 0): - confusion_matrix[0][1] += 1 - return (np.argmax(y_train)==np.argmax(prediction)), confusion_matrix - -''' -def checkresult(point = random.randint(1, len(content)-50), length = random.randint(300, 1500), check_fall = False): - np_arr, y = get_fall() if check_fall else generate_numpy(point, length) - np_arr = np_arr / temp_storage - x_train = np.transpose(np_arr).reshape(1,np_arr.shape[0],np_arr.shape[1]) - #x_train = temp_storage / 50 - y_train = np.array(y) - prediction = model.predict(x_train) - #print(y_train) - #print(prediction) - return (np.argmax(y_train)==np.argmax(prediction)) -''' - -from keras.models import load_model -j = 0 -iter = 0 -balance_needed = False -lastnp = np.array([]) -temp_storage = '43.39 11.03 16.21 85.62 178.8 172.6 40.39 18.69 7.767 137.4 105.2 107.9 59.82 107.9 51.32 206.8 54.73 56.91 34.09 29 46.25 49.62 47.7 96.14 83.78 178.1 178.8 87.09 82.72 177.8 178.8 136.2 135.6 18.03 37.27 39.53 28.23 35.61 37.22 3.644 67.37 21.58 63.19 82.38 82.69 178.9 82.62 180 180 86.57 180 179.7 80.72 179.9 80.44 79.99 107.7 179.8 89.15 178.1 -14.97 66.98 180 50.26 71.05 109.1 179.9 84.46 179.8 180 86.76 180 179.8 89.57 179.8 180 86.59 179.4 179.8 86.43 179.9 179.8 87.1 179.8 179.7 88.29 179.7 1791 1377 2042 1881 1604 958.5 2965 3014 2036 2325 2396 2078 3940 3616 3647 4682 3080 1474 3861 6624 2780 4152 4875 5995 3818 2253 641.1 3324 4992 5953 2714 3502 1516 8552 2113 1574 4586 15890 14370 4300 2691 1406 14470 10660 2981 11250 16000 14240 0.9126 0.4787 0.7286 0.8671 0.01697 0.8622 0.7929 -0.03522 0.3384 0.7069 0.6081 0.8366 0.7314 0.6626 0.7502 0.6727 0.6694 0.9436 0.2001 0.8859 0.04322 0.5211 0.9018 0.9656 0.9665 0.2635 0.9052 0.9012 0.7843 0.7929 0.4846 0.8235 0.7375 0.7812 0.6371 0.7238 0.4928 0.4771 0.8543 0.7993 0.8932 0.6572 0.4866 0.01178 0.1309 0.718 0.7586 0.7251'.split(' ') -normalizer = [] -for value in temp_storage: - normalizer.append(float(value)) -temp_storage = np.array(normalizer) - -def test(): - matrix = [[0,0],[0,0]] - fall = True - correct = 0 - i = 0 - while i < 1000: - try: - temp, matrix = checkresult_confusion(check_fall = fall, confusion_matrix = matrix) - correct += (temp) - i+=1 - fall = not fall - except: - pass - - print('accuracy: ') - 
print(correct) - print(matrix) - -while(iter<50000): - j=random.randint(1, len(content)-50) - #avred = not avred - try: - #print(iter) - if balance_needed: - np_arr, y = get_fall() - else: - np_arr, y = generate_numpy(j) - lastnp = np_arr - np_arr = np_arr / temp_storage - #x_train = x_train / 50 - y_train = np.array(y) - x_train = np.transpose(np_arr).reshape(1,np_arr.shape[0],np_arr.shape[1]) - model.fit(x_train, y_train, batch_size=1, nb_epoch=1, shuffle=False, verbose=0) - #print(j) - #j=random.randint(1, 5) - #j=random.randint(1264, 1896) - if(iter % 1000 == 0): - model.save(modeln) - test() - print(iter) - iter+=1; - balance_needed = not balance_needed - #print('here') - except (TypeError,IndexError): - #print('error raised at index ' +str(j)) - #print(sys.exc_info()[0]) - pass - except: - print(sys.exc_info()[0]) - raise \ No newline at end of file diff --git a/main_single_time.py b/main_single_time.py new file mode 100644 index 0000000..36b5198 --- /dev/null +++ b/main_single_time.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Jul 1 00:41:01 2018 + +@author: davitisoselia +""" +import merger + + +modeln='fall_detection_1.h5' # model name +merged_path = 'merged.csv' +import os.path + +if not os.path.isfile(merged_path): + merger.merge() + +falls=[] #saves fall start-end moments + +with open('merged.csv') as csv: + content = csv.readlines() +for i in range(int(len(content))): + if('tart' in content[i]): + falls.append([i]) + if('nd' in content[i]): + falls[-1].append(i) + content[i] = content[i].split(',') + + +#content = content[::10] + +''' + + if (len(content[i][-1]) > 2): + print(i) + ''' +import numpy as np +import sys + + + + + +def row_to_numpy(point): + segment = [] + fell = [0] + if (int(content[point][-2])) > 0: + fell = [1] + segment = (content[point][:-2]) + for j in range(len(segment)): + segment[j] = float(segment[j]) + segment = np.array(segment) + return segment, fell + +ml,mk = row_to_numpy(5) + + +sensorNum = ml.shape[0] + + +''' +print(len((content[35232]))) +''' +print(len(content[0])) + + + + + + + + +import numpy as np + +import random + +def get_fall(point = 0): + fell = [0] + while fell == [0]: + point = falls[random.randint(0, len(falls))][0] + random.randint(10, 100) + segment , fell = row_to_numpy(point) + return segment , fell + + +j = 0 +iter = 0 +balance_needed = False +lastnp = np.array([]) +temp_storage = '43.39 11.03 16.21 85.62 178.8 172.6 40.39 18.69 7.767 137.4 105.2 107.9 59.82 107.9 51.32 206.8 54.73 56.91 34.09 29 46.25 49.62 47.7 96.14 83.78 178.1 178.8 87.09 82.72 177.8 178.8 136.2 135.6 18.03 37.27 39.53 28.23 35.61 37.22 3.644 67.37 21.58 63.19 82.38 82.69 178.9 82.62 180 180 86.57 180 179.7 80.72 179.9 80.44 79.99 107.7 179.8 89.15 178.1 -14.97 66.98 180 50.26 71.05 109.1 179.9 84.46 179.8 180 86.76 180 179.8 89.57 179.8 180 86.59 179.4 179.8 86.43 179.9 179.8 87.1 179.8 179.7 88.29 179.7 1791 1377 2042 1881 1604 958.5 2965 3014 2036 2325 2396 2078 3940 3616 3647 4682 3080 1474 3861 6624 2780 4152 4875 5995 3818 2253 641.1 3324 4992 5953 2714 3502 1516 8552 2113 1574 4586 15890 14370 4300 2691 1406 14470 10660 2981 11250 16000 14240 0.9126 0.4787 0.7286 0.8671 0.01697 0.8622 0.7929 -0.03522 0.3384 0.7069 0.6081 0.8366 0.7314 0.6626 0.7502 0.6727 0.6694 0.9436 0.2001 0.8859 0.04322 0.5211 0.9018 0.9656 0.9665 0.2635 0.9052 0.9012 0.7843 0.7929 0.4846 0.8235 0.7375 0.7812 0.6371 0.7238 0.4928 0.4771 0.8543 0.7993 0.8932 0.6572 0.4866 0.01178 0.1309 0.718 0.7586 0.7251'.split(' ') +normalizer = [] 
+for value in temp_storage:
+    normalizer.append(float(value))
+temp_storage = np.array(normalizer)
+
+
+from numpy import loadtxt
+from xgboost import XGBClassifier
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import accuracy_score
+'''
+np_arr, y = get_fall() if False else row_to_numpy(5)
+np_arr = np_arr / temp_storage
+y_train = np.array(y)
+x_train = np.transpose(np_arr).reshape(1,sensorNum)
+
+# fit model no training data
+model = XGBClassifier()
+model.fit(x_train, y_train)
+y_pred = model.predict(x_train)
+predictions = [round(value) for value in y_pred]
+'''
+
+
+confusion_matrix = [[0,0],[0,0]]  # rows: actual (0 = fall, 1 = no fall); cols: predicted
+def checkresult_confusion(point = random.randint(1, len(content)-50), length = random.randint(300, 1500), check_fall = False, confusion_matrix = [[0,0],[0,0]]):
+    np_arr, y = get_fall() if check_fall else row_to_numpy(point)
+    np_arr = np_arr / temp_storage
+    y_train = np.array(y)
+    x_train = np.transpose(np_arr).reshape(1,sensorNum)
+    # XGBClassifier.predict returns a 1-D array of class labels, so index it once
+    prediction = model.predict(x_train)
+    if (y_train[0]==round(prediction[0]) and y_train[0] == 1):
+        confusion_matrix[0][0] += 1
+    elif (y_train[0]==round(prediction[0]) and y_train[0] == 0):
+        confusion_matrix[1][1] += 1
+    elif (y_train[0]!=round(prediction[0]) and y_train[0] == 0):
+        confusion_matrix[1][0] += 1
+    elif (y_train[0]!=round(prediction[0]) and y_train[0] == 1):
+        confusion_matrix[0][1] += 1
+    return (y_train[0]==round(prediction[0])), confusion_matrix
+
+modeln = 'a.h5'
+
+def test():
+    matrix = [[0,0],[0,0]]
+    fall = True
+    correct = 0
+    i = 0
+    while i < 100:
+        try:
+            temp, matrix = checkresult_confusion(check_fall=fall, confusion_matrix=matrix)
+            correct += temp
+            i += 1
+            fall = not fall
+        except:
+            print(sys.exc_info()[0])
+
+    print('accuracy: ')
+    print(correct)
+    print(matrix)
+X = []
+Y = []
+iter = 0
+# prep numpy for random forest
+balance_needed = False
+while(iter<2000):
+    j = random.randint(1, len(content)-50)
+    #print(j)
+    #avred = not avred
+    try:
+        #print(iter)
+        #print('Balance 0 : ' + str(balance_needed))
+        if balance_needed:
+            np_arr, y = get_fall()
+        else:
+            np_arr, y = row_to_numpy(j)
+        #print('Balance : ' + str(balance_needed))
+        lastnp = np_arr
+        np_arr = np_arr / temp_storage
+        y_train = np.array(y)
+        x_train = np.transpose(np_arr).reshape(sensorNum)
+        X.append(x_train)
+        Y.append(y_train)
+        iter += 1
+        balance_needed = not balance_needed
+    except (TypeError,IndexError):
+        print('error raised at index ' + str(j))
+        print(sys.exc_info()[0])
+        pass
+    except:
+        print(sys.exc_info()[0])
+        raise
+
+
+
+j = 0
+iter = 0
+balance_needed = False
+lastnp = np.array([])
+
+X_1 = []
+Y_1 = []
+iter = 0
+# prep numpy for random forest
+balance_needed = False
+while(iter<400):
+    j = random.randint(1, len(content)-50)
+    #print(j)
+    #avred = not avred
+    try:
+        #print(iter)
+        #print('Balance 0 : ' + str(balance_needed))
+        if balance_needed:
+            np_arr, y = get_fall()
+        else:
+            np_arr, y = row_to_numpy(j)
+        #print('Balance : ' + str(balance_needed))
+        lastnp = np_arr
+        np_arr = np_arr / temp_storage
+        y_train = np.array(y)
+        x_train = np.transpose(np_arr).reshape(sensorNum)
+        X_1.append(x_train)
+        Y_1.append(y_train)
+        iter += 1
+        balance_needed = not balance_needed
+    except (TypeError,IndexError):
+        print('error raised at index ' + str(j))
+        print(sys.exc_info()[0])
+        pass
+    except:
+        print(sys.exc_info()[0])
+        raise
+
+
+X_t = np.array(X)
+Y_t = np.array(Y)
+Y_t = Y_t.reshape(Y_t.shape[0])
+X_test = np.array(X_1)
+Y_test = np.array(Y_1)
+Y_test = 
Y_test.reshape(Y_test.shape[0]) + + +model = XGBClassifier() +model.fit(X_t, Y_t) + + + +y_pred = model.predict(X_t) +predictions = [round(value) for value in y_pred] +# evaluate predictions +accuracy = accuracy_score(Y_t, predictions) +print("Training Accuracy: %.2f%%" % (accuracy * 100.0)) +import sklearn +print(sklearn.metrics.precision_score(Y_t, predictions)) +print(sklearn.metrics.recall_score(Y_t, predictions)) + + + + + +y_pred = model.predict(X_test) +predictions = [round(value) for value in y_pred] +# evaluate predictions +accuracy = accuracy_score(Y_test, predictions) +print("Testing Accuracy: %.2f%%" % (accuracy * 100.0)) +import sklearn +print(sklearn.metrics.precision_score(Y_test, predictions)) +print(sklearn.metrics.recall_score(Y_test, predictions)) +''' +confusion_matrix = [[0,0],[0,0]] +def checkresult_confusion(point = random.randint(1, len(content)-50), length = random.randint(300, 1500), check_fall = False): + np_arr, y = get_fall() if check_fall else generate_numpy(point, length) + np_arr = np_arr / temp_storage + y_train = np.array(y) + x_train = np.transpose(np_arr).reshape(1,sensorNum) + prediction = model.predict(x_train) + print(y_train) + print(prediction) + if (y_train[0]==round(prediction[0][0]) and y_train[0] == 1): + confusion_matrix[0][0] += 1 + elif (y_train[0]==round(prediction[0][0]) and y_train[0] == 0): + confusion_matrix[1][1] += 1 + elif (y_train[0]!=round(prediction[0][0]) and y_train[0] == 0): + confusion_matrix[1][0] += 1 + elif (y_train[0]!=round(prediction[0][0]) and y_train[0] == 1): + confusion_matrix[0][1] += 1 + return (y_train[0]==round(prediction[0][0])) +''' +''' +from xgboost import plot_tree +plot_tree(model, num_trees=7) +''' + + + + + + + +''' +#train boosted decision tree + +from sklearn.ensemble import RandomForestClassifier +#from sklearn.datasets import make_classification + +def random_forests_create(): + rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', + max_depth=3, max_features='auto', max_leaf_nodes=None, + min_impurity_decrease=0.0, min_impurity_split=None, + min_samples_leaf=1, min_samples_split=2, + min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, + oob_score=False, random_state=0, verbose=0, warm_start=False) + return rf + + +def random_forests_train(rf, X_train, Y_train): + rf.fit(X_train, Y_train) + return rf + + + #rf.fit(get_fall[0], get_fall[1]) + + + +''' \ No newline at end of file diff --git a/merger.py b/merger.py index 6342ad4..83a4592 100644 --- a/merger.py +++ b/merger.py @@ -13,6 +13,7 @@ def strip(path): stripped = "" i = 0 last = "" + falling = False for row in content: if (i==4): if ('Marker' not in row): @@ -23,11 +24,15 @@ def strip(path): new_row = row if (end): new_row = new_row[:-1]+',0,\n' + elif ('all' in new_row): + falling = ~falling new_row = new_row.split(',') if(start): new_row = new_row[3:] else: - new_row = new_row[1:] + new_row = new_row[1:] + if falling: + new_row[-2] = '1' new_row = ','.join(new_row) last = new_row stripped += new_row diff --git a/pca.py b/pca.py new file mode 100644 index 0000000..f08d1db --- /dev/null +++ b/pca.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- + +labels = '"Cervical Flexion,deg","Cervical Lateral - RT,deg","Cervical Axial - RT,deg","Lumbar Flexion,deg","Lumbar Lateral - RT,deg","Lumbar Axial - RT,deg","Thoracic Flexion,deg","Thoracic Lateral - RT,deg","Thoracic Axial - RT,deg","Elbow Flexion LT,deg","Elbow Flexion RT,deg","Shoulder Total Flexion LT,deg","Shoulder Total Flexion RT,deg","Shoulder Flexion 
LT,deg","Shoulder Flexion RT,deg","Shoulder Abduction LT,deg","Shoulder Abduction RT,deg","Shoulder Rotation - out LT,deg","Shoulder Rotation - out RT,deg","Wrist Extension LT,deg","Wrist Extension RT,deg","Wrist Radial LT,deg","Wrist Radial RT,deg","Wrist Supination LT,deg","Wrist Supination RT,deg","Hip Flexion LT,deg","Hip Flexion RT,deg","Hip Abduction LT,deg","Hip Abduction RT,deg","Hip Rotation - out LT,deg","Hip Rotation - out RT,deg","Knee Flexion LT,deg","Knee Flexion RT,deg","Ankle Dorsiflexion LT,deg","Ankle Dorsiflexion RT,deg","Ankle Inversion LT,deg","Ankle Inversion RT,deg","Ankle Abduction LT,deg","Ankle Abduction RT,deg","Head course,deg","Head pitch,deg","Head roll,deg","Upper spine course,deg","Upper spine pitch,deg","Upper spine roll,deg","Upper arm course LT,deg","Upper arm pitch LT,deg","Upper arm roll LT,deg","Forearm course LT,deg","Forearm pitch LT,deg","Forearm roll LT,deg","Hand course LT,deg","Hand pitch LT,deg","Hand roll LT,deg","Upper arm course RT,deg","Upper arm pitch RT,deg","Upper arm roll RT,deg","Forearm course RT,deg","Forearm pitch RT,deg","Forearm roll RT,deg","Hand course RT,deg","Hand pitch RT,deg","Hand roll RT,deg","Lower spine course,deg","Lower spine pitch,deg","Lower spine roll,deg","Pelvis course,deg","Pelvis pitch,deg","Pelvis roll,deg","Thigh course LT,deg","Thigh pitch LT,deg","Thigh roll LT,deg","Shank course LT,deg","Shank pitch LT,deg","Shank roll LT,deg","Foot course LT,deg","Foot pitch LT,deg","Foot roll LT,deg","Thigh course RT,deg","Thigh pitch RT,deg","Thigh roll RT,deg","Shank course RT,deg","Shank pitch RT,deg","Shank roll RT,deg","Foot course RT,deg","Foot pitch RT,deg","Foot roll RT,deg","Head Accel Sensor X,mG","Head Accel Sensor Y,mG","Head Accel Sensor Z,mG","Upper spine Accel Sensor X,mG","Upper spine Accel Sensor Y,mG","Upper spine Accel Sensor Z,mG","Upper arm Accel Sensor X LT,mG","Upper arm Accel Sensor Y LT,mG","Upper arm Accel Sensor Z LT,mG","Forearm Accel Sensor X LT,mG","Forearm Accel Sensor Y LT,mG","Forearm Accel Sensor Z LT,mG","Hand Accel Sensor X LT,mG","Hand Accel Sensor Y LT,mG","Hand Accel Sensor Z LT,mG","Upper arm Accel Sensor X RT,mG","Upper arm Accel Sensor Y RT,mG","Upper arm Accel Sensor Z RT,mG","Forearm Accel Sensor X RT,mG","Forearm Accel Sensor Y RT,mG","Forearm Accel Sensor Z RT,mG","Hand Accel Sensor X RT,mG","Hand Accel Sensor Y RT,mG","Hand Accel Sensor Z RT,mG","Lower spine Accel Sensor X,mG","Lower spine Accel Sensor Y,mG","Lower spine Accel Sensor Z,mG","Pelvis Accel Sensor X,mG","Pelvis Accel Sensor Y,mG","Pelvis Accel Sensor Z,mG","Thigh Accel Sensor X LT,mG","Thigh Accel Sensor Y LT,mG","Thigh Accel Sensor Z LT,mG","Shank Accel Sensor X LT,mG","Shank Accel Sensor Y LT,mG","Shank Accel Sensor Z LT,mG","Foot Accel Sensor X LT,mG","Foot Accel Sensor Y LT,mG","Foot Accel Sensor Z LT,mG","Thigh Accel Sensor X RT,mG","Thigh Accel Sensor Y RT,mG","Thigh Accel Sensor Z RT,mG","Shank Accel Sensor X RT,mG","Shank Accel Sensor Y RT,mG","Shank Accel Sensor Z RT,mG","Foot Accel Sensor X RT,mG","Foot Accel Sensor Y RT,mG","Foot Accel Sensor Z RT,mG","Head Rot X,","Head Rot Y,","Head Rot Z,","Upper spine Rot X,","Upper spine Rot Y,","Upper spine Rot Z,","LT Upper arm Rot X,","LT Upper arm Rot Y,","LT Upper arm Rot Z,","LT Forearm Rot X,","LT Forearm Rot Y,","LT Forearm Rot Z,","LT Hand Rot X,","LT Hand Rot Y,","LT Hand Rot Z,","RT Upper arm Rot X,","RT Upper arm Rot Y,","RT Upper arm Rot Z,","RT Forearm Rot X,","RT Forearm Rot Y,","RT Forearm Rot Z,","RT Hand Rot X,","RT Hand Rot Y,","RT Hand Rot 
Z,","Lower spine Rot X,","Lower spine Rot Y,","Lower spine Rot Z,","Pelvis Rot X,","Pelvis Rot Y,","Pelvis Rot Z,","LT Thigh Rot X,","LT Thigh Rot Y,","LT Thigh Rot Z,","LT Shank Rot X,","LT Shank Rot Y,","LT Shank Rot Z,","LT Foot Rot X,","LT Foot Rot Y,","LT Foot Rot Z,","RT Thigh Rot X,","RT Thigh Rot Y,","RT Thigh Rot Z,","RT Shank Rot X,","RT Shank Rot Y,","RT Shank Rot Z,","RT Foot Rot X,","RT Foot Rot Y,","RT Foot Rot Z,"' +labels_1 = labels +labels = labels.split('","') +len(labels) +NN = [4.3942465e-17, 0.9976151, 3.8842754e-13, 0.0047140205, 0.99059063, 1.6294735e-14, 0.99838316, 0.9963187, 0.9980227, 0.26315832, 0.51032656, 0.56320333, 0.8683201, 0.9979792, 0.0, 2.1185279e-29, 0.99890924, 0.99755543, 0.9976985, 0.96669143, 0.13358483, 0.56574714, 0.0, 0.013004635, 0.99778384, 0.98736006, 0.549251, 0.999435, 0.9985744, 0.39126655, 0.1471329, 0.026426328, 7.1076603e-13, 0.99753726, 0.97508097, 0.9983169, 0.99829143, 0.9981811, 0.9980869, 0.9980781, 0.99714977, 0.9980982, 0.9980957, 0.9968991, 0.9917944, 0.99744797, 0.99159557, 4.183078e-07, 0.6490243, 0.9543748, 0.10686118, 0.99741745, 0.75481164, 0.9985587, 0.99806374, 3.7024948e-09, 0.5356963, 0.9846845, 0.08089858, 0.98945516, 0.99810994, 0.98735374, 0.9978077, 0.99818265, 0.99801934, 0.99664754, 0.99812955, 0.013492543, 0.9884362, 0.997945, 0.99148715, 0.99842024, 0.99738675, 0.9980786, 0.9976648, 0.9959525, 0.9912203, 0.99812824, 0.99814296, 0.8414523, 0.0025003229, 0.99810517, 0.9972945, 0.0, 0.99810684, 0.97869647, 0.12663864, 0.99751186, 0.99833953, 0.9979551, 0.9982703, 0.99834526, 0.9978021, 0.9980136, 0.9980286, 0.998451, 0.99086046, 0.99858737, 0.86542803, 1.8361666e-25, 0.99648994, 0.9981877, 0.99813545, 0.9976829, 0.9919376, 0.99860424, 0.9358125, 0.9971706, 0.38322046, 0.99816114, 2.0557314e-28, 0.9980394, 0.9982377, 0.9972909, 0.99718314, 0.99785966, 0.99813, 0.99729866, 0.9982504, 0.9981111, 0.9756732, 0.998061, 0.99832267, 0.9980672, 0.9987388, 0.9981674, 0.98818016, 0.997347, 0.99822146, 0.12747872, 0.9985732, 0.99813557, 0.9974981, 0.96433073, 0.9928503, 0.9980373, 0.9977956, 0.9926523, 0.99811494, 0.9979157, 0.997569, 0.9975389, 0.99055064, 0.9975727, 0.99703693, 4.6869163e-06, 0.9981499, 0.9980926, 0.99790883, 0.9973279, 0.998302, 0.99791926, 0.99946564, 0.99822754, 0.99792284, 0.9979442, 0.9955344, 0.99801517, 0.9981317, 0.99803454, 0.9970482, 0.99797565, 0.9980584, 0.99783784, 0.9980445, 0.9978947, 0.99799025, 0.99803954, 0.99746007, 0.99766004, 0.99604875, 0.99185294, 0.99809164, 0.9980665, 0.9979086, 0.9977964, 7.566601e-20, 0.99800724, 0.9980544, 0.9980275, 0.9977933, 0.9984037, 0.99806315] + +import numpy as np +import sklearn +from sklearn.decomposition import PCA +a = np.array([[2,2, 0],[2,2, 0], [1,1, 0]]) + + +a = np.array([[2,2, 0],[2,2, 0], [1,2, 0]]) + +a = np.array([[2,2, 0],[2,2, 0], [50,50, 0]]) + +a = np.array([[1,2,3,4], [1,2,3,4] , [1,2,3,4] , [1,2,3,4]]) + +a = np.array([[1,2,3,4], [2,1,3,4] , [3,0,3,4] , [4,-1,3,4]]) + +a = np.array([[4, 6, 10],[ 3, 10, 13],[-2, -6, -8]]) +from sklearn import preprocessing +a = X_t +a = preprocessing.normalize(a) +pca = PCA(n_components=2) +pca.fit(a) +print(pca.explained_variance_ratio_) +print(pca.singular_values_) +print(pca.components_) + +from sklearn.covariance import EmpiricalCovariance + +E = EmpiricalCovariance() +E.fit(a) +print(E.covariance_) +from numpy import linalg as LA +w, v = LA.eig(E.covariance_) + + +import matplotlib.pyplot as plt + +X = a +y = Y_t +pca = PCA(n_components=2) +X_r = pca.fit(X).transform(X) +import random + +plt.figure() 
+colors = [ 'blue', 'red']
+target_names = [ 'nofall', 'fall']
+lw = 2
+
+for color, i, target_name in zip(colors, [0, 1], target_names):
+    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
+                label=target_name)
+plt.legend(loc='best', shadow=False, scatterpoints=1)
+plt.title('PCA')
+
+
+
+
+
+plt.figure()
+labs = ['nofall', 'fall']
+for i in range(500):
+    plt.scatter(X_r[i, 0], X_r[i, 1], color=colors[y[i]], label=labs[y[i]])
+plt.legend(loc='upper left')
+
+
+j = abs(E.covariance_[0])
+indices = np.where(j > 0.9*j.max())
+j = abs(pca.components_[0])
+indices = np.where(j > 0.9*j.max())
+
+
+
+
+
+
+accuracys_train = []
+accuracys_test = []
+for i in range(len(labels)):
+    model = XGBClassifier()
+    model.fit(X_t[:,i].reshape([X_t.shape[0],1]), Y_t)
+    y_pred = model.predict(X_t[:,i].reshape([X_t.shape[0],1]))
+    predictions = [round(value) for value in y_pred]
+    # evaluate predictions
+    accuracy = accuracy_score(Y_t, predictions)
+    #print("Training Accuracy: %.2f%%" % (accuracy * 100.0))
+    #print(sklearn.metrics.precision_score(Y_t, predictions))
+    #print(sklearn.metrics.recall_score(Y_t, predictions))
+
+
+    accuracys_train.append(accuracy)
+    y_pred = model.predict(X_test[:,i].reshape([X_test.shape[0],1]))
+    predictions = [round(value) for value in y_pred]
+    # evaluate predictions
+    accuracy = accuracy_score(Y_test, predictions)
+    #print("Testing Accuracy: %.2f%%" % (accuracy * 100.0))
+    #print(sklearn.metrics.precision_score(Y_test, predictions))
+    #print(sklearn.metrics.recall_score(Y_test, predictions))
+    accuracys_test.append(accuracy)
+
+j = abs(np.array(accuracys_test))
+indices = np.where(j > 0.9*j.max())
+#CSV =([labels, list(pca.components_[0]), list(pca.components_[1]), list(pca.components_[2]), accuracys_train, accuracys_test])
+
+BDT_scores = model.feature_importances_
+
+j = abs(np.array(BDT_scores))
+indices = np.where(j > 0.9*j.max())
+
+CSV = np.array([np.array(labels), pca.components_[0], pca.components_[1], pca.components_[2], model.feature_importances_, NN, np.array(accuracys_train), np.array(accuracys_test)])
+np.savetxt("PCA.csv", CSV, delimiter=",", fmt='%s')
+from matplotlib import pyplot
+pyplot.bar(range(len(BDT_scores)), BDT_scores)  # per-feature BDT importances ('temp' here was undefined)
+pyplot.show()
+
+pyplot.bar(range(len(NN)), np.array(NN))  # NN scores; x-range taken from NN's own length
+pyplot.show()
+
+
+
+
+
+
+
diff --git a/train_der.py b/train_der.py
new file mode 100644
index 0000000..8190c93
--- /dev/null
+++ b/train_der.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Aug 18 22:02:00 2018
+
+@author: Dati
+"""
+
+
+X = []
+Y = []
+iter = 0
+# prep numpy for random forest
+balance_needed = False
+while(iter 0.3*j.max())
+
+BDT_scores = model.feature_importances_
+j = abs(np.array(BDT_scores))
+indices1 = np.where(j < 0.3*j.max())
+
+
+
+np.sum(model.feature_importances_[indices])/np.sum(model.feature_importances_)  # fraction of total importance carried by the selected features
+
+
+
+
+
+
+
+
+
+
+
+
+
+accuracys_train = []
+accuracys_test = []
+for i in range(X.shape[1]):
+    model = XGBClassifier()
+    model.fit(X_train[:,i].reshape([X_train.shape[0],1]), Y_train)
+    y_pred = model.predict(X_train[:,i].reshape([X_train.shape[0],1]))
+    predictions = [round(value) for value in y_pred]
+    # evaluate predictions
+    accuracy = accuracy_score(Y_train, predictions)
+    #print("Training Accuracy: %.2f%%" % (accuracy * 100.0))
+    #print(sklearn.metrics.precision_score(Y_t, predictions))
+    #print(sklearn.metrics.recall_score(Y_t, predictions))
+
+
+    accuracys_train.append(accuracy)
+    y_pred = model.predict(X_test[:,i].reshape([X_test.shape[0],1]))
+    predictions = [round(value) for value in y_pred]
+    # evaluate predictions
+    accuracy = accuracy_score(Y_test, predictions)
+    #print("Testing Accuracy: %.2f%%" % (accuracy * 100.0))
+    #print(sklearn.metrics.precision_score(Y_test, predictions))
+    #print(sklearn.metrics.recall_score(Y_test, predictions))
+    accuracys_test.append(accuracy)
+
+
+j = abs(np.array(accuracys_test))
+indices = np.where(j > 0.99*j.max())
+
+
+
+
+
+
+real_accuracy = []
+for i in range(len(labels)):
+    # pair each raw feature's accuracy with its counterpart 183 columns later
+    # (presumably the derivative channel of the same sensor)
+    real_accuracy.append(accuracys_test[i]+accuracys_test[i+183])
+j = abs(np.array(real_accuracy))
+indices = np.where(j > 0.95*j.max())
+
+CSV = np.array([accuracys_test, real_accuracy])
+np.savetxt("realacc.csv", CSV, delimiter=",", fmt='%s')
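
The derivative_generator.py hunk swaps np.gradient(n)[1] for np.gradient(n)[0]. For a 2-D array, np.gradient returns one array per axis, so index 0 differentiates along rows and index 1 along columns. A minimal sketch of what each index selects, assuming rows are time steps and columns are sensor channels as in the merged CSV:

import numpy as np

signal = np.arange(12, dtype=float).reshape(4, 3)  # 4 time steps x 3 sensor columns
d_time, d_cols = np.gradient(signal)               # one gradient array per axis

print(d_time)   # all 3.0: per-time-step slope of each sensor (what the fix selects)
print(d_cols)   # all 1.0: differences across neighbouring columns (the old index)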
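The merger.py hunk toggles a falling flag on every marker row containing 'all' (matching 'Fall Start' / 'Fall End') and writes '1' into the second-to-last field of rows inside that segment. A standalone sketch of the labelling pass, using `not` instead of the patch's bitwise `~` (on Python bools, ~False is -1 and ~True is -2, so the flag only keeps working by accident of truthiness); the row layout is an assumption based on the diff:

def label_fall_rows(rows):
    """Set the second-to-last CSV field to '1' between fall markers.

    rows -- comma-separated strings; marker rows contain 'all'
    (e.g. 'Fall Start' / 'Fall End'), mirroring merger.py's convention.
    """
    falling = False
    labelled = []
    for row in rows:
        if 'all' in row:        # a 'Fall ...' marker row: flip the state
            falling = not falling
        fields = row.split(',')
        if falling:
            fields[-2] = '1'    # rows inside the fall segment get label 1
        labelled.append(','.join(fields))
    return labelled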
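main_single_time.py builds a balanced training set by alternating rows drawn near a recorded fall with rows sampled uniformly at random, then fits an XGBClassifier on single time steps. A condensed, self-contained sketch of that pipeline on synthetic data (the arrays and sizes are illustrative, and random.randrange is used where the script's random.randint(0, len(falls)) can index one past the end of the list):

import random
import numpy as np
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score

rng = np.random.default_rng(0)
# synthetic stand-ins for the merged sensor rows: 200 features per row
normal_rows = rng.normal(0.0, 1.0, size=(900, 200))
fall_rows = rng.normal(2.0, 1.0, size=(100, 200))  # shifted "fall" rows

X, y = [], []
balance_needed = False
for _ in range(1000):
    if balance_needed:  # draw a row from a fall segment
        X.append(fall_rows[random.randrange(len(fall_rows))])
        y.append(1)
    else:               # draw an ordinary row
        X.append(normal_rows[random.randrange(len(normal_rows))])
        y.append(0)
    balance_needed = not balance_needed  # alternate classes, as the script does

model = XGBClassifier()
model.fit(np.array(X), np.array(y))
preds = model.predict(np.array(X))
print("training accuracy: %.2f%%" % (100 * accuracy_score(y, preds)))

Alternating the two classes this way gives the classifier a roughly 50/50 label split even though falls are rare in the raw recordings, at the cost of re-showing fall rows many times.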