repo_name
stringlengths
6
97
path
stringlengths
3
341
text
stringlengths
8
1.02M
everydaytimmy/data-structures-and-algorithms
python/code_challenges/depth_first_graph/depth_first_graph.py
from code_challenges.graph.graph import Graph, Vertex def depth_traversal(node, graph): visited = set() stack = Stack() nodes = [] stack.push(node) while stack.top: current = stack.pop() nodes.append(current) neighbors = graph.get_neighbors(current) if neighbors: for neighbor in neighbors: if neighbor not in visited: stack.push(neighbor) visited.add(neighbor) # breakpoint() return nodes class Stack: def __init__(self): self.top = None def push(self, value): self.top = Node (value, self.top) def pop(self): if not self.top: raise InvalidOperationError("Method not allowed on empty collection") value = self.top.value self.top = self.top.next return value def is_empty(self): return self.top is None def peek(self): if not self.top: raise InvalidOperationError("Method not allowed on empty collection") return self.top class Node: def __init__(self, value, next = None): self.value = value self.next = next #### Big Brain Energy #### # def depth_first_pre_order(first_vertex, graph): # visited = {} # collection = [] # def walk(start_vertex): # nonlocal visited, collection, graph # if start_vertex is None: # return # if start_vertex not in visited: # collection.append(start_vertex) # visited[start_vertex] = True # if graph._adjacency_list.get(start_vertex): # for end_vertex in graph._adjacency_list.get(start_vertex): # walk(end_vertex) # walk(first_vertex) # return collection
everydaytimmy/data-structures-and-algorithms
python/tests/test_breadth_first.py
from code_challenges.breadth_first.breadth_first import Node, BinaryTree def test_node_has_value(): node = Node("apple") assert node.value == "apple" def test_node_has_left_of_none(): node = Node("apple") assert node.left is None def test_node_has_right_of_none(): node = Node("apple") assert node.right is None def test_create_binary_tree(): tree = BinaryTree() assert tree def test_add_to_empty_bt(): tree = BinaryTree() tree.add(7) actual = tree.root.value expected = 7 assert actual == expected def test_breadth(): tree = BinaryTree() tree.add(7) tree.add(8) tree.add(6) actual = BinaryTree.breadth(tree) expected = [7,8,6] assert actual == expected def test_breadth_many(): tree = BinaryTree() tree.add(7) tree.add(8) tree.add(6) tree.add(20) tree.add(43) tree.add(2) tree.add(1) actual = BinaryTree.breadth(tree) expected = [7,8,6,20,43,2,1] assert actual == expected
everydaytimmy/data-structures-and-algorithms
python/tests/test_hashmap_tree_intersection.py
from code_challenges.hashmap_tree_intersection.hashmap_tree_intersection import tree_intersection from code_challenges.tree.tree import Node, BinaryTree, BinarySearchTree def test__tree_compare(): tree1 = BinarySearchTree() tree1.add(5) tree1.add(2) tree1.add(3) tree2 = BinarySearchTree() tree2.add(5) tree2.add(1) tree2.add(4) actual = tree_intersection(tree1, tree2) expected = [5] assert actual == expected def test_tree_compare_one(): tree1 = BinarySearchTree() tree1.add(5) tree1.add(2) tree1.add(3) tree2 = BinarySearchTree() tree2.add(5) tree2.add(1) tree2.add(4) actual = tree_intersection(tree1, tree2) expected = [5] assert actual == expected def test_tree_compare_two(): tree1 = BinarySearchTree() tree1.add(5) tree1.add(2) tree1.add(3) tree1.add(2) tree1.add(1) tree1.add(2) tree1.add(4) tree2 = BinarySearchTree() tree2.add(5) tree2.add(1) tree2.add(4) actual = tree_intersection(tree1, tree2) expected = [5, 1, 4] assert actual == expected def test_tree_compare_three(): tree1 = BinarySearchTree() tree1.add(5) tree1.add(2) tree1.add(3) tree1.add(2) tree1.add(1) tree1.add(2) tree1.add(4) tree2 = BinarySearchTree() tree2.add(12) tree2.add(22) tree2.add(24) actual = tree_intersection(tree1, tree2) expected = [] assert actual == expected
everydaytimmy/data-structures-and-algorithms
python/tests/test_animal_shelter.py
from code_challenges.fifo_animal_shelter.fifo_animal_shelter import Cat, Dog, Queue, AnimalShelter def test_single_animal(): shelter = AnimalShelter() cat = Cat() shelter.enqueue(cat) actual = shelter.dequeue("cat") expected = cat assert actual == expected def test_animal_dequeue(): shelter = AnimalShelter() cat = Cat() dog = Dog() shelter.enqueue(cat) shelter.enqueue(dog) shelter.enqueue(dog) actual = shelter.dequeue('dog') expected = dog assert actual == expected def test_animal_dequeue_none(): shelter = AnimalShelter() cat = Cat() dog = Dog() shelter.enqueue(cat) shelter.enqueue(dog) shelter.enqueue(cat) shelter.enqueue(dog) actual = shelter.dequeue('lion') expected = None assert actual == expected
everydaytimmy/data-structures-and-algorithms
python/code_challenges/quick_sort/quick_sort.py
def partition(start, end, array): pivot_index = start pivot = array[pivot_index] while start < end: while start < len(array) and array[start] <= pivot: start += 1 while array[end] > pivot: end -= 1 if(start < end): array[start], array[end] = array[end], array[start] array[end], array[pivot_index] = array[pivot_index], array[end] return end # The main function that implements QuickSort def quick_sort(start, end, array): if (start < end): # p is partitioning index, array[p] # is at right place p = partition(start, end, array) # Sort elements before partition # and after partition quick_sort(start, p - 1, array) quick_sort(p + 1, end, array) return array
everydaytimmy/data-structures-and-algorithms
python/tests/test_merge_sort.py
<filename>python/tests/test_merge_sort.py from code_challenges.merge_sort.merge_sort import merge_sort, merge def test_merge(): assert merge_sort def test_merge1(): actual = merge_sort([8,4,23,42,16,15]) expected = [4,8,15,16,23,42] assert actual == expected def test_merge2(): actual = merge_sort([3,4,12,42,-8,15]) expected = [-8,3,4,12,15,42] assert actual == expected
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/utility.py
<gh_stars>0 import matplotlib.pyplot as plt import matplotlib.animation as animation import datetime as dt def graph_it(x, y, x_text, y_text, title): plt.plot(x, y) plt.xlabel(x_text) plt.ylabel(y_text) plt.title
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/RnDGui.py
<reponame>mVolpe94/Pellet-Stove-Automation<filename>TestingEnvironment/RnDGui.py # import RPi.GPIO as gpio import time, os, sys, re import matplotlib.pyplot as plt import utility import mainprocess from tkinter import * from tkinter import ttk augers = 5 starter = 6 convection_blower = 13 combustion_blower = 21 room_air_sensor = 19 outgoing_air_sensor = 26 flame_sensor = 20 #pin setup # gpio.setup(augers, gpio.OUT) # gpio.setup(convection_blower, gpio.OUT) # gpio.setup(combustion_blower, gpio.OUT) # gpio.setup(room_air_sensor, gpio.IN) # gpio.setup(outgoing_air_sensor, gpio.IN) # gpio.setup(flame_sensor, gpio.IN) button = 0 running = None class RndGui: def __init__(self, root, auger_running): self.auger_running = auger_running root.title("PELLET STOVE R+D") root.columnconfigure(0, weight=1) root.rowconfigure(0, weight=1) ############### Main Frame #Main Frame settings_frame = ttk.Frame(root, padding="15 15 15 15") settings_frame.grid(column=0, row=0, sticky=(N, S, E, W)) #Auger Test auger_run_time = IntVar() auger_label = ttk.Label(settings_frame, text='Auger Run Time:') auger_label.grid(column=0, row=0, sticky=W) self.auger_low = ttk.Button( settings_frame, text="Low", command=lambda: [self.afterkill(self.running), self.auger_modes(4000, False)] ) self.auger_low.grid( column=1, row=0, padx=5, sticky=(W,E)) self.auger_medium = ttk.Button( settings_frame, text="Medium", command=lambda: [self.afterkill(self.running), self.auger_modes(3000, False)]) self.auger_medium.grid( column=2, row=0, padx=5, sticky=W) auger_off = ttk.Button( settings_frame, text="Off", command=lambda: self.auger_modes(0, "OFF")) auger_off.grid(column=1, row=1, padx=5, sticky=W) auger_high = ttk.Button( settings_frame, text="High", command=lambda: [self.afterkill(self.running), self.auger_modes(2000, False)]) auger_high.grid( column=2, row=1, padx=5, sticky=W) ttk.Separator(settings_frame, orient=HORIZONTAL).grid(row=2, pady=8, columnspan=3, sticky=(E,W)) #Combustion Blower Test 
combustion_blower_label = ttk.Label(settings_frame, text="Combustion Blower:") combustion_blower_label.grid(column=0, row=3, sticky=W) combustion_blower_off = ttk.Button(settings_frame, text="Off", command=lambda: gpio.output(combustion_blower, False)) combustion_blower_off.grid(column=1, row=3, padx=5, sticky=W) combustion_blower_on = ttk.Button(settings_frame, text="On", command=lambda: gpio.output(combustion_blower, True)) combustion_blower_on.grid(column=2, row=3, padx=5, sticky=W) ttk.Separator(settings_frame, orient=HORIZONTAL).grid(row=4, pady=8, columnspan=3, sticky=(E,W)) #Starter Test starter_run_time = IntVar() starter_label = ttk.Label(settings_frame, text="Starter Time Test:") starter_label.grid(column=0, row=5, sticky=W) starter_time = ttk.Entry(settings_frame, width=11, textvariable=starter_run_time) starter_time.grid(column=1, row=5, padx=5, sticky=(W,E)) starter_button = ttk.Button(settings_frame, text="Run", command=lambda: self.starter_timer(starter_run_time.get())) starter_button.grid(column=2, row=5, padx=5, sticky=W) starter_off_button = ttk.Button(settings_frame, text="Off", command=lambda: gpio.output(starter, False)) starter_off_button.grid(column=1, row=6, padx=5, sticky=W) starter_on_button = ttk.Button(settings_frame, text="On", command=lambda: gpio.output(starter, True)) starter_on_button.grid(column=2, row=6, padx=5, sticky=W) ttk.Separator(settings_frame, orient=HORIZONTAL).grid(row=7, pady=8, columnspan=3, sticky=(E,W)) #Convection Blower Test convection_blower_label = ttk.Label(settings_frame, text="Convection Blower:") convection_blower_label.grid(column=0, row=8, sticky=W) convection_blower_off = ttk.Button(settings_frame, text="Off", command=lambda: gpio.output(convection_blower, False)) convection_blower_off.grid(column=1, row=8, padx=5, sticky=W) convection_blower_on = ttk.Button(settings_frame, text="On", command=lambda: gpio.output(convection_blower, True)) convection_blower_on.grid(column=2, row=8, padx=5, sticky=W) 
ttk.Separator(settings_frame, orient=VERTICAL).grid(column=3, row=0, rowspan=9, padx=10, sticky=(N,S)) ############### Stats Frame #Stats Frame stats_frame = ttk.Frame(root, padding="15 15 15 15") stats_frame.grid(column=1, row=0, sticky=(N, S, E, W)) #Read Room Temp self.room_air_temp = StringVar() self.room_air_temp.set(f"Room Temperature: {self.read_temp()}") room_temp_label = ttk.Label(stats_frame, textvariable=self.room_air_temp) room_temp_label.grid(column=0, row=0, sticky=W) read_temp_button = ttk.Button(stats_frame, text="Read Temp", command=self.read_temp) read_temp_button.grid(column=1, row=0, sticky=W, padx=5) auto_graph_button = ttk.Button(stats_frame, text="Auto Graph", command=lambda: self.graph_it(time_list, temp_list)) auto_graph_button.grid(column=1, row=1, sticky=W, padx=5) ttk.Separator(stats_frame, orient=HORIZONTAL).grid(column=0, row=2, pady=8, columnspan=2, sticky=(W,E)) self.auger_state = StringVar() auger_speed = ttk.Label(stats_frame, text=f"Auger Speed: {}") # def auger_timer(self, sec): # start_time = time.time() # gpio.output(augers, True) # if start_time > time.time() + sec: # gpio.output(augers, False) def starter_timer(self, sec): start_time = time.time() gpio.output(starter, True) if start_time > time.time() + sec: gpio.output(starter, False) def auger_modes(self, time, power): if not power: if power == "OFF": print("OOOOOOFFFFFF") self.afterkill(self.auger_running) return None print("auger on") print(time) power = True ms = 5000 - time self.auger_running = root.after(ms, self.auger_modes, time, power) elif power: if power == "OFF": print("OOOOOOFFFFFF") self.afterkill(self.running) return None print('auger off') print(time) power = False self.auger_running = root.after(time, self.auger_modes, time, power) def afterkill(self, function): try: root.after_cancel(function) except(ValueError): print("Function doesn't exsist in memory. 
Yet...") def read_temp(self): #REPLACE PATH WITH PI PATH TO temp_file = open("TestingEnvironment/temptest.txt") lines = temp_file.readlines() p = re.compile(r"[t][=](\d*)") result = p.search(lines[1]) room_air = result.group(1) room_air = int(room_air) / 1000 room_air = 9 / 5 * room_air + 32 self.room_air_temp.set(f"Room Temperature: {room_air}") return room_air root = Tk() RndGui(root, None) root.mainloop()
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/testtimer.py
<reponame>mVolpe94/Pellet-Stove-Automation import time x = 0 start = time.time() print(start) while x < 10000000: x += 1 end = time.time() print(end) print(end-start)
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/testfileread.py
import os, re temp_file = open("TestingEnvironment/temptest.txt") lines = temp_file.readlines() temp_line = lines[1] p = re.compile(r"[t][=](\d*)") result = p.search(lines[1]) temp = result.group(1) temp = int(temp) / 1000 temp = 9/5 * temp + 32 print(temp)
mVolpe94/Pellet-Stove-Automation
setup-noGUI.py
<reponame>mVolpe94/Pellet-Stove-Automation #import RPi.GPIO as gpio import time, os, sys from threading import Thread from tkinter import * from tkinter import ttk #tasks # When room temp is lower than set temperature, activate heat exchange blower, add pellets # If heat exchange intake delta t is low, increase feul into burn pot (activate auger) # If heat exchagne intake is same temp as set temp, add more fuel when temp set is higher than room temp # 1 second cycles # check all temps every minute #Variable Setup room_air_temp = 65 # IO Setup #pin numbers auger1 = 5 auger2 = 6 heat_exg_blower = 13 combustion_blower = 21 room_air_sensor = 19 #outgoing_air_sensor = 26 burn_pot_sensor = 20 #pin setup # gpio.setup(auger1, gpio.OUT) # gpio.setup(auger2, gpio.OUT) # gpio.setup(heat_exg_blower, gpio.OUT) # gpio.setup(combustion_blower, gpio.OUT) # gpio.setup(room_air_sensor, gpio.IN) # gpio.setup(outgoing_air_sensor, gpio.IN) # gpio.setup(burn_pot_sensor, gpio.IN) running = True def read_temp(): temp_file = open("/sys/bus/w1/devices/28-021466be69ff/w1_slave") lines = temp_file.readlines() temp_file.close() p = re.compile(r"[t][=](\d*)") result = p.search(lines[1]) room_air_temp = result.group(1) room_air_temp = int(room_air_temp) / 1000 room_air_temp = 9 / 5 * room_air_temp + 32 return room_air_temp def deltaTtimer(room_air_temp): old_room_temp = room_air_temp #Take input of incoming air temp time.sleep(5) new_temp = 66 #Take input of incoming air deltaT = new_temp - old_room_temp return deltaT def deltaTtimed(room_air_temp): start_time = time.time() current_room_temp = read_temp() #Take input of incoming air temp target_temp = current_room_temp + 1 while current_room_temp <= target_temp: current_room_temp = read_temp()#Take input of incoming air time.sleep(1) end_time = time.time() return end_time - start_time # deltaT = deltaTtimer(room_air_temp) # if deltaT <= 0: # pass # #Add more pellets # elif deltaT <= 1: # pass # elif deltaT > 1: # pass def running(): while 
is_running: pass Thread(target = running).start() Thread(target = deltaTtimed).start()
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/tempprocess.py
def temp(): x = 0 while x < 10: x += 1
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/threadtest.py
import mainprocess import os from multiprocessing import Process, Pipe # to use multiple pipes with one function, must define new connection names # and call the recieve for each # must have a pipeline for each sensor used if __name__ == '__main__': os.system('modprobe w1-gpio') os.system('modprobe w1-therm') parent_temp_conn, child_temp_conn = Pipe() p = Process(target=mainprocess.sensor_check, args=[child_temp_conn]) p.start() for _ in range(10): print(parent_temp_conn.recv()) print(parent_temp_conn.recv()) p.join()
mVolpe94/Pellet-Stove-Automation
setup.py
<reponame>mVolpe94/Pellet-Stove-Automation<filename>setup.py #import RPi.GPIO as gpio import time, os, sys from tkinter import * from tkinter import ttk #tasks # When room temp is lower than set temperature, activate heat exchange blower, add pellets # If heat exchange intake delta t is low, increase feul into burn pot (activate auger) # If heat exchagne intake is same temp as set temp, add more fuel when temp set is higher than room temp # 1 second cycles # check all temps every minute # #Variable Setup room_air_temp = 65 # IO Setup #pin numbers augers = 5 heat_exg_blower = 13 combustion_blower = 21 room_air_sensor = 19 #outgoing_air_sensor = 26 burn_pot_sensor = 20 #pin setup # gpio.setup(auger1, gpio.OUT) # gpio.setup(auger2, gpio.OUT) # gpio.setup(heat_exg_blower, gpio.OUT) # gpio.setup(combustion_blower, gpio.OUT) # gpio.setup(room_air_sensor, gpio.IN) # gpio.setup(outgoing_air_sensor, gpio.IN) # gpio.setup(burn_pot_sensor, gpio.IN) class PelletStove: def __init__(self, root): root.title("Pellet Stove") root.columnconfigure(0, weight=1) root.rowconfigure(0, weight=1) stats_frame = ttk.Frame(root, padding="15 15 15 15") stats_frame.grid(column=0, row=0, sticky=(N, W, E, S)) room_temp_label = ttk.Label(stats_frame, text=f"Room Temperature: {room_air_temp}" + u"\N{DEGREE SIGN}") room_temp_label.grid(column=0, row=0, sticky=W) root = Tk() PelletStove(root) root.mainloop() def read_temp(): temp_file = open("/sys/bus/w1/devices/28-021466be69ff/w1_slave") lines = temp_file.readlines() p = re.compile(r"[t][=](\d*)") result = p.search(lines[1]) room_air_temp = result.group(1) room_air_temp = int(room_air_temp) / 1000 room_air_temp = 9 / 5 * room_air_temp + 32 return room_air_temp def deltaTtimer(room_air_temp): old_room_temp = room_air_temp #Take input of incoming air temp time.sleep(5) new_temp = 66 #Take input of incoming air deltaT = new_temp - old_room_temp return deltaT deltaT = deltaTtimer(room_air_temp) if deltaT <= 0: pass #Add more pellets elif deltaT <= 1: 
pass elif deltaT > 1: pass def deltaTtimed(room_air_temp): start_time = time.time() current_room_temp = read_temp() #Take input of incoming air temp target_temp = current_room_temp + 1 while current_room_temp <= target_temp: current_room_temp = read_temp()#Take input of incoming air time.sleep(1) end_time = time.time() return end_time - start_time
mVolpe94/Pellet-Stove-Automation
TestingEnvironment/mainprocess.py
<reponame>mVolpe94/Pellet-Stove-Automation from multiprocessing import Process, Pipe import time, os, sys, re def read_temp(): temp_file = open("TestingEnvironment/temptest.txt") lines = temp_file.readlines() p = re.compile(r"[t][=](\d*)") result = p.search(lines[1]) room_air_temp = result.group(1) room_air_temp = int(room_air_temp) / 1000 room_air_temp = 9 / 5 * room_air_temp + 32 return room_air_temp def read_ash(): pass def sensor_check(conn): running = True while running: temp = read_temp() conn.send(temp) time.sleep(1) break while running: temp = read_temp() temp += 1 conn.send(temp) time.sleep(1)
maknotavailable/uplabel
code/log.py
""" Log labeling iterations and metrics Local or from blob storage. """ import json from pathlib import Path class Log(): def __init__(self, fn): self.fn = fn self.logs = [] def read_log(self): """load or create logs""" if Path(self.fn).is_file(): with open(self.fn, 'r') as fn: self.logs = json.load(fn) else: # Initialize log self.logs = dict(iterations=[dict()]) def write_log(self, name, value, save=True): """write logs""" self.logs['iterations'][self.iter][name] = value if save: with open(self.fn, 'w') as fn: json.dump(self.logs,fn) def set_iter(self, iter): print('####################################') print(f'**** \tITERATION # {iter} \t\t****') print('####################################\n') self.iter = iter if len(self.logs['iterations']) != self.iter + 1: self.logs['iterations'].append(dict(iteration = iter)) self.write_log('iteration',iter)
louisopen/Ethernet2uart
app_device.py
#!/usr/bin/env python #coding= utf-8 from ConsoleThreading import * from flask import Flask, render_template, Response, request, redirect, jsonify import json app = Flask(__name__) Model='Web2uart Control' nameing = [{"id": 1, "name": "CHIPS", "done": False}, {"id": 2, "name": "garden", "done": False}] #json def CheckSerial(): #Serial = SerialTask('/dev/ttyAMA0',115200) #test Slave of the PC station control #Serial = SerialTask('/dev/ttyUSB0',115200) #test Master of ipad myself Serial = SerialTask('/dev/ttyUSB0',9600) #test Master of BK5491B #Serial = SerialTask('/dev/ttyACM0',115200) #test Master of PSW 80-13.5 power supply if Serial.isopen(): task = Thread(target=Serial.run, args=('for_serial_class',)) #開啟Allocate threading function task.start() #啟動 return Serial #========================================================================== # Run Raspberry Pi #========================================================================== @app.route('/model', methods = ['GET']) #like model for automatic control def model(): lines= Model+'</br>' return Response(lines, mimetype='text/html') @app.route('/version', methods = ['GET']) def version(): Serial=CheckSerial() if Serial.isOpen(): message='*IDN?\n' Serial.send(message) time.sleep(0.1) print '\r%s'%Serial.receive() lines=Serial.receive() else: lines='Check serial port' Serial.terminate() lines=lines.replace('\n','</br></br>') #to text/html return Response(lines, mimetype='text/html') @app.route('/getdata', methods = ['GET']) def get_json(): return json.dumps(nameing) #return jsonify(nameing) #異曲同工 @app.route('/getset/<int:id>', methods=['GET']) def set_json(id=1): task = filter(lambda t: t['id'] == id, nameing) if len(task) == 0: abort(404) return jsonify(task[0]) #符合之項目 #return jsonify({'task': task[0]}) #符合之項目之數組 #return json.dumps({'id':id}) @app.route('/getpost/<int:id>/<name>', methods = ['GET']) def post_set(id,name): task = { "id": nameing[-1]['id'] + 1, "name": name, "done": False } nameing.append(task) return 
jsonify(nameing) #return jsonify({'nameing': nameing}) #會多了{ "nameing":[ {"done":True,"id":5,"name":"L"},{...}] } @app.route('/postreturn', methods = ['POST']) #GET for debug def post_data(name='L'): print (request.headers) #print (request.data) print (request.get_data()) #return Response("POST") #for test #return Response(request.headers+request.data) return Response(str(request.headers)+request.get_data()) #return Response(request.form.to_dict(), mimetype='text/html') #return Response(request.form.keys(), mimetype='text/html') #return Response(request.form.values(), mimetype='text/html') #return render_template(request.headers+request.data) #return render_template('post_submit.html') @app.route('/postjson', methods = ['POST','GET']) #GET for debug def post_json(name='L'): print(request.headers) ''' if not request.json: #have josn body abort(400) ''' if "application/json" in request.headers["Content-Type"]: #body = request.json body = request.get_json() #print(json.dumps(request.get_json())) print('POST souce:\r\n%s'%body) print('POST json:\r\n%s'%json.dumps(body)) else: #abort(400) #只處理json body #body = request.data body = request.get_data() #print(json.loads(request.get_data(as_text=True))) #print('POST souce:\r\n%s'%body) print('POST body:\r\n%s'%body) if request.method == 'POST': #先決是要有key (POST,GET) print ("POST: ") #print (body['id']) j_data = json.loads(json.dumps(body)) #str to dict to json print (j_data) #print ("{}".format(j_data)) #print (json.dumps(body,ensure_ascii=False)) ''' print (request.values['id']) #來自POST的header中parameter中的key:value print (request.values['name']) #來自POST的header中parameter中的key:value print (request.values['done']) #來自POST的header中parameter中的key:value print (request.form.to_dict()) #{'done': u'0', 'id': u'7', 'name': u'LLL'} print (list(request.form.keys())) #來自POST的body中的key(全部list) print (request.form.get('id')) #來自POST的body中的key:value print (request.form.get('name')) #來自POST的body中的key:value print (request.form.get('done')) 
#來自POST的body中的key:value ''' task = { "id": nameing[-1]['id'] + 1, "name": j_data['name'], #根據Client request {"name": "xxxx"} "done": True if 'True' in j_data['done'] else False } else: #Because of "GET", that may have request "GET" value print ("GET: ") print (request.args.get('id')) #來自GET的header中parameter中的key:value print (request.args.get('name')) #來自GET的header中parameter中的key:value print (request.args.get('done')) #來自GET的header中parameter中的key:value task = { "id": nameing[-1]['id'] + 1, "name": request.values['name'], #根據Client request {"name": "xxxx"} "done": True if 'True' in request.values['done'] else False } nameing.append(task) return jsonify(nameing) #[{"id":1,"name":"CHIPS"},{"id":2,"name":"garden"},{"id":3,"name":"L"}] #return jsonify({'nameing': nameing}) #{"nameing":[{"id":1,"name":"CHIPS"},{"id":2,"name":"garden"},{"id":3,"name":"L"}]}
louisopen/Ethernet2uart
ConsoleThreading.py
#!/usr/bin/env python
#coding= utf-8
# Threaded serial-port helper (Python 2). SerialTask owns one pyserial port,
# accumulates newline-terminated replies on a background thread (run()), and
# exposes blocking send()/receive(). The app* functions at the bottom are
# instrument-specific demo drivers (BK5491B multimeter, PSW supply, ...).
import serial
import serial.tools.list_ports
from threading import Thread
import time, os, sys, datetime

#===========================================================================
class SerialTask:
    def __init__(self, p_ch, baud):
        """Scan system ports and open the one whose name contains p_ch."""
        self._running = False    #True is for Slave mode waiting for RXD command
        self.getData=''          # bytes accumulated for the in-flight line
        self.rec_string=''       # last complete line, consumed by receive()
        self.debounce=0          # idle-read counter used to flush stale input
        self.p_ch=p_ch
        #find all available devices
        ports = list(serial.tools.list_ports.comports())
        #Open the port
        for self.p in ports:
            print '\rSerial port is detected %s'%self.p
            #/dev/ttyUSB1 - 5491B Multimeter
            #/dev/ttyUSB0 - FT232R USB UART
            #/dev/ttyAMA0 - ttyAMA0
            if self.p_ch in self.p:
                print '\rConnect to Serial port %s'%self.p
                try:
                    self.serial_port = serial.Serial(port=self.p[0], baudrate=baud, timeout=1, writeTimeout=1)
                    if not self.serial_port.isOpen():
                        self.serial_port.open()
                    self._running = True
                    break
                except (OSError, serial.SerialException):
                    print ("\rUnexpected error:", sys.exc_info())
                    pass

    def isopen(self):
        """True when the last scanned port matched the requested device name.

        NOTE(review): if comports() returned nothing, self.p never gets set
        and this raises AttributeError — consider guarding with hasattr.
        """
        if self.p_ch==self.p[0]:
            return True
        return False

    def isOpen(self):
        """True once a port was successfully opened (reader may run)."""
        if self._running:
            return True
        return False

    def read(self,num):
        return self.serial_port.read(num)

    def readline(self):
        return self.serial_port.readline()

    def terminate(self):
        """Stop the reader loop and close the port."""
        self._running = False
        self.serial_port.close()

    def send(self,data):
        """Write a UTF-8 encoded command; no-op on empty input."""
        if not data:
            return
        #self.serial_port.flushInput()
        #self.serial_port.flushOutput()
        self.serial_port.write(data.encode('utf-8'))
        time.sleep(0.2)    # give the device time to react before next I/O

    def receive(self):
        """Block (up to ~1.5 s) for the next complete line; '' on timeout.

        Consumes rec_string: a second call before the device answers again
        returns ''.
        """
        time_out=0
        while self.rec_string=='':
            time.sleep(0.01)
            time_out+=1
            if time_out>150:    #time out after ~1.5 sec.
                return ''
            pass
        temp=self.rec_string
        self.rec_string=''
        return temp

    def run(self,message):
        """Reader-thread body: accumulate bytes until '\\n', publish the line.

        Idle reads increment debounce; after a few idle ticks any partial
        line is discarded and the input buffer flushed.
        """
        self.serial_port.flushInput()
        while self._running:
            try:
                ch = self.serial_port.read(1)
                if ch =='':    # read timed out — count idle ticks
                    self.debounce += 1
                    if self.debounce > 2:    #~2 sec. if timebase is 0.125
                        self.serial_port.flushInput()
                        self.getData=''
                        self.debounce=0
                    pass
                elif ch == '\n':    ##############Normally for 0A##################
                    if self.getData !='':
                        #process_receive(self.getData)    # alternative: parse here
                        self.rec_string=self.getData      # hand the line to receive()
                        pass
                    self.getData=''
                    self.debounce=0
                    ##############Special for 5491B##################
                    #elif ch == '\r':    # would discard echoed commands (BK5491B)
                    #    self.getData=''
                    #    self.debounce=0
                else:
                    self.getData += bytes.decode(ch)
                    self.debounce=0
                    time.sleep(0.005)    #fast path: keep draining bytes
                    continue
            except Exception, e:
                print '\r\nSerial exception: %s\r\n'% str(e)
                self.serial_port.flushInput()
                self.getData=''
                pass
            time.sleep(0.125)    # idle-loop timebase (matches debounce math)
            #GPIO.output(LED[1],GPIO.LOW)

#===========================================================================
def process_receive(getData):    #for Master
    """Parse a colon-separated command line and build a shell-style reply."""
    try:
        cmds=getData.split(':')
        print '\r\nCommand: %s %s'%(len(cmds), cmds)    #Changed 20200413
        logmsg='\r\nCommand fail: '+cmds[0]+'\r\nshell>'    #20200610 preload for error exit
        #watchdog = Watchdog(4, restart_program)    # optional watchdog hook
        if cmds[0]=='volt':
            logmsg='\r\n'+cmds[0]+'\r\nshell>'
            pass
        elif cmds[0]=='curr':
            logmsg='\r\n'+cmds[0]+'\r\nshell>'
            pass
        else:
            logmsg='\r\n'+cmds[0]+'\r\nshell>'
            pass
    except:
        # NOTE(review): if split itself failed, cmds is unbound here and this
        # raises NameError instead of returning the failure message.
        logmsg='\r\nCommand fail: '+cmds[0]+'\r\nshell>'    #20200610 preload for error exit
        pass
    #watchdog.stop()
    return logmsg

"""
Dispatch-table sketch kept from the original author:
return {
    'a': 1, 'b': 2, 'c': 3,
}.get(var,'error')    # 'error' is the default return, configurable
"""

#===========================================================================
#Call example
#===========================================================================
def app1(Serial):
    """Demo driver: alternate DC-current / DC-voltage reads on a BK5491B.

    NOTE(review): frequency_range/function_read/resistance_range/
    function_status are not defined on SerialTask in this file — presumably
    provided by a subclass or later edit; confirm before running.
    """
    if Serial.isOpen():
        Serial.send('*IDN?\r')
        print '\r%s'%Serial.receive()
        print '\r%s'%Serial.frequency_range('500')
        print '\r%s\n'%Serial.function_read()
        print '\r%s'%Serial.resistance_range('200000')
        print '\r%s\n'%Serial.function_read()
        for i in range (1,50,1):
            print '\r\nSwitch to curr:'
            message=":func curr:dc ;:curr:dc:rang 0.5\r"
            Serial.send(message)
            #message=":func?\r"    #':func?\r' requests mode; ":func\r" just beeps
            #Serial.send(message)
            print '\r%s'%Serial.receive()
            message=':fetch?\r'
            Serial.send(message)
            print '\r%s\n'%Serial.receive()
            print '\r%s\n\n'%Serial.function_status()
            time.sleep(0.5)
            ################################################################
            print '\r\nSwitch to volt:'
            message=":func volt:dc ;:volt:dc:rang 5\r"
            Serial.send(message)
            print '\r%s'%Serial.receive()
            message=':fetch?\r'
            Serial.send(message)
            print '\r%s\n'%Serial.receive()
            print '\r%s\n\n'%Serial.function_status()
            time.sleep(0.5)

#===========================================================================
def app2(Serial):
    """Demo driver: cycle current/voltage/frequency ranges on a BK5491B."""
    if Serial.isOpen():
        Serial.send('*IDN?\r')
        print '\r%s'%Serial.receive()
        '''
        print '\r%s'%Serial.frequency_range('500')
        print '\r%s\n'%Serial.function_read()
        print '\r%s'%Serial.resistance_range('200000')
        print '\r%s\n'%Serial.function_read()
        '''
        #==================================================================#
        for i in range (1,100,1):
            print '\r%s'%Serial.current_dc_range('0.5')
            print '\r%s\n'%Serial.function_read()
            print '\r%s'%Serial.voltage_dc_range('5')
            print '\r%s\n'%Serial.function_read()
            print '\r%s'%Serial.frequency_range('500')
            print '\r%s\n'%Serial.function_read()
            #print '\r%s'%Serial.resistance_range('50000000')
            time.sleep(0.5)
            print '\r%s\n'%Serial.function_read()

#===========================================================================
def appBK(Serial):    #for test: write only
    """Demo driver: hammer ':func?' queries without reading replies."""
    if Serial.isOpen():
        Serial.send('*IDN?\r')
        for i in range (1,100,1):
            Serial.send(':func?\r')
            time.sleep(0.5)

#===========================================================================
def app_PSW(Serial):
    """Demo driver for the GW-Instek PSW 80-13.5 power supply (SCPI)."""
    if Serial.isOpen():
        message='*IDN?\n'
        Serial.send(message)
        time.sleep(0.1)
        print '\r%s'%Serial.receive()
        for i in range (1,50,1):
            message='SOUR:VOLT:LEV:IMM:AMPL 5.2\n'
            Serial.send(message)
            message='SOUR:CURR:LEV:IMM:AMPL 1.0\n'
            Serial.send(message)
            message='OUTP:DEL:ON 0.5\n'
            Serial.send(message)
            message='OUTP:STAT:IMM ON\n'
            Serial.send(message)
            time.sleep(0.5)
            #message='STAT:QUES:COND?\n'
            #message='OUTP:MODE?\n'
            #message='OUTP:STAT:IMM?\n'
            #message='MEAS:SCAL:CURR:DC?\n'
            message='MEAS:SCAL:VOLT:DC?\n'
            Serial.send(message)
            time.sleep(0.1)
            print '\r%s'%Serial.receive()

#===========================================================================
def app_ipad(Serial):
    """Demo driver for the custom 'ipad'/X1608 device protocol."""
    if Serial.isOpen():
        message='*IDN?\r'
        Serial.send(message)
        time.sleep(0.1)
        print '\r%s'%Serial.receive()
        message='model\r'
        Serial.send(message)
        time.sleep(0.1)
        print '\r%s'%Serial.receive()
        for i in range (1,100,1):
            message='STBY\r'
            Serial.send(message)
            time.sleep(0.1)
            print '\r%s'%Serial.receive()
        pass

#===========================================================================
if __name__=='__main__':
    # Pick the target device by uncommenting one SerialTask line.
    #Serial = SerialTask('/dev/ttyAMA0',115200)    #test Slave of the PC station control
    #Serial = SerialTask('/dev/ttyUSB0',115200)    #test Master of ipad myself
    #Serial = SerialTask('/dev/ttyUSB0',9600)      #test Master of BK5491B
    Serial = SerialTask('/dev/ttyACM0',115200)     #test Master of PSW 80-13.5 power supply
    if Serial.isopen():
        task = Thread(target=Serial.run, args=('for_serial_class',))    # start reader thread
        task.start()
    try:
        #app1(Serial)      #BK5491B
        #app2(Serial)      #BK5491B
        #appBK(Serial)     #BK5491B
        app_PSW(Serial)    #PSW 80-13.5 Power supply
        #app_ipad(Serial)  #X1608...
    except KeyboardInterrupt:
        pass
    Serial.terminate()
    task.join()
louisopen/Ethernet2uart
app.py
#!/usr/bin/env python #coding= utf-8 from app_device import * from flask import Flask, render_template, Response, request, redirect import os # home page @app.route('/') @app.route('/index') @app.route('/index.html') def index(): return render_template('index.html') if __name__ == '__main__': app.run(host='0.0.0.0', port=5000) #myself IP
dchemishanov/GDrive-Access-Monitor
gdam.py
<reponame>dchemishanov/GDrive-Access-Monitor<filename>gdam.py # -*- coding: utf-8 -*- #from __future__ import print_function """ Google Drive Access Monitor This script is based on the Google's quick start guide to Google Drive API: https://developers.google.com/drive/v3/web/quickstart/python It uses the Oauth2 authentication mechanism and modifies the api call for file metadata. Every other change is entirely my own. This script is developed as an internal tool for Strypes EOOD https://www.strypes.eu, part of the Dutch Tech Cluster Author: <NAME> Version: 0.3 Date: 2017-12-04 License: Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0 The script is intended for companies that are trying to review the security of their files in the cloud and the unintended exposure of sensitive information to the wider Internet. Paid services do exist, but they require administrative access to the Google Drive which is unnecessary. The current script could be executed by any employee showing all public files and those visible to the entire company. For security reasons, obtaining your own credentials for authentication is highly recommended. """ import httplib2 import os from apiclient import discovery from oauth2client import client from oauth2client import tools from oauth2client.file import Storage try: import argparse flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args() except ImportError: flags = None # If modifying these scopes, delete your previously saved credentials # at ~/.credentials/drive-python-quickstart.json SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly' CLIENT_SECRET_FILE = 'client_secret.json' APPLICATION_NAME = 'Drive API Python Quickstart' def get_credentials(): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. 
""" home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'drive-python-quickstart.json') store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME if flags: credentials = tools.run_flow(flow, store, flags) else: # Needed only for compatibility with Python 2.6 credentials = tools.run(flow, store) print('Storing credentials to ' + credential_path) return credentials def connection(): credentials = get_credentials() http = credentials.authorize(httplib2.Http()) service = discovery.build('drive', 'v3', http=http) return service def is_exposed_to(item): """Checks all permissions of the file and returns the most exposing.""" public_permission = "" permissions = item.get('permissions', []) for permission in permissions: if permission['type'] == 'anyone': public_permission = 'public' break elif permission['type'] == 'domain': public_permission = 'companyDomain' return public_permission def entries(): """Brows all files visible for the current user and returns a filled in file with the relevant identifying information.""" table_body = [] token = None files_metadata_items = [] fields_list = "nextPageToken, files(id, name, permissions, owners, webViewLink, webContentLink)" service = connection() api_call_results = service.files().list( fields=fields_list).execute() files_metadata_items = api_call_results.get('files', []) while True: for item in files_metadata_items: exposed = is_exposed_to(item) if exposed != "": row = [item['name'].encode('ascii', "ignore"), ', '.join([o['displayName'] for o in item['owners']]), # backward compatibility reasons exposed, item.get('webViewLink', '-'), item.get('webContentLink', '-')] table_body.append(row) # check if the list of files has been 
exhausted # api_call_results is instantiated two times before and in the while loop # the idea is to account for the first call where there is no page token # if there is no token to a next page of results, break the connection # if token is not None, get new api_call_results # files_metadata_items could be empty list, because the api does not promise full page of results despite of search not finished token = api_call_results.get('nextPageToken', None) if not token: break api_call_results = service.files().list( pageToken=token, fields=fields_list).execute() files_metadata_items = api_call_results.get('files', []) return table_body def main(): """Accepts the structured output and formats it for human use.""" with open("results.txt", 'w') as f: f.write( '\n'.join([ ', '.join([cell for cell in row]) for row in entries() ])) if __name__ == '__main__': main()
FelixMohr/terratrader
src/trading_core.py
from typing import Dict

from terra_sdk.client.lcd import LCDClient, Wallet
from terra_sdk.core import Coins
from terra_sdk.core.auth import StdFee, StdTx
from terra_sdk.core.broadcast import BlockTxBroadcastResult
from terra_sdk.core.wasm import MsgExecuteContract

from src import const
from src.helpers import to_u_unit, info, start_halo, stop_halo
from src.messages import get_sell_dict, get_buy_dict
from src.params import Params


def get_bluna_for_luna_price(terra: LCDClient, params: Params):
    """Simulate swapping params.amount_luna Luna into bLuna.

    Returns (returned micro-amount, Luna paid per bLuna received).
    """
    halo = start_halo('Retrieving bluna for luna price', params)
    offered = to_u_unit(params.amount_luna)
    outcome = get_swap_price(offered, terra, const.luna_info)
    stop_halo(halo)
    return outcome


def get_luna_for_bluna_price(terra: LCDClient, params: Params):
    """Simulate swapping params.amount_bluna bLuna into Luna.

    Returns (returned micro-amount, bLuna paid per Luna received).
    """
    halo = start_halo('Retrieving bluna for luna price', params)
    offered = to_u_unit(params.amount_bluna)
    outcome = get_swap_price(offered, terra, const.bluna_info)
    stop_halo(halo)
    return outcome


def get_swap_price(sent_amount: int, terra: LCDClient, info_dict: Dict):
    """Run a TerraSwap swap simulation on the Luna/bLuna pair contract."""
    simulation = {
        "simulation": {
            "offer_asset": {
                "info": info_dict,
                "amount": str(sent_amount),
            }
        }
    }
    response = terra.wasm.contract_query(const.luna_bluna, simulation)
    returned = int(response['return_amount'])
    return returned, sent_amount / returned


def buy(params: Params, terra: LCDClient, belief_price: float, wallet: Wallet) -> bool:
    """Swap params.amount_luna Luna for bLuna at the given belief price."""
    swap_msg = MsgExecuteContract(wallet.key.acc_address,
                                  const.luna_bluna,
                                  get_buy_dict(belief_price, params),
                                  Coins(uluna=to_u_unit(params.amount_luna)))
    return execute_contract(swap_msg, terra, wallet, params)


def sell(params: Params, terra: LCDClient, belief_price: float, wallet: Wallet) -> bool:
    """Send params.amount_bluna bLuna to the pair contract to sell it."""
    send_msg = MsgExecuteContract(wallet.key.acc_address,
                                  const.bluna_contract,
                                  get_sell_dict(belief_price, params))
    return execute_contract(send_msg, terra, wallet, params)


def execute_contract(msg, terra, wallet, params: Params) -> bool:
    """Sign and broadcast one contract message; True when the chain
    reported no error code."""
    signed: StdTx = wallet.create_and_sign_tx(msgs=[msg])
    broadcast: BlockTxBroadcastResult = terra.tx.broadcast(signed)
    info("transaction hash:", params.should_log())
    info(str(broadcast.txhash), params.should_log())
    return broadcast.code is None
FelixMohr/terratrader
cli.py
# Interactive REPL for the Luna/bLuna trader. Commands mutate the shared
# Params object or trigger one-off trades; `bot` hands control to the
# autonomous loop; `quit` closes the LCD session and exits.
import terra_sdk
import sys

from src.trading_core import get_bluna_for_luna_price, get_luna_for_bluna_price, buy, sell
from src.helpers import create_params, info, create_terra, create_wallet, warn, from_u_unit, get_arg_safe
from src import bot, const


def main():
    """Print the greeting banner, then run the command loop until 'quit'."""
    with open("files/greeting.txt") as f:
        content = f.read()
    print(content)
    params, terra, wallet = setup()
    while True:
        inp = input(' 👽 >>> ').strip()
        split = inp.split()
        if not len(split):    # blank line: prompt again
            continue
        command = split[0]
        args = split[1:]
        if command == 'quit':
            terra.session.close()    # release the LCD HTTP session
            break
        try:
            if command == 'price':
                # Show both swap directions so the spread is visible.
                return_amount, price = get_bluna_for_luna_price(terra, params)
                info("returned for {} Luna: {} bLuna, price: {}".format(params.amount_luna,
                                                                        from_u_unit(return_amount), price))
                return_amount, price = get_luna_for_bluna_price(terra, params)
                info("returned for {} bLuna: {} Luna, inv price: {}, price: {}".format(params.amount_bluna,
                                                                                       from_u_unit(return_amount),
                                                                                       1 / price, price))
            elif command == 'amount-luna':
                # Setter commands: silently ignore when the argument is missing
                # (get_arg_safe returns '' and warns).
                amount_luna = get_arg_safe(args)
                if amount_luna:
                    params.amount_luna = float(amount_luna)
                    info("amount for selling Luna set to {}".format(params.amount_luna))
            elif command == 'amount-bluna':
                amount_luna = get_arg_safe(args)
                if amount_luna:
                    params.amount_bluna = float(amount_luna)
                    info("amount for selling bLuna set to {}".format(params.amount_bluna))
            elif command == 'inv-sell-price':
                inv_sell_price = get_arg_safe(args)
                if inv_sell_price:
                    params.inv_sell_price = float(inv_sell_price)
                    info("price for selling bLuna set to {}".format(params.inv_sell_price))
            elif command == 'buy-price':
                buy_price = get_arg_safe(args)
                if buy_price:
                    params.buy_price = float(buy_price)
                    info("price for buying bLuna set to {}".format(params.buy_price))
            elif command == 'spread':
                spread = get_arg_safe(args)
                if spread:
                    params.spread = float(spread)
                    info("max spread set to {}".format(params.spread))
            elif command == 'bot':
                # Blocks until the bot loop is interrupted, then returns here.
                bot.run(params, terra, wallet)
            elif command == 'mode-buy':
                params.mode = const.buy
                info("set mode to buy ({})".format(params.mode))
            elif command == 'mode-sell':
                params.mode = const.sell
                info("set mode to sell ({})".format(params.mode))
            elif command == 'buy':
                # Manual trade at the currently simulated price.
                _, price = get_bluna_for_luna_price(terra, params)
                buy(params, terra, price, wallet)
            elif command == 'sell':
                _, price = get_luna_for_bluna_price(terra, params)
                sell(params, terra, price, wallet)
            else:
                info('Invalid Command.')
        except terra_sdk.exceptions.LCDResponseError as e:
            # Chain/LCD errors are recoverable: report and keep the REPL alive.
            warn(str(e))


def setup(is_bot=False):
    """Create the Params, LCD client, and wallet used by every command."""
    params = create_params(is_bot)
    terra = create_terra()
    wallet = create_wallet(terra)
    return params, terra, wallet


if __name__ == "__main__":
    args = sys.argv
    if len(args) > 1 and args[1] == 'bot':
        # `python cli.py bot` skips the REPL and runs headless.
        params, terra, wallet = setup(True)
        bot.run(params, terra, wallet)
    else:
        main()
FelixMohr/terratrader
src/helpers.py
"""Shared helpers: unit conversion, console logging, client/wallet setup,
CLI argument access, and Halo spinner management."""
from typing import List, Optional, Union
import datetime
import terra_sdk.client.lcd
from halo import Halo
from terra_sdk.core import Coins

from src import const
from src.params import Params
from terra_sdk.client.lcd import LCDClient, Wallet
from terra_sdk.key.raw import RawKey
import os
import time
import requests


def create_params(is_bot=False) -> Params:
    """Build the runtime Params; headless bots never show spinners/log."""
    params = Params()
    if is_bot:
        params.never_log = True
    return params


def create_terra() -> LCDClient:
    """Create an LCD client with current network gas prices (uusd)."""
    prices = requests.get(const.gas_price_url).json()
    uusd = prices["uusd"]
    coins = Coins(uusd=uusd)
    client = LCDClient(chain_id=const.chain_id, url=const.lcd_url, gas_prices=coins, gas_adjustment=1.4)
    info("Connected to " + const.chain_id + " via " + const.lcd_url)
    return client


def create_wallet(client: LCDClient) -> Wallet:
    """Build a Wallet from the hex private key in the PK env var.

    Raises:
        ValueError: when PK is unset/empty (previously this crashed with an
        opaque AttributeError on None.strip()).
    """
    pk = get_pk()
    if not pk:
        raise ValueError("environment variable PK (hex private key) is not set")
    key = RawKey.from_hex(pk.strip())
    wallet = terra_sdk.client.lcd.Wallet(client, key)
    return wallet


def get_arg_safe(args: List[str], idx=0) -> str:
    """Return args[idx] or '' with a warning when it does not exist.

    BUG FIX: only the empty-list case was checked before, so any call with
    idx > 0 on a short list raised IndexError instead of warning.
    """
    if idx >= len(args):
        warn("not enough arguments")
        return ""
    return args[idx]


def to_u_unit(luna: float) -> int:
    """Convert whole coins to micro-units (1 Luna = 1_000_000 uluna)."""
    return round(luna * 1000000)


def from_u_unit(uluna: int) -> float:
    """Convert micro-units back to whole coins."""
    return uluna / 1000000.0


def get_pk() -> str:
    """Read the raw hex private key from the environment (may be None)."""
    return os.getenv('PK')


def info(s: str, do_log=False):
    """Print an info line; append it (timestamped) to the log file if asked."""
    print(" 🛰 {}".format(s))
    if do_log:
        with open(const.log_file, 'a+') as f:
            f.write(str(datetime.datetime.now()) + " -- " + s + "\n")


def warn(s: str):
    """Print a warning line to the console."""
    print(" 👾 {}".format(s))


def get_system_time_millis() -> int:
    """Current wall-clock time in integer milliseconds."""
    return round(time.time() * 1000)


def get_infos_from_url(url: str, keys: List[str]) -> List[str]:
    """GET a JSON document and extract the given keys, '' for any missing.

    The result list is positionally aligned with `keys`.
    """
    response = requests.get(url).json()
    if not response:
        warn("could not get json response from {}".format(url))
        return [""] * len(keys)
    result = list()
    for k in keys:
        if k in response:
            result.append(response[k])
        else:
            result.append("")
            warn("{} not in result from {}".format(k, url))
    return result


def start_halo(text: str, params: Params, spinner='dots', text_color='magenta') -> Optional[Halo]:
    """Start a Halo spinner unless logging is disabled; returns it (or None).

    FIX: annotation was the malformed Union[Halo | None]; also stopped
    shadowing the `spinner` parameter with the Halo instance.
    """
    if params.never_log:
        return None
    halo = Halo(text=text, spinner=spinner, text_color=text_color)
    halo.start()
    return halo


def stop_halo(spinner: Optional[Halo]):
    """Stop a spinner returned by start_halo; safe to pass None."""
    if spinner:
        spinner.stop()
FelixMohr/terratrader
src/const.py
<reponame>FelixMohr/terratrader # ----------- trading ----------- # luna bLuna pair in terra swap luna_bluna = "terra1jxazgm67et0ce260kvrpfv50acuushpjsz2y0p" bluna_contract = "<KEY>" luna_info = { "native_token": { "denom": "uluna" } } # has contract address of bLuna token bluna_info = { "token": { "contract_addr": "<KEY>" } } buy = 0 sell = 1 # ----------- loans ----------- market_address = "terra1sepfj7s0aeg5967uxnfk4thzlerrsktkpelm5s" overseer_address = "terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8" aust_address = "terra1hzh9vpxhsk8253se0vv5jj6etdvxu3nv8z07zu" market_api_url = "https://api.anchorprotocol.com/api/v1/market/ust" # ----------- uncategorized ----------- log_file = "out.log" gas_price_url = "https://fcd.terra.dev/v1/txs/gas_prices" chain_id = "columbus-5" lcd_url = "https://lcd.terra.dev"
FelixMohr/terratrader
src/loans_core.py
"""Anchor-protocol loan management: query loan state, withdraw aUST,
and partially repay when the borrow limit is stressed."""
from terra_sdk.client.lcd import Wallet
from terra_sdk.core import Coins, Coin
from terra_sdk.core.wasm import MsgExecuteContract

from src import const
from src.helpers import info, to_u_unit, get_infos_from_url, from_u_unit, warn
from src.messages import get_borrower_info_msg, get_borrow_limit_uust_from_msg, get_loan_uust_from_msg, \
    get_borrow_limit_msg, get_withdraw_msg, get_repay_stable_msg
from src.params import Params


def get_loan_uust(wallet: Wallet):
    """Outstanding Anchor loan of this wallet, in uUST."""
    query = get_borrower_info_msg(wallet.key.acc_address)
    response = wallet.lcd.wasm.contract_query(const.market_address, query)
    return get_loan_uust_from_msg(response)


def get_borrow_limit_uust(wallet: Wallet):
    """Current Anchor borrow limit of this wallet, in uUST."""
    query = get_borrow_limit_msg(wallet.key.acc_address)
    response = wallet.lcd.wasm.contract_query(const.overseer_address, query)
    return get_borrow_limit_uust_from_msg(response)


def withdraw(wallet: Wallet, amount_aust: float, params: Params):
    """Redeem aUST for UST by sending it back to the market contract."""
    redeem = MsgExecuteContract(wallet.key.acc_address,
                                const.aust_address,
                                get_withdraw_msg(amount_aust))
    return execute_contract(redeem, wallet, params)


def execute_contract(exc, wallet, params: Params) -> bool:
    """Sign, broadcast, and log a single contract message."""
    signed = wallet.create_and_sign_tx(msgs=[exc])
    outcome = wallet.lcd.tx.broadcast(signed)
    info(str(outcome), params.should_log())
    return True


def get_aust_to_ust_rate() -> float:
    """Fetch the aUST->UST exchange rate from the Anchor market API.

    Raises ValueError when the rate is unavailable.
    """
    rate_str = get_infos_from_url(const.market_api_url, ["exchange_rate"])[0]
    if rate_str:
        return float(rate_str)
    raise ValueError("could not get exchange rate from {}".format(const.market_api_url))


def execute_repay(wallet: Wallet, params: Params, current_loan_uust: int):
    """Repay one third of the current loan, redeeming aUST first if the
    spendable UST balance does not cover it."""
    if current_loan_uust <= 0:
        warn("currently no loan, skipping")
        return
    try:
        rate = get_aust_to_ust_rate()
    except ValueError:
        warn("not able to fetch aUST value, using 1.155")
        rate = 1.155
    loan_ust = from_u_unit(current_loan_uust)
    repay_ust = loan_ust / 3
    balance_ust = from_u_unit(get_safe_balance(wallet))
    if balance_ust < repay_ust:
        # Withdraw just enough aUST to bridge the shortfall.
        shortfall_aust = loan_ust / (3 * rate) - balance_ust
        withdraw(wallet, shortfall_aust, params)
        balance_ust = from_u_unit(get_safe_balance(wallet))
    info("repaying {}, safe balance is {}".format(repay_ust, balance_ust))
    repay(wallet, repay_ust, params)


def repay(wallet: Wallet, amount_ust: float, params: Params):
    """Send a repay_stable message carrying the UST to pay down the loan."""
    pay = MsgExecuteContract(wallet.key.acc_address,
                             const.market_address,
                             get_repay_stable_msg(),
                             Coins(uusd=to_u_unit(amount_ust)))
    info("executing msg:")
    info(str(pay))
    return execute_contract(pay, wallet, params)


def get_balance(wallet: Wallet):
    """The wallet's uusd balance in micro-units (0 when no uusd coin)."""
    coin: Coin | None = wallet.lcd.bank.balance(wallet.key.acc_address).get('uusd')
    return coin.amount if coin else 0


def get_safe_balance(wallet: Wallet) -> int:
    """Balance minus 1 UST kept in reserve for future transaction fees."""
    return max(get_balance(wallet) - 1000000, 0)
FelixMohr/terratrader
src/bot.py
"""Autonomous trading loop: polls swap prices and Anchor loan health on
independent timers until interrupted."""
from time import sleep

from terra_sdk.client.lcd import LCDClient, Wallet

from src.trading_core import get_bluna_for_luna_price, buy, get_luna_for_bluna_price, sell
from src.loans_core import get_loan_uust, get_borrow_limit_uust, execute_repay
from src.params import Params
from src.helpers import info, get_system_time_millis, warn
from src import const


def run(params: Params, terra: LCDClient, wallet: Wallet):
    """Run trades/loan checks forever; a KeyboardInterrupt stops the loop."""
    info("starting bot. send keyboard interrupt to stop")
    params.set_logging(True)
    trade_mark = get_system_time_millis()
    loan_mark = get_system_time_millis()
    while True:
        trade_elapsed = (get_system_time_millis() - trade_mark) / 1000.0
        loan_elapsed = (get_system_time_millis() - loan_mark) / 1000.0
        try:
            if loan_elapsed >= params.sleep_time_loans_seconds:
                check_loans(params, wallet)
                loan_mark = get_system_time_millis()
            check_trades(params, terra, wallet)
            # Sleep only the remainder of the trade interval.
            sleep(max(0, params.sleep_time_seconds - trade_elapsed))
            trade_mark = get_system_time_millis()
        except KeyboardInterrupt:
            break
    params.set_logging(False)
    print()
    info("bot was stopped.")


def check_trades(params, terra, wallet):
    """Dispatch to the buy or sell branch depending on the current mode."""
    handler = check_buy if params.mode == const.buy else check_sell
    handler(params, terra, wallet)


def check_buy(params: Params, terra: LCDClient, wallet: Wallet):
    """Buy bLuna once the simulated price drops to the configured target."""
    _, price = get_bluna_for_luna_price(terra, params)
    if price <= params.buy_price:
        if buy(params, terra, price, wallet):
            info("🚀 bought bLuna for {} luna".format(params.amount_luna), params.should_log())
            params.switch_mode()
        else:
            warn("error while executing transaction")
    else:
        info("price {} vs. {}, not buying".format(price, params.buy_price), params.should_log())
        # Poll faster when the price is within a hair of the target.
        params.sleep_time_seconds = 1 if price - params.buy_price <= 0.001 else 3


def check_sell(params: Params, terra: LCDClient, wallet: Wallet):
    """Sell bLuna once the inverse price reaches the configured target."""
    _, price = get_luna_for_bluna_price(terra, params)
    inv_price = 1 / price
    gap = inv_price - params.inv_sell_price
    if gap >= 0:
        if sell(params, terra, price, wallet):
            info("🚀 sold {} bLuna".format(params.amount_bluna), params.should_log())
            params.switch_mode()
        else:
            warn("error while executing transaction")
    else:
        info("price {} vs. {}, not selling".format(inv_price, params.inv_sell_price), params.should_log())
        # Poll faster when the price is within a hair of the target.
        params.sleep_time_seconds = 1 if abs(gap) <= 0.001 else 3


def check_loans(params, wallet):
    """Partially repay the Anchor loan when usage crosses the repay ratio."""
    loan = get_loan_uust(wallet)
    limit = get_borrow_limit_uust(wallet)
    used = loan / limit
    if used < params.repay_ratio:
        info("used ratio is {} of {}.".format(used, params.repay_ratio))
    else:
        info("used ratio is {}, repaying loan partially.".format(used))
        execute_repay(wallet, params, loan)
FelixMohr/terratrader
src/params.py
from src import const class Params(object): def __init__(self): self.amount_luna = 1.0 self.amount_bluna = 1.0 self.inv_sell_price = 0.985 self.buy_price = 0.975 self.mode = const.buy self.spread = 0.007 self.sleep_time_seconds = 3 self.sleep_time_loans_seconds = 5 self.do_log = False self.never_log = False self.repay_ratio = 0.8 def set_logging(self, mode: bool): self.do_log = mode def should_log(self): return self.do_log and not self.never_log def switch_mode(self): self.mode = abs(self.mode - 1)
FelixMohr/terratrader
src/messages.py
<reponame>FelixMohr/terratrader import base64 from typing import Dict from src import const from src.helpers import to_u_unit from src.params import Params def get_borrower_info_msg(borrower_address: str) -> Dict: return { "borrower_info": {"borrower": borrower_address} } def get_loan_uust_from_msg(msg: Dict) -> int: return int(msg['loan_amount']) def get_borrow_limit_msg(borrower_address: str) -> Dict: return { "borrow_limit": {"borrower": borrower_address} } def get_borrow_limit_uust_from_msg(msg: Dict) -> int: return int(msg['borrow_limit']) def get_repay_stable_msg() -> Dict: return { "repay_stable": {} } def get_withdraw_msg(amount_aust: float) -> Dict: return { "send": { "amount": str(to_u_unit(amount_aust)), "contract": const.market_address, "msg": "eyJyZWRlZW1fc3RhYmxlIjp7fX0=" } } def get_sell_msg(max_spread: float, belief_price: float) -> str: jsn = '{"swap":{"max_spread":"' + str(max_spread) + '","belief_price":"' + str(belief_price) + '"}}' encoded = jsn.encode() return str(base64.b64encode(encoded), "utf-8") def get_sell_dict(belief_price: float, params: Params) -> Dict: msg = get_sell_msg(params.spread, belief_price) amount = str(to_u_unit(params.amount_bluna)) return { "send": { "amount": amount, "contract": const.luna_bluna, "msg": msg } } def get_buy_dict(belief_price: float, params: Params) -> Dict: amount = str(to_u_unit(params.amount_luna)) return { "swap": { "belief_price": str(belief_price), "max_spread": str(params.spread), "offer_asset": { "amount": amount, "info": { "native_token": { "denom": "uluna" } } } } }
nappernick/envelope
app/api/user_routes.py
from datetime import datetime
from werkzeug.security import generate_password_hash
from flask import Blueprint, jsonify, request
from sqlalchemy.orm import joinedload
from flask_login import login_required
from app.models import db, User, Type
from app.forms import UpdateUserForm
from .auth_routes import authenticate, validation_errors_to_error_messages

user_routes = Blueprint('users', __name__)


@user_routes.route("/types")
def types():
    """List every account type (no auth required)."""
    types = db.session.query(Type).all()
    return jsonify([type.name_to_id() for type in types])


@user_routes.route('/')
@login_required
def users():
    """List every user, fully serialized."""
    users = db.session.query(User).all()
    return jsonify([user.to_dict_full() for user in users])


@user_routes.route('/<int:id>')
@login_required
def user(id):
    """Return one user by id."""
    user = User.query.get(id)
    return user.to_dict()


@user_routes.route('/<int:id>', methods=["DELETE"])
@login_required
def user_delete(id):
    """Delete a user by id."""
    user = User.query.get(id)
    db.session.delete(user)
    db.session.commit()
    return { id: "Successfully deleted" }


@user_routes.route('/<int:id>', methods=["POST"])
@login_required
def user_update(id):
    """Update a user's profile from the submitted form.

    BUG FIX: every assignment below originally ended with a trailing comma
    (``user.username = form.data['username'],``), which stored 1-tuples like
    ``('name',)`` in each column instead of the values themselves.
    """
    user = User.query.options(joinedload("type")).get(id)
    form = UpdateUserForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        user.username = form.data['username']
        user.email = form.data['email']
        user.hashed_password = generate_password_hash(form.password.data)
        user.first_name = form.data['first_name']
        user.last_name = form.data['last_name']
        user.type_id = form.data['type_id']
        user.updated_at = datetime.now()
        db.session.commit()
        return user.to_dict_full()
    return {'errors': validation_errors_to_error_messages(form.errors)}


@user_routes.route("/<int:id>/clients")
@login_required
def admin_fetch_clients(id):
    """Admin-only: list every client-type (type_id == 2) user.

    The auth check now runs before the query so non-admins don't trigger a
    pointless database round trip (response behavior is unchanged).
    """
    authenticated = authenticate()
    if authenticated["type_id"] != 1:
        return jsonify({ "errors": [ "Unauthorized" ] })
    clientUsers = db.session.query(User).filter_by(type_id=2).all()
    return jsonify([user.to_dict_full() for user in clientUsers])
nappernick/envelope
app/api/alt_envelope_api.py
import pandas as pd
import csv
import tempfile
import pysurveycto

# Scratch script: pulls a raw form export from the SurveyCTO API and
# experiments with parsing approaches. NOTE(review): credentials are
# hard-coded — move them to environment variables before reuse.
scto = pysurveycto.SurveyCTOObject('envelope', '<EMAIL>', 'Envelope-VisX')
raw = scto.get_form_data('NORC-IGA-Endline-Menage')

# The raw payload is one big CSV string. Splitting on newlines hands
# csv.reader an iterable of lines, yielding one list per row, e.g.
# ["SubmissionDate", "start", "end", "deviceid", ...].
file_it = raw.split("\n")
reader = csv.reader(file_it, delimiter=",")
print([row for row in reader])

# The API is slow, so a local copy of the same export is used for testing.
with open('/Users/nicholasmatthews/Library/Mobile Documents/com~apple~CloudDocs/app_academy/capstone/envelope/app/api/csv.txt') as f:
    reader = csv.reader(f, delimiter=",")
    ls = []
    # BUG FIX: the original `for index, row in reader:` tried to unpack each
    # CSV row into exactly two names and raised ValueError on any row that
    # didn't have two columns; enumerate() supplies the intended row index.
    for index, row in enumerate(reader):
        ls.append(index)
    print(ls)
nappernick/envelope
app/api/__init__.py
<reponame>nappernick/envelope from .auth_routes import validation_errors_to_error_messages
nappernick/envelope
app/api/data_set_routes.py
import os
import csv
import zipfile
import pickle
import threading
import pandas as pd
import numpy as np
import sys
# For API
import pysurveycto
# Redis queue
from rq import Queue
from rq.job import Job
from app.redis import conn
# File reading utility
from io import BytesIO
from datetime import datetime
from flask import Flask, Blueprint, jsonify, request
from flask_login import login_required, current_user
from app.models import db, DataSet, HealthArea, Survey, Project
from .auth_routes import authenticate
# NOTE(review): process_data_for_reader is imported but never used here.
from sqlalchemy.orm import joinedload
from .data_processing import data_processing_for_survey_records, process_data_for_reader

data_set_routes = Blueprint('data', __name__)

# Redis job queue
q = Queue(connection=conn)


@data_set_routes.route('/data-sets')
@login_required
def data():
    """Admin-only (type_id == 1): list every stored data set's metadata."""
    curr_user = current_user.to_dict()
    if curr_user["type_id"] == 1:
        data = db.session.query(DataSet).all()
        return jsonify([data_set.to_dict() for data_set in data])
    else:
        return {'errors': ['Unauthorized']}, 401


# Workaround for Heroku front-end to back-end open connections limited to 30 seconds
@data_set_routes.route("/upload", methods=['POST'])
@login_required
def data_file_upload():
    """Accept an uploaded data set and hand the slow parsing to a thread."""
    file = request.files['data-set']
    # print(file.content_type)
    types = ["application/zip", "text/csv", "application/octet-stream"]
    if (file and file.content_type in types):
        # try pympler profiler
        post_ds = threading.Thread(target = async_ds_post, args=[file])
        post_ds.start()
        return jsonify("Successful file upload.")
    else:
        return {"errors": ["Files were not successfully passed to the API."]}, 500


# Helper function to make the data-set upload with the frontend asynchronous for polling
def async_ds_post(file):
    """Parse an uploaded .dta/.zip/.csv into pickled CSV text and persist it.

    Runs on a worker thread, hence the fresh app context. NOTE(review):
    sys.exit() in a thread just ends the thread; the code after the first
    sys.exit()/return is unreachable dead code.
    """
    import sys
    from app import app, db
    with app.app_context():
        file_name = file.filename
        file_name_list = file.filename.split(".")
        if file_name_list[len(file_name_list)-1] == "dta":
            # Stata export: convert to CSV text via pandas before pickling.
            data = pd.io.stata.read_stata(file)
            csv_file = data.to_csv()
            file_final = pickle.dumps(csv_file)
        elif file_name_list[len(file_name_list)-1] == "zip":
            # Zip: take the first member that isn't macOS metadata.
            file_like_object = BytesIO(file.read())
            zipfile_ob = zipfile.ZipFile(file_like_object)
            file_names = zipfile_ob.namelist()
            file_names = [file_name for file_name in file_names if not "__MACOSX/." in file_name]
            files = [zipfile_ob.open(name).read() for name in file_names]
            file_final = files[0].decode("utf-8")
            file_final = pickle.dumps(file_final)
        elif file_name_list[len(file_name_list)-1] == "csv":
            csv_file = file.read()
            file_final = pickle.dumps(csv_file)
        else:
            return {"errors": "This file type is not accepted, please only upload .dta, .csv, or .csv.zip file types."}
        data_set = DataSet(
            data_set_name=file_name,
            data_set=file_final
        )
        db.session.add(data_set)
        db.session.commit()
        sys.exit()
        return
    sys.exit()
    return


@data_set_routes.route("/<int:dataSetId>", methods=["POST"])
@login_required
def update_data_set(dataSetId):
    """Rename a data set."""
    data_set = db.session.query(DataSet).get(dataSetId)
    req = dict(request.json)
    data_set.data_set_name = req['data_set_name']
    data_set.updated_at = datetime.now()
    db.session.commit()
    return data_set.to_dict()


@data_set_routes.route("/<int:dataSetId>", methods=["DELETE"])
@login_required
def delete_data_set(dataSetId):
    """Delete a data set by id."""
    data_set = db.session.query(DataSet).get(dataSetId)
    db.session.delete(data_set)
    db.session.commit()
    return { dataSetId: "Successfully deleted" }


@data_set_routes.route('/<int:dataSetId>/projects/<int:projectId>/violinplot/<surveyField>')
@login_required
def violin_plot(dataSetId, projectId, surveyField):
    """Box/violin-plot statistics for one survey field across a project.

    Outliers are values beyond 1.5 * IQR from the quartiles.
    """
    curr_user = current_user.to_dict()
    if curr_user["type_id"] == 1:
        # If the user is admin, query only by data set & project id's
        surveys_query = db.session.query(DataSet.id,
            Survey.project_id, Survey.enumerator_id, Survey.health_area_id,
            Survey.duration, Survey.date_time_administered, Survey.num_outlier_data_points, Survey.num_dont_know_responses
            ).filter(DataSet.id==dataSetId, Project.id ==projectId)\
            .join(Project, Project.data_set_id==DataSet.id)\
            .join(Survey, Survey.project_id==Project.id)
    else:
        # If the user isn't admin, query with current user id
        surveys_query = db.session.query(DataSet.id,
            Survey.project_id, Survey.enumerator_id, Survey.health_area_id, Survey.duration,
            Survey.date_time_administered, Survey.num_outlier_data_points, Survey.num_dont_know_responses
            ).filter(DataSet.id==dataSetId, Project.user_id ==current_user.id, Project.id ==projectId)\
            .join(Project, Project.data_set_id==DataSet.id)\
            .join(Survey, Survey.project_id==Project.id)
    surveys = db.session.execute(surveys_query)
    result_obj = {}
    values_list = []
    outliers = []
    # Row keys come back prefixed with the table name, e.g. "surveys_duration".
    list_of_dict_survey_values = [dict(survey)[f"surveys_{surveyField}"] for survey in surveys]
    for value in list_of_dict_survey_values:
        values_list.append(int(value))
        if int(value) in result_obj:
            result_obj[int(value)] += 1
        else:
            result_obj[int(value)] = 1
    q1, q3 = np.percentile(values_list,[25,75])
    iqr = q3 - q1
    lower_bound = q1 -(1.5 * iqr)
    upper_bound = q3 +(1.5 * iqr)
    for value in list_of_dict_survey_values:
        if int(value) < lower_bound and int(value) not in outliers:
            outliers.append(int(value))
        if int(value) > upper_bound and int(value) not in outliers:
            outliers.append(int(value))
    final_obj = {}
    final_obj["data_for_box_plot"] = {}
    final_obj["data_for_box_plot"]["value_count_pairs"] = result_obj
    final_obj["data_for_violin_plot"] = [{"value": key, "count": value} for key, value in result_obj.items()]
    final_obj["data_for_box_plot"]["min"] = min(values_list)
    final_obj["data_for_box_plot"]["max"] = max(values_list)
    final_obj["data_for_box_plot"]["median"] = np.median(values_list)
    final_obj["data_for_box_plot"]["lower_bound"] = lower_bound
    final_obj["data_for_box_plot"]["upper_bound"] = upper_bound
    final_obj["data_for_box_plot"]["first_quartile"] = q1
    final_obj["data_for_box_plot"]["third_quartile"] = q3
    final_obj["data_for_box_plot"]["outliers"] = outliers
    return jsonify(final_obj)


@data_set_routes.route('/<int:dataSetId>/projects/<int:projectId>/violinplot/<surveyField>/by-enumerator')
@login_required
def violin_plot_all_enumerators(dataSetId, projectId, surveyField):
    """Same statistics as violin_plot, but computed per enumerator."""
    curr_user = current_user.to_dict()
    if curr_user["type_id"] == 1:
        surveys_query = db.session.query(DataSet.id,
            getattr(Survey, surveyField), Survey.enumerator_id
            ).filter(DataSet.id==dataSetId, Project.id ==projectId)\
            .join(Project, Project.data_set_id==DataSet.id)\
            .join(Survey, Survey.project_id==Project.id)
    else:
        # If the user isn't admin, query with current user id
        surveys_query = db.session.query(DataSet.id,
            getattr(Survey, surveyField), Survey.enumerator_id
            ).filter(DataSet.id==dataSetId, Project.user_id ==current_user.id, Project.id ==projectId)\
            .join(Project, Project.data_set_id==DataSet.id)\
            .join(Survey, Survey.project_id==Project.id)
    surveys = db.session.execute(surveys_query)
    surveys_list = [dict(survey) for survey in surveys]
    # Group the field's values by enumerator.
    by_enum = {}
    for survey in surveys_list:
        enumerator = survey["surveys_enumerator_id"]
        if enumerator in by_enum:
            by_enum[enumerator]["values"].append(survey[f"surveys_{surveyField}"])
            by_enum[enumerator]["count"] += 1
        else:
            by_enum[enumerator] = {
                "values": [survey[f"surveys_{surveyField}"]],
                "count": 1
            }
    final_enum_list = list()
    for enumerator, obj in by_enum.items():
        enum_obj = {}
        result_obj = {}
        values_list = sorted(obj["values"])
        count = obj["count"]
        outliers = []
        for value in values_list:
            if int(value) in result_obj:
                result_obj[int(value)] += 1
            else:
                result_obj[int(value)] = 1
        q1, q3 = np.percentile(values_list,[25,75])
        iqr = q3 - q1
        # This uses 1.5 * IQR beyond the quartiles as the outlier fence.
        lower_bound = q1 -(1.5 * iqr)
        upper_bound = q3 +(1.5 * iqr)
        minVal = values_list[0]
        maxVal = values_list[-1]
        for value in values_list:
            if int(value) < lower_bound and int(value) not in outliers:
                outliers.append(int(value))
            if int(value) > upper_bound and int(value) not in outliers:
                outliers.append(int(value))
        enum_obj["enumerator"] = enumerator
        enum_obj["data_for_box_plot"] = {}
        enum_obj["data_for_box_plot"]["value_count_pairs"] = result_obj
        enum_obj["data_for_violin_plot"] = [{"value": key, "count": value} for key, value in result_obj.items()]
        enum_obj["data_for_box_plot"]["enumerator"] = enumerator
        enum_obj["data_for_box_plot"]["min"] = minVal
        enum_obj["data_for_box_plot"]["max"] = maxVal
        enum_obj["data_for_box_plot"]["median"] = np.median(values_list)
        enum_obj["data_for_box_plot"]["lower_bound"] = lower_bound
        enum_obj["data_for_box_plot"]["upper_bound"] = upper_bound
        enum_obj["data_for_box_plot"]["first_quartile"] = q1
        enum_obj["data_for_box_plot"]["third_quartile"] = q3
        enum_obj["data_for_box_plot"]["outliers"] = outliers
        final_enum_list.append(enum_obj)
    return jsonify(final_enum_list)


# NOTE(review): the <int:surveyField> converter forces this segment to be an
# integer, yet surveyField is used as a column-name suffix below — this route
# looks unreachable with a real field name; confirm and drop the converter.
@data_set_routes.route('/<int:dataSetId>/violinplot/<int:surveyField>/by-enumerator')
@login_required
def violin_plot_by_enumerator(dataSetId, surveyField):
    """Raw per-enumerator value lists for one survey field."""
    curr_user = current_user.to_dict()
    surveys_query = db.session.query(DataSet.id,
        Survey.project_id, Survey.enumerator_id, Survey.health_area_id, Survey.duration, Survey.date_time_administered, Survey.num_outlier_data_points, Survey.num_dont_know_responses
        ).filter(DataSet.id==dataSetId, Project.user_id ==current_user.id)\
        .join(Project, Project.data_set_id==DataSet.id)\
        .join(Survey, Survey.project_id==Project.id)
    surveys = db.session.execute(surveys_query)
    result_dict = {}
    for survey in surveys:
        if dict(survey)["surveys_enumerator_id"] in result_dict:
            result_dict[dict(survey)["surveys_enumerator_id"]].append(dict(survey)[f"surveys_{surveyField}"])
        else:
            result_dict[dict(survey)["surveys_enumerator_id"]] = [dict(survey)[f"surveys_{surveyField}"]]
    # (A previous ORM-based implementation was kept here commented out as
    # "OLD WAY - SLOW"; superseded by the single joined query above.)
    return jsonify(result_dict)


@data_set_routes.route("/projects/<int:projectId>/health-areas/<int:healthAreaId>/map")
@login_required
def map_surveys(projectId, healthAreaId):
    """GeoJSON FeatureCollection of one health area's surveys for a project."""
    health_area = db.session.query(HealthArea).options(joinedload("ha_surveys")).get(healthAreaId)
    surveys = health_area.to_dict_full_for_project(projectId)['surveys']
    meaningful_fields = ["date_time_administered", "duration", "num_dont_know_responses", "num_outlier_data_points", "enumerator_id", "respondent"]
    map_data = {
        "type": "FeatureCollection",
        "count_surveys": len(surveys),
        "features": [
            {
                "type": "Feature",
                "geometry": {
                    "type": "Point",
                    # NOTE(review): GeoJSON expects [longitude, latitude];
                    # this emits [latitude, longitude] — confirm the consumer.
                    "coordinates": [ survey["latitude"], survey["longtitude"] ]
                },
                "properties": {field: survey[field] for field in survey if field in meaningful_fields}
            } for survey in surveys]
    }
    return jsonify(map_data)


@data_set_routes.route("/projects/<int:projectId>/health-areas/<int:healthAreaId>/center")
@login_required
def map_survey_center(projectId, healthAreaId):
    """Average survey coordinate plus the largest absolute deviation on each
    axis — used to center and zoom the map."""
    health_area = db.session.query(HealthArea).options(joinedload("ha_surveys")).get(healthAreaId)
    surveys = health_area.to_dict_full()['surveys']
    lat = {"sum": 0, "count": 0}
    long = {"sum": 0, "count": 0}
    largest_lat = float("-inf")
    largest_long = float("-inf")
    for survey in surveys:
        if abs(survey["latitude"]) > largest_lat:
            largest_lat = abs(survey["latitude"])
        if abs(survey["longtitude"]) > largest_long:
            largest_long = abs(survey["longtitude"])
        lat["sum"] += survey["latitude"]
        lat["count"] += 1
        long["sum"] += survey["longtitude"]
        long["count"] += 1
    lat_avg = lat["sum"]/lat["count"]
    long_avg = long["sum"]/long["count"]
    largest_lat_diff = abs(abs(lat_avg) - largest_lat)
    largest_long_diff = abs(abs(long_avg) - largest_long)
    return jsonify([ lat_avg, long_avg, largest_lat_diff, largest_long_diff])


@data_set_routes.route("/health-areas")
@login_required
def health_areas():
    """List every health area."""
    health_areas = db.session.query(HealthArea)
    return jsonify(health_areas)


@data_set_routes.route("/<int:dataSetId>/projects/<int:projectId>/health-areas")
@login_required
def project_health_areas(projectId, dataSetId):
    """Name/id pairs for the health areas actually surveyed in a project."""
    health_area_ids = Survey.get_health_area_ids(projectId)
    health_areas = HealthArea.class_to_dict()
    ha_names = [{"name": health_areas[ha_id], "id": ha_id} for ha_id in health_area_ids]
    return jsonify(ha_names)


@data_set_routes.route("/survey-cto")
@login_required
def survey_cto_api():
    """Fetch the form export from SurveyCTO and store it as a new data set.

    NOTE(review): credentials are hard-coded — move to configuration.
    """
    scto = pysurveycto.SurveyCTOObject('envelope', '<EMAIL>', 'Envelope-VisX')
    raw = scto.get_form_data('NORC-IGA-Endline-Menage')
    final_file = pickle.dumps(raw)
    data_set = DataSet(
        data_set_name='NORC-IGA-Endline-Menage(API).csv',
        data_set=final_file
    )
    db.session.add(data_set)
    db.session.commit()
    return data_set.to_dict()
nappernick/envelope
app/seeds/types.py
from app.models import db, Type


def seed_types():
    """Insert the three account types.

    Insertion order matters: the generated ids are the type_id values the
    user seeder references (demo uses 1, clients use 2).
    """
    for type_name in ("Admin", "Client", "Low Bandwidth"):
        db.session.add(Type(type=type_name))
    db.session.commit()


def undo_types():
    """Empty the types table and reset its id sequence (CASCADE clears
    dependent rows)."""
    db.session.execute('TRUNCATE TABLE types RESTART IDENTITY CASCADE;')
    db.session.commit()
nappernick/envelope
app/api/playing_with_redis.py
# Scratch copy of the data-set routes, experimenting with an RQ (Redis
# queue) worker instead of a raw thread for uploads.
# NOTE(review): this module defines none of its own imports — Queue, conn,
# data_set_routes, login_required, current_user, db, DataSet, jsonify,
# request, pd, pickle, zipfile, and BytesIO all come from the module this
# was copied out of, so it is not importable as-is.

# Redis job queue
q = Queue(connection=conn)


@data_set_routes.route('/data-sets')
@login_required
def data():
    """Admin-only (type_id == 1): list every stored data set's metadata."""
    curr_user = current_user.to_dict()
    if curr_user["type_id"] == 1:
        data = db.session.query(DataSet).all()
        return jsonify([data_set.to_dict() for data_set in data])
    else:
        return {'errors': ['Unauthorized']}, 401


# Helper run on the RQ worker so the HTTP request can return immediately.
def async_ds_post(file_name, file_contents):
    """Parse raw upload bytes (.dta/.zip/.csv) and persist a pickled DataSet."""
    file_name_list = file_name.split(".")
    if file_name_list[-1] == "dta":
        # BUG FIX: the original read from an undefined name `file`; the
        # worker only receives raw bytes, so wrap them for pandas.
        data = pd.io.stata.read_stata(BytesIO(file_contents))
        csv_file = data.to_csv()
        file_final = pickle.dumps(csv_file)
    elif file_name_list[-1] == "zip":
        # Zip: take the first member that isn't macOS metadata.
        file_like_object = BytesIO(file_contents)
        zipfile_ob = zipfile.ZipFile(file_like_object)
        file_names = zipfile_ob.namelist()
        file_names = [file_name for file_name in file_names if not "__MACOSX/." in file_name]
        files = [zipfile_ob.open(name).read() for name in file_names]
        file_final = files[0].decode("utf-8")
        file_final = pickle.dumps(file_final)
    elif file_name_list[-1] == "csv":
        csv_file = file_contents
        file_final = pickle.dumps(csv_file)
    else:
        return {"errors": "This file type is not accepted, please only upload .dta, .csv, or .csv.zip file types."}
    data_set = DataSet(
        data_set_name=file_name,
        data_set=file_final
    )
    db.session.add(data_set)
    db.session.commit()


# Workaround for Heroku front-end to back-end open connections limited to 30 seconds
@data_set_routes.route("/upload", methods=['POST'])
@login_required
def data_file_upload():
    """Accept an upload and enqueue the slow parsing/persisting work."""
    file = request.files['data-set']
    types = ["application/zip", "text/csv", "application/octet-stream"]
    if (file and file.content_type in types):
        # The worker process can't share the request's file handle, so only
        # the name and the raw bytes are enqueued.
        q.enqueue_call(func=async_ds_post, args=(file.filename, file.read()))
        return jsonify("Successful file upload.")
    else:
        return {"errors": ["Files were not successfully passed to the API."]}, 500
nappernick/envelope
app/seeds/__init__.py
from flask.cli import AppGroup
from .users import seed_users, undo_users
from .types import seed_types, undo_types
from .data_sets import seed_data_sets, undo_data_sets
from .projects import seed_projects, undo_projects
from .health_areas import seed_health_areas, undo_health_areas
from .surveys import seed_surveys, undo_surveys

# Creates a seed group to hold our commands
# So we can type `flask seed --help`
seed_commands = AppGroup('seed')


# `flask seed file` — re-run only the survey seeder (re-reads the seed CSV).
@seed_commands.command('file')
def seed_file():
    seed_surveys()


# Creates the `flask seed all` command.
# Order matters: types before users (user rows reference type_id), and
# data sets / projects / health areas before the surveys that reference them.
@seed_commands.command('all')
def seed():
    seed_types()
    seed_users()
    seed_data_sets()
    seed_projects()
    seed_health_areas()
    seed_surveys()


# Creates the `flask seed undo` command — truncates every seeded table
# (each undo_* uses TRUNCATE ... CASCADE, so ordering is not critical).
@seed_commands.command('undo')
def undo():
    undo_types()
    undo_users()
    undo_data_sets()
    undo_projects()
    undo_health_areas()
    undo_surveys()
nappernick/envelope
app/seeds/data_sets.py
from app.models import db, DataSet
import csv
import pickle

# Path of the survey export inside the deployed container; pass csv_path to
# seed from a different location (e.g. a local checkout).
DEFAULT_SEED_CSV = '/var/www/app/seeds/seed_survey.csv'


def seed_data_sets(csv_path=DEFAULT_SEED_CSV):
    """Insert three demo data sets, all backed by the same parsed seed CSV.

    Improvements over the original: the file is opened with a context
    manager (it was previously never closed), the CSV is parsed and pickled
    once instead of being shared via module-level state, and the hard-coded
    path is now an overridable default. The three display names mimic the
    upload formats the app accepts (.csv, .dta, .csv.zip).
    """
    with open(csv_path) as csvfile:
        parsed_file = [row for row in csv.reader(csvfile)]
    pickledfile = pickle.dumps(parsed_file)
    for name in (
        "DRC Annual Survey.csv",
        "RMAC Water Access.dta",
        "WHO SSA W18-24 Lit.csv.zip",
    ):
        db.session.add(DataSet(data_set_name=name, data_set=pickledfile))
    db.session.commit()


def undo_data_sets():
    """Empty the data_sets table and reset its id sequence."""
    db.session.execute('TRUNCATE TABLE data_sets RESTART IDENTITY CASCADE;')
    db.session.commit()
nappernick/envelope
app/forms/project_form.py
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.validators import DataRequired, ValidationError

from app.models import db, Project


def project_name_unique(form, field):
    """WTForms validator: reject a project name that already exists."""
    name = field.data
    project = db.session.query(Project).filter(Project.project_name == name).first()
    if project:
        raise ValidationError("Project name already exists.")


class ProjectForm(FlaskForm):
    """Create-project form; every field is required.

    BUG FIX: `data_set_id` was declared twice with identical definitions
    (the second silently overwrote the first); the duplicate is removed.
    """
    project_name = StringField("project_name", validators=[DataRequired(), project_name_unique])
    data_set_id = IntegerField("data_set_id", validators=[DataRequired()])
    user_id = IntegerField("user_id", validators=[DataRequired()])
    target_health_area_count = IntegerField("target_health_area_count", validators=[DataRequired()])
    target_surv_count = IntegerField("target_surv_count", validators=[DataRequired()])
nappernick/envelope
app/seeds/surveys.py
from app.models import db, Survey
from app.api.data_processing import data_processing_for_survey_records
from faker import Faker

fake = Faker()

# Path of the survey export inside the deployed container; pass csv_path to
# seed from a different location (e.g. a local checkout).
DEFAULT_SEED_CSV = "/var/www/app/seeds/seed_survey.csv"


def seed_surveys(csv_path=DEFAULT_SEED_CSV):
    """Insert one Survey row per record parsed from the seed CSV.

    Respondent names are faked; every survey is attached to project 1 (the
    single project created by seed_projects). The counts may arrive as
    numeric strings like "3.0", so they go through float() before int()
    (the redundant str() wrapper from the original is dropped). All rows
    are committed in one transaction instead of per row.
    """
    surveys = data_processing_for_survey_records(csv_path)
    for survey in surveys.values():
        survey_seed = Survey(
            health_area_id=int(survey["health_area"]),
            project_id=1,
            enumerator_id=survey["enumerator_id"],
            date_time_administered=survey["date_time_administered"],
            duration=survey["duration"],
            respondent=fake.name(),
            num_outlier_data_points=int(float(survey["num_outlier_data_points"])),
            num_dont_know_responses=int(float(survey["num_dont_know_responses"])),
            lat=survey["lat"],
            long=survey["long"],
            outside_health_zone=False
        )
        db.session.add(survey_seed)
    db.session.commit()


def undo_surveys():
    """Empty the surveys table and reset its id sequence."""
    db.session.execute('TRUNCATE TABLE surveys RESTART IDENTITY CASCADE;')
    db.session.commit()
nappernick/envelope
app/__init__.py
import os
from flask import Flask, render_template, request, session
from flask_cors import CORS
from flask_migrate import Migrate
from flask_wtf.csrf import CSRFProtect, generate_csrf
from flask_login import LoginManager
from .models import db, User
from .api.user_routes import user_routes
from .api.auth_routes import auth_routes
from .api.data_set_routes import data_set_routes
from .api.project_routes import project_routes
from .api.survey_routes import survey_routes
from .seeds import seed_commands
from .config import Config

# Application object plus session-based auth; unauthenticated requests hit
# the auth.unauthorized endpoint instead of a redirect page.
app = Flask(__name__)
login = LoginManager(app)
login.login_view = 'auth.unauthorized'


@login.user_loader
def load_user(id):
    # Flask-Login callback: rehydrate the logged-in user from the session id.
    return User.query.get(int(id))


# Register the `flask seed ...` CLI group, load config, then mount the API
# blueprints under their /api/* prefixes.
app.cli.add_command(seed_commands)
app.config.from_object(Config)
app.register_blueprint(user_routes, url_prefix='/api/users')
app.register_blueprint(auth_routes, url_prefix='/api/auth')
app.register_blueprint(data_set_routes, url_prefix='/api/data')
app.register_blueprint(project_routes, url_prefix='/api/projects')
app.register_blueprint(survey_routes, url_prefix='/api/surveys')
db.init_app(app)
Migrate(app, db)
CORS(app)


@app.after_request
def inject_csrf_token(response):
    # Hand the SPA a fresh CSRF token on every response; cookie hardening
    # (secure / SameSite=Strict) only applies when FLASK_ENV is production.
    response.set_cookie('csrf_token',
                        generate_csrf(),
                        secure=True if os.environ.get(
                            'FLASK_ENV') == 'production' else False,
                        samesite='Strict' if os.environ.get(
                            'FLASK_ENV') == 'production' else None,
                        httponly=True)
    return response


@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def react_root(path):
    # Catch-all route: serve the built React bundle so client-side routing
    # works on any path; the favicon is the only special-cased asset.
    if path == 'favicon.ico':
        return app.send_static_file('favicon.ico')
    return app.send_static_file('index.html')
nappernick/envelope
app/forms/__init__.py
<filename>app/forms/__init__.py from .login_form import LoginForm from .signup_form import SignUpForm from .project_form import ProjectForm from .update_user_form import UpdateUserForm
nappernick/envelope
app/seeds/projects.py
from app.models import db, Project


def seed_projects():
    """Insert the single demo project (aggregate stats are precomputed)."""
    project_attrs = {
        "project_name": "NORC Clean Water",
        "project_notes": "",
        "data_set_id": 1,
        "user_id": 2,
        "target_health_area_count": 172,
        "target_surv_count": 24,
        "survey_count": 1884,
        "avg_duration": 48.75372434536446,
        "health_area_count": 78,
        "enumerator_count": 25,
        "dont_know_count": 17990,
        "outlier_count": 308,
    }
    db.session.add(Project(**project_attrs))
    db.session.commit()


def undo_projects():
    """Empty the projects table and reset its id sequence."""
    db.session.execute('TRUNCATE TABLE projects RESTART IDENTITY CASCADE;')
    db.session.commit()
nappernick/envelope
app/models/data_set.py
from .db import db


class DataSet(db.Model):
    """An uploaded survey export, stored as a pickled blob.

    The blob can be large, so the serializers come in three sizes and only
    to_dict_full includes the raw bytes.
    """
    __tablename__ = "data_sets"

    id = db.Column(db.Integer, primary_key = True, index=True)
    data_set_name = db.Column(db.String(100), nullable = False, unique = True, index=True)
    # Pickled file contents (bytes) — see the upload route and the seeders.
    data_set = db.Column(db.LargeBinary, nullable = False)
    created_at = db.Column(db.DateTime, server_default=db.func.now(), index=True)
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now(), index=True)
    # NOTE(review): the backref name "data-set" contains a hyphen, so the
    # reverse attribute is unreachable with dot access (only via getattr) —
    # confirm it is unused or rename it.
    projects = db.relationship("Project", backref="data-set", lazy='dynamic')

    def to_dict(self):
        """Metadata only — the blob is deliberately omitted."""
        return {
            "id": self.id,
            "data_set_name": self.data_set_name,
            # "data_set": self.data_set,
            "created_at": self.created_at,
            "updated_at": self.updated_at
        }

    def to_dict_partial_full(self):
        """Metadata plus serialized child projects (still no blob)."""
        return {
            "id": self.id,
            "data_set_name": self.data_set_name,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "projects": [project.to_dict() for project in self.projects]
        }

    def to_dict_full(self):
        """Everything, including the raw pickled blob."""
        return {
            "id": self.id,
            "data_set_name": self.data_set_name,
            "data_set": self.data_set,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "projects": [project.to_dict() for project in self.projects]
        }
nappernick/envelope
app/models/health_area.py
from .db import db


class HealthArea(db.Model):
    """Lookup table of health areas; Survey rows reference one by id."""
    __tablename__ = "health_areas"

    id = db.Column(db.Integer, primary_key = True)
    health_area = db.Column(db.String(100), nullable = False, unique = True)
    # All surveys administered in this area.
    ha_surveys = db.relationship("Survey", backref="health_area")

    def to_dict(self):
        """Id and name only."""
        return {
            "id": self.id,
            "health_area": self.health_area
        }

    def to_dict_full(self):
        """Id, name, and every survey taken in this area."""
        return {
            "id": self.id,
            "health_area": self.health_area,
            "surveys": [survey.to_dict() for survey in self.ha_surveys]
        }

    def to_dict_full_for_project(self, project_id):
        """Like to_dict_full, but restricted to one project's surveys."""
        return {
            "id": self.id,
            "health_area": self.health_area,
            "surveys": [survey.to_dict() for survey in self.ha_surveys if survey.project_id == project_id]
        }

    @classmethod
    def class_to_dict(cls):
        """Return {id: health_area name} for every row in the table."""
        ha_dict = {}
        health_areas = db.session.query(HealthArea)
        for ha in health_areas:
            had = ha.to_dict()
            ha_dict[had["id"]] = had["health_area"]
        return ha_dict
nappernick/envelope
app/seeds/users.py
from app.models import db, User

# Seed accounts: one admin ("demo", type_id 1) and two clients (type_id 2).
_SEED_USERS = [
    {"username": "demo", "email": "<EMAIL>", "password": "password",
     "first_name": "Demo", "last_name": "User", "type_id": 1},
    {"username": "billyblanks", "email": "<EMAIL>", "password": "password",
     "first_name": "William", "last_name": "Blanks", "type_id": 2},
    {"username": "pattycakes", "email": "<EMAIL>", "password": "password",
     "first_name": "Patricia", "last_name": "Cakes", "type_id": 2},
]


def seed_users():
    """Insert the demo accounts in a single transaction."""
    for attrs in _SEED_USERS:
        db.session.add(User(**attrs))
    db.session.commit()


def undo_users():
    """Remove all seeded users.

    Uses raw TRUNCATE because SQLAlchemy has no built-in for it: RESTART
    IDENTITY resets the auto-incrementing primary key and CASCADE clears
    dependent rows.
    """
    db.session.execute('TRUNCATE TABLE users RESTART IDENTITY CASCADE;')
    db.session.commit()
nappernick/envelope
app/seeds/health_areas.py
from app.models import db, HealthArea
from app.api.data_processing import data_processing_for_health_areas

# Path of the survey export inside the deployed container; pass csv_path to
# seed from a different location (e.g. a local checkout).
DEFAULT_SEED_CSV = "/var/www/app/seeds/seed_survey.csv"


def seed_health_areas(csv_path=DEFAULT_SEED_CSV):
    """Insert one HealthArea per distinct area name found in the seed CSV.

    Insertion order matches the parser's first-appearance order, so the
    generated ids line up with the 1-based indices the survey parser emits
    for health_area. The hard-coded path is now an overridable default.
    """
    for area_name in data_processing_for_health_areas(csv_path):
        db.session.add(HealthArea(health_area=area_name))
    db.session.commit()


def undo_health_areas():
    """Empty the health_areas table and reset its id sequence."""
    db.session.execute('TRUNCATE TABLE health_areas RESTART IDENTITY CASCADE;')
    db.session.commit()
nappernick/envelope
app/forms/update_user_form.py
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, PasswordField
from wtforms.validators import DataRequired, Email, ValidationError

from app.models import User


class UpdateUserForm(FlaskForm):
    """Edit-user form consumed by the user-update route; all fields required.

    FIX: the Email validator was imported but never applied — the email
    field now actually validates its format, as clearly intended.
    """
    username = StringField('username', validators=[DataRequired()])
    email = StringField('email', validators=[DataRequired(), Email()])
    password = PasswordField('password', validators=[DataRequired()])
    type_id = IntegerField('type_id', validators=[DataRequired()])
    first_name = StringField('first_name', validators=[DataRequired()])
    last_name = StringField('last_name', validators=[DataRequired()])
nappernick/envelope
app/api/scratch.py
import csv import os import pandas as pd def dta_to_pickle(dta): data = pd.io.stata.read_stata(dta) csv_file = data.to_csv() file_final = pickle.dumps(csv_file) return file_final def zip_to_pickle(zip): zipfile_ob = zipfile.ZipFile(file_like_object) file_names = zipfile_ob.namelist() file_names = [file_name for file_name in file_names if not "__MACOSX/." in file_name] files = [zipfile_ob.open(name).read() for name in file_names] file_final = files[0] file_final = pickle.dumps(file_final) return file_final # def data_processing_creating_survey_records_from_file(csvf): # print(csvf) # # csv_test = ("test.csv", csvf) # with open(csvf) as csvfile: # data_processing_creating_survey_records(csvfile) # def data_processing_creating_survey_records(csvfile): # # original column headings: ["today_date", "start_tracking_time", "q9","q12", "q5latitude", "q5longitude", "duration_itw", "dk_total", "int_outlier_total"] # target_columns = ["date_time_administered","", "health_area","enumerator_id", "lat", "long", "duration", "num_outlier_data_points", "int_outlier_total", "row_index"] # target_indices = [4,5,11,14,2861,2862,2868,2899,2902, 0] # column_data = {} # for column in target_columns: # column_data[column] = [] # reader = csv.reader(csvfile, delimiter=",") # for row in reader: # # print(row[0]) # if row[0] == '': # continue # column_data[target_columns[0]].append(f'{row[target_indices[0]]} {row[target_indices[1]]}') # column_data[target_columns[2]].append(row[target_indices[2]]) # column_data[target_columns[3]].append(row[target_indices[3]]) # column_data[target_columns[4]].append(row[target_indices[4]]) # column_data[target_columns[5]].append(row[target_indices[5]]) # column_data[target_columns[6]].append(row[target_indices[6]]) # column_data[target_columns[7]].append(row[target_indices[7]]) # column_data[target_columns[8]].append(row[target_indices[8]]) # column_data[target_columns[9]].append(row[target_indices[9]]) # # print(column_data["row_index"]) # return column_data 
def data_processing_for_survey_records_from_file(csvf):
    """Open a CSV file on disk and delegate row processing.

    The previous version processed the handle *after* the ``with`` block had
    closed it (and passed a misspelled variable to ``csv.reader``); processing
    now happens while the file is still open.
    """
    with open(csvf, newline="", mode="r") as csvfile:
        return data_processing_for_survey_records(csvfile)


def data_processing_for_survey_records(csv_file):
    """Build a dict of survey records keyed by each row's first column.

    ``csv_file`` may be a newline-joined CSV string or any iterable of CSV
    lines (e.g. an open file object).  The header row (first cell empty or
    'SubmissionDate') is used to resolve the column index of each heading, so
    the export's column layout may vary.
    """
    if isinstance(csv_file, str) and "\n" in csv_file:
        reader = csv.reader(csv_file.split("\n"), delimiter=",")
    else:
        reader = csv.reader(csv_file, delimiter=",")

    # Source-file headings for the columns we extract, in target order.
    headings = ["today_date", "start_tracking_time", "q9", "q12",
                "q5latitude", "q5longitude", "duration_itw",
                "dk_total", "int_outlier_total"]
    target_indices = []
    column_data = {}
    health_areas = []
    for row in reader:
        if len(row) == 0:
            continue
        if row[0] == "" or row[0] == "SubmissionDate":
            # Header row: resolve each heading to its column index.
            target_indices = [row.index(heading) for heading in headings]
            continue
        # Column 11 is assumed to be q9 (health area) — matches the known
        # export layout; TODO confirm for other exports.
        if row[11] not in health_areas:
            health_areas.append(row[11])
        column_data[row[0]] = {
            "date_time_administered": f"{row[target_indices[0]]} {row[target_indices[1]]}",
            # 1-based position of this row's health area in discovery order
            "health_area": health_areas.index(row[target_indices[2]]) + 1,
            "enumerator_id": row[target_indices[3]],
            "lat": row[target_indices[4]],
            "long": row[target_indices[5]],
            "duration": row[target_indices[6]],
            "num_dont_know_responses": row[target_indices[7]],
            "num_outlier_data_points": row[target_indices[8]],
        }
    return column_data


def data_processing_for_health_areas(csvf):
    """Return the distinct health-area values (column 11) from a CSV file."""
    with open(csvf) as csvfile:
        health_areas = []
        reader = csv.reader(csvfile, delimiter=",")
        for row in reader:
            # Skip blank rows as well as the unnumbered header row.
            if not row or row[0] == "":
                continue
            if row[11] not in health_areas:
                health_areas.append(row[11])
        return health_areas
# data_processing_for_survey_records("/Users/nicholasmatthews/Library/Mobile Documents/com~apple~CloudDocs/app_academy/capstone/envelope/app/seeds/seed_survey.csv") # data_processing("/Users/nicholasmatthews/Library/Mobile Documents/com~apple~CloudDocs/app_academy/capstone/envelope/app/api/menage_dash_gps.csv") # data_processing_for_health_areas("/Users/nicholasmatthews/Library/Mobile Documents/com~apple~CloudDocs/app_academy/capstone/envelope/app/seeds/seed_survey.csv")
nappernick/envelope
app/redis/__init__.py
<filename>app/redis/__init__.py from .redis_worker import conn
nappernick/envelope
app/models/type.py
from .db import db


class Type(db.Model):
    """Lookup table of user account types (referenced by users.type_id)."""
    __tablename__ = "types"

    id = db.Column(db.Integer, primary_key = True)
    type = db.Column(db.String(40), nullable = False, unique = True)

    # NOTE: a bare, unassigned ``db.relationship("User", backref="type")``
    # expression used to sit here; never attached to the mapper it had no
    # effect (User already declares its own ``type`` relationship), so the
    # dead call was removed.

    def to_dict(self):
        """Serialize to a plain dict for JSON responses."""
        return {
            "id": self.id,
            "type": self.type
        }

    def name_to_id(self):
        """Shape used by client-side select inputs: {value, label}."""
        return {
            "value": self.id,
            "label": self.type
        }
nappernick/envelope
app/api/project_routes.py
"""Flask blueprint for project CRUD plus asynchronous survey ingestion."""
import pickle
import threading
from faker import Faker
from datetime import datetime
from flask import Blueprint, jsonify, session, request
from flask_login import current_user, login_required
from datetime import datetime  # NOTE(review): duplicate of the import above
from app.forms import ProjectForm
from app.models import Project, User, DataSet, Survey, db
from app.api import validation_errors_to_error_messages
from .data_processing import data_processing_for_survey_records, process_data_for_reader
from sqlalchemy.orm import joinedload

# Used to fabricate placeholder respondent names for ingested survey rows.
fake = Faker()

project_routes = Blueprint("projects", __name__)


@project_routes.route("/")
@login_required
def all_projects():
    """List projects: type-1 users see all projects, type-2 users see their own."""
    user = db.session.query(User).get(current_user.get_id()).to_dict()
    if user["type_id"] == 1:
        projects = db.session.query(Project).options(joinedload(Project.surveys)).all()
    if user["type_id"] == 2:
        projects = db.session.query(Project).filter_by(user_id = user["id"]).all()
    # NOTE(review): for any other type_id, `projects` is unbound here and this
    # raises UnboundLocalError — confirm only types 1 and 2 exist.
    return jsonify([project.to_dict_survey_summary() for project in projects])


@project_routes.route("/", methods=["POST"])
@login_required
def post_project():
    """Validate the project form, then create the project (and its survey
    records) on a background thread so the HTTP response returns immediately.
    """
    form = ProjectForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        post_project = threading.Thread(target = async_post, args=[form])
        post_project.start()
        return {"success": "Post was successful"}
    return {'errors': validation_errors_to_error_messages(form.errors)}


# Allow the post request to be truly asynchronous, by closing the connection & running record creation here
def async_post(form):
    """Create a Project, expand its pickled data set into Survey rows, then
    write the aggregated summary columns back onto the project.

    Runs on a worker thread inside its own app context (the request context
    no longer exists by the time this executes).
    """
    from app import app, db
    with app.app_context():
        project = Project(
            project_name=form.data['project_name'],
            data_set_id=form.data['data_set_id'],
            user_id=form.data['user_id'],
            target_health_area_count=form.data['target_health_area_count'],
            target_surv_count=form.data['target_surv_count'],
            created_at=datetime.now(),
            updated_at=datetime.now()
        )
        db.session.add(project)
        db.session.commit()  # commit now so project.id is assigned
        project_id = project.id
        # Single-column result row; _asdict() yields {"data_set": <bytes>}.
        data_set = db.session.query(DataSet.data_set).filter(DataSet.id==form.data['data_set_id']).first()
        data_set = data_set._asdict()
        # NOTE(review): unpickling stored blobs assumes they were written by
        # this app (dta_to_pickle / zip_to_pickle); pickle is unsafe on
        # untrusted data.
        pickle_file = pickle.loads(data_set["data_set"])
        surveys = data_processing_for_survey_records(pickle_file)
        # Lay out the summary fields for projects
        enumerators = []
        health_areas = []
        health_area_count = 0
        enumerator_count = 0
        dont_know_count = 0
        outlier_count = 0
        sum_duration = float()
        # as the survey records are built, tabulate summary data for project
        for survey in surveys.values():
            if survey["enumerator_id"] not in enumerators:
                enumerator_count += 1
                enumerators.append(survey["enumerator_id"])
            if survey["health_area"] not in health_areas:
                health_area_count += 1
                health_areas.append(survey["health_area"])
            # Values arrive as strings like "2.0"; str() -> float() -> int()
            # tolerates both numeric and string inputs.
            dont_know_count += int(float(str(survey["num_dont_know_responses"])))
            outlier_count += int(float(str(survey["num_outlier_data_points"])))
            sum_duration = sum_duration + float(str(survey["duration"]))
            # build the survey records & commit to DB
            survey_seed = Survey(
                health_area_id=int(survey["health_area"]),
                project_id=project_id,
                enumerator_id=survey["enumerator_id"],
                date_time_administered=survey["date_time_administered"],
                duration=survey["duration"],
                respondent=fake.name(),  # placeholder name; real names not stored
                num_outlier_data_points=int(float(str(survey["num_outlier_data_points"]))),
                num_dont_know_responses=int(float(str(survey["num_dont_know_responses"]))),
                lat=survey["lat"],
                long=survey["long"],
                outside_health_zone=False
            )
            db.session.add(survey_seed)
        # Now that all survey records have been built, update the project
        avg_duration = float(sum_duration / len(surveys)) if len(surveys) else 0.00
        project = db.session.query(Project).options(joinedload("surveys")).get(project_id)
        project.survey_count = len(surveys)
        project.health_area_count = health_area_count
        project.enumerator_count = enumerator_count
        project.dont_know_count = dont_know_count
        project.outlier_count = outlier_count
        project.avg_duration = avg_duration
        db.session.commit()
        return
    return


# Specifically for returning values related to projects for search
@project_routes.route("/search")
@login_required
def search_projects():
    """Return {value, label} pairs for every project (for search/select inputs)."""
    projects = db.session.query(Project).all()
    return jsonify([project.search() for project in projects])


# Get data for a single project
@project_routes.route("/<int:id>")
@login_required
def project(id):
    """Return one project with its survey summary."""
    project = db.session.query(Project).options(joinedload("surveys")).get(id)
    return jsonify(project.to_dict_survey_summary())


# Updating a single project
@project_routes.route("/<int:id>", methods=["POST"])
@login_required
def update_project(id):
    """Update a project's editable fields from the JSON request body."""
    project = db.session.query(Project).options(joinedload("surveys")).get(id)
    req = dict(request.json)
    project.project_name = req['project_name']
    project.target_health_area_count=req['target_health_area_count']
    project.target_surv_count=req['target_surv_count']
    project.updated_at = datetime.now()
    if project.data_set_id != req['data_set_id']:
        project.data_set_id = req['data_set_id']
    db.session.commit()
    project = db.session.query(Project).get(id)
    return jsonify(project.to_dict_survey_summary())


# Deleting a single project
@project_routes.route("/<int:projectId>", methods=["DELETE"])
@login_required
def delete_project(projectId):
    """Delete a project; its surveys cascade via the model relationship."""
    project = db.session.query(Project).get(projectId)
    db.session.delete(project)
    db.session.commit()
    return { projectId: "Successfully deleted" }


@project_routes.route("/<int:id>/surveys")
@login_required
def project_surveys(id):
    """Return a project with its full survey list and owning user."""
    project = db.session.query(Project).options(joinedload(Project.surveys)).options(joinedload(Project.user)).get(id)
    return jsonify(project.to_dict_full())
nappernick/envelope
app/models/user.py
from .db import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin


class User(db.Model, UserMixin):
    """Application account.

    ``type_id`` links to Type (routes elsewhere treat 1 as admin and 2 as
    requester — TODO confirm that mapping).
    """
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key = True)
    first_name = db.Column(db.String(40), nullable = False)
    last_name = db.Column(db.String(40), nullable = False)
    username = db.Column(db.String(40), nullable = False, unique = True)
    email = db.Column(db.String(255), nullable = False, unique = True)
    # Only the werkzeug hash is stored; see the `password` property below.
    hashed_password = db.Column(db.String(255), nullable = False)
    type_id = db.Column(db.Integer, db.ForeignKey("types.id"), nullable = False)
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())

    type = db.relationship("Type")
    projects = db.relationship("Project")

    @property
    def password(self):
        # Reading `password` returns the stored hash, never plaintext.
        return self.hashed_password

    @password.setter
    def password(self, password):
        # Assigning `user.password = "..."` hashes transparently.
        self.hashed_password = generate_password_hash(password)

    def check_password(self, password):
        """Return True when the plaintext matches the stored hash."""
        return check_password_hash(self.password, password)

    def to_dict(self):
        """Serialize account fields (password hash excluded) for JSON responses."""
        return {
            "id": self.id,
            "first_name": self.first_name,
            "last_name": self.last_name,
            "username": self.username,
            "email": self.email,
            "type_id": self.type_id,
            "created_at": self.created_at,
            "updated_at": self.updated_at
        }

    def to_dict_full(self):
        """Like to_dict, plus the human-readable type name."""
        return {
            "id": self.id,
            "first_name": self.first_name,
            "last_name": self.last_name,
            "username": self.username,
            "email": self.email,
            "type_id": self.type_id,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "type": self.type.type
        }
nappernick/envelope
app/models/project.py
from .db import db


class Project(db.Model):
    """A data-collection project owning Survey rows.

    The cached summary columns (survey_count, avg_duration, ...) are filled
    in by the asynchronous ingest in project_routes.async_post and may be
    NULL before the first ingest completes.
    """
    __tablename__ = "projects"

    id = db.Column(db.Integer, primary_key = True)
    project_name = db.Column(db.String(100), nullable = False, unique = True)
    project_notes = db.Column(db.Text, nullable = True)
    data_set_id = db.Column(db.Integer, db.ForeignKey("data_sets.id"), nullable = True)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable = False)
    target_health_area_count = db.Column(db.Integer, nullable = False)
    target_surv_count = db.Column(db.Integer, nullable = False)
    enumerator_count = db.Column(db.Integer, nullable=True)
    survey_count = db.Column(db.Integer, nullable=True)
    health_area_count = db.Column(db.Integer, nullable=True)
    avg_duration = db.Column(db.Float, nullable=True)
    dont_know_count = db.Column(db.Integer, nullable=True)
    outlier_count = db.Column(db.Integer, nullable=True)
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())

    user = db.relationship("User", backref="project", lazy="select")
    surveys = db.relationship("Survey", cascade="all,delete")
    data_set = db.relationship("DataSet")

    def to_dict(self):
        """Serialize scalar project fields for JSON responses."""
        return {
            "id": self.id,
            "project_name": self.project_name,
            "project_notes": self.project_notes,
            "data_set_id": self.data_set_id,
            "user_id": self.user_id,
            "target_health_area_count": self.target_health_area_count,
            "target_surv_count": self.target_surv_count,
            "survey_count": self.survey_count,
            "health_area_count": self.health_area_count,
            "enumerator_count": self.enumerator_count,
            "dont_know_count": self.dont_know_count,
            "outlier_count": self.outlier_count,
            "avg_duration": self.avg_duration,
            "created_at": self.created_at,
            "updated_at": self.updated_at
        }

    def to_dict_full(self):
        """to_dict plus the owning user and every survey, fully serialized."""
        return {
            "id": self.id,
            "project_name": self.project_name,
            "project_notes": self.project_notes,
            "data_set_id": self.data_set_id,
            "user_id": self.user_id,
            "target_health_area_count": self.target_health_area_count,
            "target_surv_count": self.target_surv_count,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "survey_count": self.survey_count,
            "health_area_count": self.health_area_count,
            "enumerator_count": self.enumerator_count,
            "dont_know_count": self.dont_know_count,
            "outlier_count": self.outlier_count,
            "avg_duration": self.avg_duration,
            "user": self.user.to_dict(),
            "surveys": [survey.to_dict() for survey in self.surveys]
        }

    def search(self):
        """Shape used by client-side select/search inputs: {value, label}."""
        return {
            "value": self.id,
            "label": self.project_name
        }

    def to_dict_survey_summary(self):
        """Serialize the project plus coverage ratios computed from its surveys.

        Fixes over the previous version:
        - the per-health-area tally used two independent ``if``s, so the
          first survey seen for each area was immediately re-counted and
          every count was inflated by one; now if/else
        - a project with no surveys divided by zero; coverage now falls
          back to 0.0
        - ``health_area_count`` can be NULL before ingest; treated as 0
        """
        surveys = [survey.to_dict() for survey in self.surveys]
        health_areas = []
        health_area_survey_count = {}
        health_area_count = 0
        for survey in surveys:
            area_id = survey["health_area_id"]
            if area_id not in health_area_survey_count:
                health_area_survey_count[area_id] = 1
            else:
                health_area_survey_count[area_id] += 1
            if area_id not in health_areas:
                health_area_count += 1
                health_areas.append(area_id)
        surv_coverage_total = float()
        for survey_count in health_area_survey_count.values():
            surv_coverage_total += survey_count / self.target_surv_count
        survey_coverage = (surv_coverage_total / health_area_count) if health_area_count else 0.0
        return {
            "id": self.id,
            "project_name": self.project_name,
            "project_notes": self.project_notes,
            "data_set_id": self.data_set_id,
            "user_id": self.user_id,
            "target_health_area_count": self.target_health_area_count,
            "target_surv_count": self.target_surv_count,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "user": self.user.to_dict(),
            "survey_count": self.survey_count,
            "health_area_count": self.health_area_count,
            "health_area_coverage": (self.health_area_count or 0) / self.target_health_area_count,
            "survey_coverage": survey_coverage,
            "enumerator_count": self.enumerator_count,
            "dont_know_count": self.dont_know_count,
            "outlier_count": self.outlier_count,
            "avg_duration": self.avg_duration,
        }
nappernick/envelope
app/api/survey_routes.py
from flask import Blueprint, jsonify, session, request
from flask_login import current_user, login_required

from app.models import Survey, db

survey_routes = Blueprint("surveys", __name__)


@survey_routes.route("/")
@login_required
def all_surveys():
    """Return every survey as JSON; only type-1 (admin) users are allowed."""
    requester = current_user.to_dict()
    if requester["type_id"] != 1:
        return jsonify({"errors": ["Unauthorized"]})
    records = db.session.query(Survey).all()
    return jsonify([record.to_dict() for record in records])
nappernick/envelope
app/models/__init__.py
from .db import db from .user import User from .type import Type from .data_set import DataSet from .project import Project from .survey import Survey from .health_area import HealthArea from .survey_question import SurveyQuestion
nappernick/envelope
app/models/survey.py
from .db import db
from sqlalchemy.orm import load_only


class Survey(db.Model):
    """A single administered survey, tied to a project and a health area."""
    __tablename__ = "surveys"

    id = db.Column(db.Integer, primary_key = True, index=True)
    health_area_id = db.Column(db.Integer, db.ForeignKey("health_areas.id"), nullable = False, index=True)
    project_id = db.Column(db.Integer, db.ForeignKey("projects.id"), nullable = False, index=True)
    # Enumerator identifier from the source data set (a string, not a FK).
    enumerator_id = db.Column(db.String(20), nullable = False, index=True)
    date_time_administered = db.Column(db.DateTime, nullable = False, index=True)
    duration = db.Column(db.Float, nullable = False, index=True)
    respondent = db.Column(db.String(100), nullable = False, index=True)
    num_outlier_data_points = db.Column(db.Integer, nullable = True, index=True)
    num_dont_know_responses = db.Column(db.Integer, nullable = True, index=True)
    outside_health_zone = db.Column(db.Boolean(), index=True)
    lat = db.Column(db.Float, nullable = False, index=True)
    long = db.Column(db.Float, nullable = False, index=True)
    created_at = db.Column(db.DateTime, server_default=db.func.now(), index=True)
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now(), index=True)

    project = db.relationship("Project", backref="survey")
    survey_questions = db.relationship("SurveyQuestion")
    survey_health_area = db.relationship("HealthArea")

    def to_dict(self):
        """Serialize scalar fields for JSON responses.

        NOTE(review): the output keys are "latitude"/"longtitude" (sic) while
        the columns are lat/long — clients may depend on the misspelling, so
        it is deliberately left unchanged here.
        """
        return {
            "id": self.id,
            "health_area_id": self.health_area_id,
            "project_id": self.project_id,
            "enumerator_id": self.enumerator_id,
            "date_time_administered": self.date_time_administered,
            "duration": self.duration,
            "respondent": self.respondent,
            "num_outlier_data_points": self.num_outlier_data_points,
            "num_dont_know_responses": self.num_dont_know_responses,
            "outside_health_zone": self.outside_health_zone,
            "latitude": self.lat,
            "longtitude": self.long,
            "created_at": self.created_at,
            "updated_at": self.updated_at
        }

    def to_dict_full(self):
        """Like to_dict but also embeds the related HealthArea.

        NOTE(review): "health_area" is the raw model instance, which the
        default JSON encoder cannot serialize — confirm callers handle it.
        """
        return {
            "id": self.id,
            "health_area_id": self.health_area_id,
            "project_id": self.project_id,
            "enumerator_id": self.enumerator_id,
            "date_time_administered": self.date_time_administered,
            "duration": self.duration,
            "respondent": self.respondent,
            "num_outlier_data_points": self.num_outlier_data_points,
            "num_dont_know_responses": self.num_dont_know_responses,
            "outside_health_zone": self.outside_health_zone,
            "latitude": self.lat,
            "longtitude": self.long,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "health_area": self.survey_health_area
        }

    # Get all of the health areas for a given project
    @classmethod
    def get_health_area_ids(cls, projectId):
        """Return the distinct health_area_id values among a project's surveys."""
        project_health_areas = db.session.query(Survey).filter_by(project_id = projectId)\
            .options(load_only(cls.health_area_id)).distinct(cls.health_area_id).all()
        return [health_area.health_area_id for health_area in project_health_areas]
nappernick/envelope
app/api/data_processing.py
import csv
import os
import pickle
import tempfile
import zipfile

import pandas as pd


def dta_to_pickle(dta):
    """Read a Stata .dta file, render it as CSV text, and return the pickled text.

    (``pickle`` was previously used without being imported, raising NameError.)
    """
    data = pd.io.stata.read_stata(dta)
    csv_file = data.to_csv()
    return pickle.dumps(csv_file)


def zip_to_pickle(zip):
    """Return the pickled bytes of the first non-__MACOSX member of a zip archive.

    ``zip`` may be a path or a file-like object.  (The original body read an
    undefined ``file_like_object`` instead of its own parameter and never
    imported ``zipfile``.)
    """
    zipfile_ob = zipfile.ZipFile(zip)
    file_names = [name for name in zipfile_ob.namelist()
                  if "__MACOSX/." not in name]
    file_final = zipfile_ob.open(file_names[0]).read()
    return pickle.dumps(file_final)


def data_processing_for_survey_records(csv_file):
    """Normalize the input into CSV rows and build survey records.

    Accepts a list of row lists, a pandas DataFrame, a newline-joined CSV
    string, or a filesystem path to a CSV file.
    """
    if isinstance(csv_file, list):
        return process_data_for_reader(csv_file)
    if isinstance(csv_file, pd.DataFrame):
        # The previous DataFrame branch referenced an undefined name and
        # returned nothing; serialize back to CSV text and reuse the string
        # path (assumes the frame's columns are the export's header row —
        # TODO confirm).
        return data_processing_for_survey_records(csv_file.to_csv(index=False))
    if "\n" in csv_file:
        reader = csv.reader(csv_file.split("\n"), delimiter=",")
        return process_data_for_reader(reader)
    with open(csv_file, newline="", mode='r') as f:
        reader = csv.reader(f, delimiter=",")
        return process_data_for_reader(reader)


def process_data_for_reader(reader):
    """Extract one record per data row, keyed by the row's first column.

    The header row (first cell empty or 'SubmissionDate') is used to resolve
    the column index of each heading, so the export's column layout may vary.
    """
    headings = ["today_date", "start_tracking_time", "q9", "q12",
                "q5latitude", "q5longitude", "duration_itw",
                "dk_total", "int_outlier_total"]
    target_indices = []
    column_data = {}
    health_areas = []
    for row in reader:
        if len(row) == 0:
            continue
        if row[0] == '' or row[0] == 'SubmissionDate':
            # Header row: resolve each heading to its column index.
            target_indices = [row.index(value) for value in headings]
            continue
        # Column 11 is assumed to be q9 (health area) — matches the known
        # export layout; TODO confirm for other exports.
        if row[11] not in health_areas:
            health_areas.append(row[11])
        column_data[row[0]] = {
            "date_time_administered": f'{row[target_indices[0]]} {row[target_indices[1]]}',
            # 1-based position of this row's health area in discovery order
            "health_area": health_areas.index(row[target_indices[2]]) + 1,
            "enumerator_id": row[target_indices[3]],
            "lat": row[target_indices[4]],
            "long": row[target_indices[5]],
            "duration": row[target_indices[6]],
            "num_dont_know_responses": row[target_indices[7]],
            "num_outlier_data_points": row[target_indices[8]]
        }
    return column_data


def dataframe_processing_for_survey_records(df):
    """Unfinished DataFrame-native variant; returns None.

    TODO(review): kept only for interface compatibility — use
    ``data_processing_for_survey_records`` instead, which now accepts a
    DataFrame directly.
    """
    return


def data_processing_for_health_areas(csvf):
    """Return the distinct health-area values (column 11) from a CSV file."""
    with open(csvf) as csvfile:
        health_areas = []
        reader = csv.reader(csvfile, delimiter=",")
        for row in reader:
            # Skip blank rows as well as the unnumbered header row
            # (indexing row[0] on an empty row used to raise IndexError).
            if not row or row[0] == '':
                continue
            if row[11] not in health_areas:
                health_areas.append(row[11])
        return health_areas
nappernick/envelope
app/api/envelope_api.py
"""One-off script: pull form data from the SurveyCTO API and load it into pandas."""
import json
import os

import pandas as pd
import pysurveycto
import requests

# Get data from SurveyCTO API.
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables before sharing/deploying.
scto = pysurveycto.SurveyCTOObject('envelope', '<EMAIL>', 'Envelope-VisX')
raw = scto.get_form_data('NORC-IGA-Endline-Menage')

# Change directory so the scratch file lands next to this script.
os.chdir(r"/Users/nicholasmatthews/Library/Mobile Documents/com~apple~CloudDocs/app_academy/capstone/envelope/app/api")
cwd = os.getcwd()

# Write the raw text to a CSV scratch file.  The original opened/closed the
# handle manually and bound the name `csv` to write()'s return value (a
# character count); the context manager guarantees the file is closed even
# on error.
with open("csv.txt", "w") as text_file:
    text_file.write(raw)

# Import CSV file
df = pd.read_csv('csv.txt', header = 0, encoding='latin-1')
nappernick/envelope
migrations/versions/20210124_012014_.py
<gh_stars>1-10 """empty message Revision ID: 26cae0588639 Revises: Create Date: 2021-01-24 01:20:14.782725 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('data_sets', sa.Column('id', sa.Integer(), nullable=False), sa.Column('data_set_name', sa.String(length=100), nullable=False), sa.Column('data_set', sa.LargeBinary(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_data_sets_created_at'), 'data_sets', ['created_at'], unique=False) op.create_index(op.f('ix_data_sets_data_set_name'), 'data_sets', ['data_set_name'], unique=True) op.create_index(op.f('ix_data_sets_id'), 'data_sets', ['id'], unique=False) op.create_index(op.f('ix_data_sets_updated_at'), 'data_sets', ['updated_at'], unique=False) op.create_table('health_areas', sa.Column('id', sa.Integer(), nullable=False), sa.Column('health_area', sa.String(length=100), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('health_area') ) op.create_table('types', sa.Column('id', sa.Integer(), nullable=False), sa.Column('type', sa.String(length=40), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('type') ) op.create_table('users', sa.Column('id', sa.Integer(), nullable=False), sa.Column('first_name', sa.String(length=40), nullable=False), sa.Column('last_name', sa.String(length=40), nullable=False), sa.Column('username', sa.String(length=40), nullable=False), sa.Column('email', sa.String(length=255), nullable=False), sa.Column('hashed_password', sa.String(length=255), nullable=False), sa.Column('type_id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), 
server_default=sa.text('now()'), nullable=True), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.ForeignKeyConstraint(['type_id'], ['types.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('email'), sa.UniqueConstraint('username') ) op.create_table('projects', sa.Column('id', sa.Integer(), nullable=False), sa.Column('project_name', sa.String(length=100), nullable=False), sa.Column('project_notes', sa.Text(), nullable=True), sa.Column('data_set_id', sa.Integer(), nullable=True), sa.Column('user_id', sa.Integer(), nullable=False), sa.Column('target_health_area_count', sa.Integer(), nullable=False), sa.Column('target_surv_count', sa.Integer(), nullable=False), sa.Column('enumerator_count', sa.Integer(), nullable=True), sa.Column('survey_count', sa.Integer(), nullable=True), sa.Column('health_area_count', sa.Integer(), nullable=True), sa.Column('avg_duration', sa.Float(), nullable=True), sa.Column('dont_know_count', sa.Integer(), nullable=True), sa.Column('outlier_count', sa.Integer(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.ForeignKeyConstraint(['data_set_id'], ['data_sets.id'], ), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('project_name') ) op.create_table('surveys', sa.Column('id', sa.Integer(), nullable=False), sa.Column('health_area_id', sa.Integer(), nullable=False), sa.Column('project_id', sa.Integer(), nullable=False), sa.Column('enumerator_id', sa.String(length=20), nullable=False), sa.Column('date_time_administered', sa.DateTime(), nullable=False), sa.Column('duration', sa.Float(), nullable=False), sa.Column('respondent', sa.String(length=100), nullable=False), sa.Column('num_outlier_data_points', sa.Integer(), nullable=True), sa.Column('num_dont_know_responses', sa.Integer(), nullable=True), 
sa.Column('outside_health_zone', sa.Boolean(), nullable=True), sa.Column('lat', sa.Float(), nullable=False), sa.Column('long', sa.Float(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True), sa.ForeignKeyConstraint(['health_area_id'], ['health_areas.id'], ), sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_surveys_created_at'), 'surveys', ['created_at'], unique=False) op.create_index(op.f('ix_surveys_date_time_administered'), 'surveys', ['date_time_administered'], unique=False) op.create_index(op.f('ix_surveys_duration'), 'surveys', ['duration'], unique=False) op.create_index(op.f('ix_surveys_enumerator_id'), 'surveys', ['enumerator_id'], unique=False) op.create_index(op.f('ix_surveys_health_area_id'), 'surveys', ['health_area_id'], unique=False) op.create_index(op.f('ix_surveys_id'), 'surveys', ['id'], unique=False) op.create_index(op.f('ix_surveys_lat'), 'surveys', ['lat'], unique=False) op.create_index(op.f('ix_surveys_long'), 'surveys', ['long'], unique=False) op.create_index(op.f('ix_surveys_num_dont_know_responses'), 'surveys', ['num_dont_know_responses'], unique=False) op.create_index(op.f('ix_surveys_num_outlier_data_points'), 'surveys', ['num_outlier_data_points'], unique=False) op.create_index(op.f('ix_surveys_outside_health_zone'), 'surveys', ['outside_health_zone'], unique=False) op.create_index(op.f('ix_surveys_project_id'), 'surveys', ['project_id'], unique=False) op.create_index(op.f('ix_surveys_respondent'), 'surveys', ['respondent'], unique=False) op.create_index(op.f('ix_surveys_updated_at'), 'surveys', ['updated_at'], unique=False) op.create_table('survey_questions', sa.Column('id', sa.Integer(), nullable=False), sa.Column('survey_id', sa.Integer(), nullable=False), sa.Column('question', sa.String(), nullable=False), sa.Column('value', 
sa.String(), nullable=False), sa.Column('outside_2_sd', sa.Boolean(), nullable=True), sa.Column('reviewed', sa.Boolean(), nullable=True), sa.Column('standard_deviation', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['survey_id'], ['surveys.id'], ), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('survey_questions') op.drop_index(op.f('ix_surveys_updated_at'), table_name='surveys') op.drop_index(op.f('ix_surveys_respondent'), table_name='surveys') op.drop_index(op.f('ix_surveys_project_id'), table_name='surveys') op.drop_index(op.f('ix_surveys_outside_health_zone'), table_name='surveys') op.drop_index(op.f('ix_surveys_num_outlier_data_points'), table_name='surveys') op.drop_index(op.f('ix_surveys_num_dont_know_responses'), table_name='surveys') op.drop_index(op.f('ix_surveys_long'), table_name='surveys') op.drop_index(op.f('ix_surveys_lat'), table_name='surveys') op.drop_index(op.f('ix_surveys_id'), table_name='surveys') op.drop_index(op.f('ix_surveys_health_area_id'), table_name='surveys') op.drop_index(op.f('ix_surveys_enumerator_id'), table_name='surveys') op.drop_index(op.f('ix_surveys_duration'), table_name='surveys') op.drop_index(op.f('ix_surveys_date_time_administered'), table_name='surveys') op.drop_index(op.f('ix_surveys_created_at'), table_name='surveys') op.drop_table('surveys') op.drop_table('projects') op.drop_table('users') op.drop_table('types') op.drop_table('health_areas') op.drop_index(op.f('ix_data_sets_updated_at'), table_name='data_sets') op.drop_index(op.f('ix_data_sets_id'), table_name='data_sets') op.drop_index(op.f('ix_data_sets_data_set_name'), table_name='data_sets') op.drop_index(op.f('ix_data_sets_created_at'), table_name='data_sets') op.drop_table('data_sets') # ### end Alembic commands ###
nappernick/envelope
app/models/survey_question.py
from .db import db


class SurveyQuestion(db.Model):
    """A single question/answer pair belonging to a survey.

    Columns mirror the `survey_questions` table created by the Alembic
    migration: the raw question text, its recorded value, and outlier-review
    bookkeeping flags.
    """

    __tablename__ = "survey_questions"

    id = db.Column(db.Integer, primary_key=True)
    survey_id = db.Column(db.Integer, db.ForeignKey("surveys.id"), nullable=False)
    question = db.Column(db.String(), nullable=False)
    value = db.Column(db.String(), nullable=False)
    outside_2_sd = db.Column(db.Boolean())          # value lies outside 2 standard deviations
    reviewed = db.Column(db.Boolean())              # flagged value has been human-reviewed
    standard_deviation = db.Column(db.Integer())

    # One-to-many back to the owning Survey row.
    survey = db.relationship("Survey", backref="survey_question", lazy="select")

    def to_dict(self):
        """Serialize this row to a plain dict for JSON responses.

        BUGFIX: the original also emitted "plausible": self.respondent, but
        SurveyQuestion has no `respondent` attribute (copy-paste from the
        Survey model), so every call raised AttributeError. The key has been
        removed.
        """
        return {
            "id": self.id,
            "survey_id": self.survey_id,
            "question": self.question,
            "value": self.value,
            "outside_2_sd": self.outside_2_sd,
            "reviewed": self.reviewed,
            "standard_deviation": self.standard_deviation,
        }
iflb/neji-finder-tutti-client-python
example.py
import sys
import asyncio

from neji_finder_tutti_client import NejiFinderTuttiClient


async def on_response(data):
    """Print each pushed answer as it arrives."""
    print('Data received! (watch_id: {})'.format(data['last_watch_id']))
    print(data)


async def on_error(msg):
    """Print transport-level errors."""
    print('on_error', msg)


async def main():
    """Demo driver: connect, sign in, then run the mode given on argv."""
    mode = sys.argv[1]
    client = NejiFinderTuttiClient()
    try:
        await client.open(
            works_host='https://dev.neji-finder.tutti.works',
            market_host='https://dev.neji-finder.tutti.market'
        )
        await client.sign_in(
            works_params={
                'user_name': 'admin',
                'password': '<PASSWORD>'
            },
            market_params={
                'user_id': 'requester1',
                'password': '<PASSWORD>'
            }
        )

        if mode == 'publish':
            # BUGFIX: check argv length BEFORE indexing sys.argv[2]; the
            # original read argv[2] first and raised IndexError when the
            # argument was missing. Usage line now also mentions <sync_id>,
            # which this mode requires.
            if len(sys.argv) < 4:
                print('Usage: python example.py publish <automation_parameter_set_id> <sync_id>')
                print('')
            else:
                automation_parameter_set_id = sys.argv[2]
                sync_id = sys.argv[3]
                ngid, jid = await client.publish_tasks_to_market(
                    automation_parameter_set_id,
                    sync_id
                )
        elif mode == 'watch_response':
            if len(sys.argv) < 3:
                print('Usage: python example.py watch_response <automation_parameter_set_id> <last_watch_id>')
                print('')
            else:
                automation_parameter_set_id = sys.argv[2]
                last_watch_id = sys.argv[3] if len(sys.argv) == 4 else '+'
                print('Started watching response...')
                await client.watch_responses_for_tasks(
                    automation_parameter_set_id,
                    handler=on_response,
                    last_watch_id=last_watch_id
                )
        elif mode == 'test_connection':
            print('open and sign_in finished')
    except Exception as e:
        # BUGFIX: only connection errors carry .resource/.err; the original
        # handler itself raised AttributeError for any other exception.
        if hasattr(e, 'resource') and hasattr(e, 'err'):
            print(e.resource, e.err.state, e.err.source)
        else:
            print(e)


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print('Usage: python example.py <mode> <automation_parameter_set_id> <sync_id>')
        print('Available modes ... "publish", "watch_response"')
        print('')
    else:
        loop = asyncio.get_event_loop()
        loop.create_task(main())
        loop.run_forever()
iflb/neji-finder-tutti-client-python
neji_finder_tutti_client/main.py
import asyncio
from typing import Optional, Tuple, Callable

from tutti_client import TuttiClient
from ducts_client import Duct

from .market_controller import TuttiMarketController


class NejiFinderTuttiClientConnectionError(Exception):
    """Raised when connecting to a Tutti service fails.

    Attributes:
        resource: human-readable service name ('Tutti.works' or 'Tutti.market').
        err: the underlying connection-error event from the duct layer.
    """

    def __init__(self, resource, err):
        self.resource = resource
        self.err = err


class NejiFinderTuttiClientEnvironmentError(Exception):
    """Raised for environment-level failures (kept for API compatibility)."""

    def __init__(self, err):
        self.err = err


class NejiFinderTuttiClient:
    """Facade over Tutti.works and Tutti.market for the Neji Finder app.

    High-level operations:
      * publish_tasks_to_market() -- turn annotation targets into nanotasks
        and publish them to Tutti.market as a Job.
      * watch_responses_for_tasks() -- receive pushed answers for those tasks.

    Lower-level access is available through the ``tutti`` (Tutti.works) and
    ``market`` (Tutti.market) attributes.
    """

    def __init__(self):
        self.tutti = TuttiClient()
        self.market = TuttiMarketController(Duct())

    async def open(self, works_host: Optional[str] = None,
                   market_host: Optional[str] = None) -> None:
        """Connect to Tutti.works and/or Tutti.market.

        Must be called before any other method. A host given as None is
        skipped; the two connections are opened concurrently.
        """
        tasks = []
        if works_host:
            tasks.append(self.open_works(works_host))
        if market_host:
            tasks.append(self.open_market(market_host))
        await asyncio.gather(*tasks)

    async def open_works(self, host: str) -> None:
        """Connect to the Tutti.works server.

        Raises:
            NejiFinderTuttiClientConnectionError: if the duct reports an error.
        """
        err = []

        async def on_error(event):
            err.append(event)

        if host[-1] == '/':
            host = host[:-1]
        self.tutti._duct.connection_listener.onerror = on_error
        await self.tutti.open(host + '/ducts/wsd')
        # Give the error listener a moment to fire before declaring success.
        await asyncio.sleep(0.1)
        if err:
            raise NejiFinderTuttiClientConnectionError('Tutti.works', err[0])

    async def open_market(self, host: str) -> None:
        """Connect to the Tutti.market server (same error contract as open_works)."""
        err = []

        async def on_error(event):
            err.append(event)

        if host[-1] == '/':
            host = host[:-1]
        self.market._duct.connection_listener.onerror = on_error
        await self.market.open(host + '/ducts/wsd')
        await asyncio.sleep(0.1)
        if err:
            raise NejiFinderTuttiClientConnectionError('Tutti.market', err[0])

    def close(self) -> None:
        """Disconnect from both servers."""
        self.close_works()
        self.close_market()

    def close_works(self) -> None:
        """Disconnect from Tutti.works."""
        self.tutti.close()

    def close_market(self) -> None:
        """Disconnect from Tutti.market."""
        self.market.close()

    async def sign_in(self, works_params: dict, market_params: dict) -> None:
        """Sign in to both services.

        Each dict holds the keyword arguments forwarded to sign_in_works() /
        sign_in_market() respectively.
        """
        await self.sign_in_works(**works_params)
        await self.sign_in_market(**market_params)

    async def sign_in_market(self, user_id: str, password: str,
                             access_token_lifetime: int = 60*60*24*7*1000) -> None:
        """Sign in to Tutti.market (token lifetime is in milliseconds)."""
        await self.market.sign_in(user_id, password, access_token_lifetime)

    async def sign_in_works(self, user_name: Optional[str] = None,
                            password_hash: Optional[str] = None,
                            access_token: Optional[str] = None,
                            **kwargs) -> None:
        """Sign in to Tutti.works.

        Accepted credential combinations, in order of precedence:
        1. access_token alone; 2. user_name + password_hash;
        3. user_name + ``password`` passed via kwargs.
        """
        await self.tutti.resource.sign_in(user_name, password_hash, access_token, **kwargs)

    async def sign_out(self) -> None:
        """Sign out from Tutti.market, then from Tutti.works."""
        await self.sign_out_market()
        await self.sign_out_works()

    async def sign_out_market(self) -> None:
        """Sign out from Tutti.market."""
        await self.market.sign_out()

    async def sign_out_works(self) -> None:
        """Sign out from Tutti.works."""
        await self.tutti.resource.sign_out()

    async def publish_tasks_to_market(
        self,
        automation_parameter_set_id: str,
        sync_id: str,
    ) -> Tuple[str, str]:
        """Create a nanotask group on Tutti.works and publish it to Tutti.market.

        Args:
            automation_parameter_set_id: Automation Parameter Set ID issued by
                Tutti.works.
            sync_id: client-side synchronization ID stored on the nanotask.

        Returns:
            (nanotask_group_id, job_id) on success; None if Job registration
            failed (the traceback is printed, preserving previous behavior).

        Raises:
            Exception: if the automation/platform parameter set is missing or
            is not configured for the 'market' platform.
        """
        data = await self.tutti._duct.call(
            self.tutti._duct.EVENT['AUTOMATION_PARAMETER_SET_GET'],
            {
                'automation_parameter_set_id': automation_parameter_set_id,
                'access_token': self.tutti.account_info['access_token']
            }
        )
        if data and data['content']:
            aps = data['content']
        else:
            raise Exception(f'Automation parameter set ID "{automation_parameter_set_id}" is not defined')

        data = await self.tutti._duct.call(
            self.tutti._duct.EVENT['PLATFORM_PARAMETER_SET_GET'],
            {
                'platform_parameter_set_id': aps['platform_parameter_set_id'],
                'access_token': self.tutti.account_info['access_token']
            }
        )
        if data:
            pps = data['content']
        else:
            # BUGFIX: the original template was '"{}}"', which raises
            # ValueError ("Single '}' encountered") instead of this message.
            raise Exception('Platform parameter set ID "{}" is not defined'.format(aps['platform_parameter_set_id']))
        if pps['platform'] != 'market':
            raise Exception('Platform parameter set ID "{}" is not set for market'.format(aps['platform_parameter_set_id']))

        import time
        ts = int(time.time())  # renamed from `time` to stop shadowing the module
        nanotask = {
            'id': f'{ts}',
            'props': {
                'sync_id': sync_id,
            }
        }
        ret = await self.tutti.resource.create_nanotasks(
            project_name=aps['project_name'],
            template_name='NejiFinderApp',
            nanotasks=[nanotask],
            tag='',
            priority=100,
            num_assignable=0,
        )
        nids = ret['nanotask_ids']
        print('Nanotask IDs:', nids)
        ngid = await self.tutti.resource.create_nanotask_group(
            name=f'{ts}',
            nanotask_ids=nids,
            project_name=aps['project_name'],
            template_name='NejiFinderApp',
        )
        print('Nanotask Group ID:', ngid)

        job_parameter = {
            'nanotask_group_ids': [ngid],
            'automation_parameter_set_id': automation_parameter_set_id,
            'platform_parameter_set_id': aps['platform_parameter_set_id']
        }
        try:
            int_or_none = lambda x: int(x) if x is not None and x != '' else None
            res = await self.market.register_job(
                job_class_id=pps['parameters']['job_class_id'],
                job_parameter=job_parameter,
                description=f'created at {ts}',
                num_job_assignments_max=int_or_none(pps['parameters']['num_job_assignments_max']),
                priority_score=int_or_none(pps['parameters']['priorityScore']),
            )
            if res['success']:
                jid = res['body']
            else:
                # BUGFIX: originally `raise Error(...)` -- an undefined name
                # that raised NameError instead of a meaningful error.
                raise RuntimeError('Failed to create a job')
            print('Job Class ID:', pps['parameters']['job_class_id'])
            print('Job ID:', jid)
            return ngid, jid
        except Exception:
            # Best-effort, as before: print the failure and return None.
            import traceback
            traceback.print_exc()

    async def watch_responses_for_tasks(
        self,
        automation_parameter_set_id: str,
        handler: Callable[[dict], None],
        last_watch_id: str = '+'
    ) -> None:
        """Subscribe to pushed answers for the given automation parameter set.

        ``handler`` is an async callable invoked per answer with a dict that
        has 'last_watch_id' and 'data' keys. ``last_watch_id`` follows Redis
        stream-ID semantics: '+' (default) delivers only new answers; passing
        a previous watch ID replays everything after it; '0' replays the full
        history (may be very large -- discouraged).
        """
        self.tutti.resource.on('watch_responses_for_automation_parameter_set', handler)
        await self.tutti.resource.watch_responses_for_automation_parameter_set.send(
            automation_parameter_set_id=automation_parameter_set_id,
            last_watch_id=last_watch_id,
            exclusive=True
        )
iflb/neji-finder-tutti-client-python
neji_finder_tutti_client/market_controller.py
from typing import Optional
import hashlib


class TuttiMarketController:
    """Thin async wrapper around the Tutti.market DUCTS API.

    Only the minimal set of operations needed by the Neji Finder client is
    implemented; further operations from the JavaScript client can be added
    on request.
    """

    def __init__(self, duct):
        self._duct = duct
        # Set by sign_in(). Initialized to None so that unauthenticated use
        # fails with a clear value instead of an AttributeError (original bug).
        self.access_token = None

    async def open(self, wsd_url: str):
        """Connect to the Tutti.market server.

        Args:
            wsd_url: DUCTS endpoint URL.
        """
        await self._duct.open(wsd_url)

    async def close(self):
        """Disconnect from the Tutti.market server."""
        await self._duct.close()

    async def register_job(
        self,
        job_class_id: str,
        job_parameter: Optional[dict] = None,
        description: Optional[str] = None,
        num_job_assignments_max: Optional[int] = None,
        priority_score: Optional[int] = None
    ) -> dict:
        """Publish a job.

        Args:
            job_class_id: job class ID.
            job_parameter: parameters passed to the published job.
            description: requester-visible memo.
            num_job_assignments_max: cap on the number of collected answers.
            priority_score: lower value = higher worker-assignment priority.

        Returns:
            The raw response dict from the server.
        """
        data = await self._duct.call(self._duct.EVENT['REGISTER_JOB'], {
            'access_token': self.access_token,
            'job_class_id': job_class_id,
            'job_parameter': job_parameter,
            'description': description,
            'num_job_assignments_max': num_job_assignments_max,
            'priority_score': priority_score
        })
        return data

    async def sign_in(self, user_id: str, password: str, access_token_lifetime: int):
        """Sign in to Tutti.market and store the returned access token.

        The password is sent as a SHA-512 digest. BUGFIX: encoded as UTF-8
        instead of ASCII -- identical bytes (and hash) for ASCII passwords,
        but non-ASCII passwords no longer raise UnicodeEncodeError.
        """
        data = await self._duct.call(self._duct.EVENT['SIGN_IN'], {
            'user_id': user_id,
            'password_hash': hashlib.sha512(password.encode('utf-8')).digest(),
            'access_token_lifetime': access_token_lifetime
        })
        self.access_token = data['body']['access_token']

    async def sign_out(self):
        """Invalidate the current access token on the server."""
        await self._duct.call(self._duct.EVENT['SIGN_OUT'], {
            'access_token': self.access_token
        })
thomasxmeng/Simple-neural-network-and-linear-regression-analysis-on-avocado-data-in-US
linearReg.py
import numpy as np


class LinearRegression(object):
    """Regularized linear regression trained with batch gradient descent.

    X, y, theta and lambdaa stored at construction time are kept for
    interface compatibility; the methods below take their inputs as explicit
    arguments instead of reading the instance attributes.
    """

    def __init__(self, X, y, theta, lambdaa):
        self.X = X
        self.y = y
        self.theta = theta
        self.lambdaa = lambdaa

    def linearRegCostFunction(self, X, y, theta, lambdaa):
        """Return (J, grad): regularized squared-error cost and its gradient.

        Args:
            X: (m, n) design matrix; column 0 is expected to be the bias/ones
               column.
            y: targets, shape (m,) or (m, 1).
            theta: weights, shape (n,) or (n, 1).
            lambdaa: L2 regularization strength; the bias weight theta[0] is
                not regularized.

        Returns:
            J: scalar cost.
            grad: (n, 1) gradient column vector.
        """
        m = np.size(y)  # number of training examples
        h = np.matmul(X, theta)  # predictions

        # Promote 1-D vectors to column vectors so that h - y broadcasts
        # element-wise instead of producing a spurious (m, m) matrix.
        if y.shape == (y.shape[0],):
            y = y[:, np.newaxis]
        if h.shape == (h.shape[0],):
            h = h[:, np.newaxis]
        h_error = h - y

        # Unregularized cost: (1 / 2m) * sum((h - y)^2).
        cost_term = (1 / (2 * m)) * np.sum(np.power(h_error, 2))
        # L2 penalty, excluding the bias weight.
        theta_no_bias = theta[1:]
        reg_term = (lambdaa / (2 * m)) * np.sum(np.power(theta_no_bias, 2))
        J = cost_term + reg_term

        # Gradient: (1/m) * X^T (h - y), plus regularization on non-bias weights.
        grad = (1 / m) * np.matmul(X.transpose(), h_error)
        # BUGFIX: reshape to a column before the in-place add. The original
        # added a 1-D (n-1,) vector to the (n-1, 1) grad slice, which
        # broadcasts to (n-1, n-1) and raises ValueError for 1-D theta with
        # more than 2 features.
        grad[1:] += (lambdaa / m) * np.reshape(theta_no_bias, (-1, 1))
        return J, grad

    def gradientDescentMulti(self, X, y, theta, alpha, lambdaa, num_iters):
        """Run ``num_iters`` gradient-descent steps; return (theta, J_history).

        Args:
            alpha: learning rate.

        Returns:
            theta: learned weights as an (n, 1) column vector.
            J_history: (num_iters, 1) array; J_history[i] is the cost
                evaluated *before* the i-th update.
        """
        n = X.shape[1]  # number of features
        J_history = np.zeros((num_iters, 1))
        for it in range(num_iters):
            J_history[it], grad = self.linearRegCostFunction(X, y, theta, lambdaa)
            theta = theta.reshape(n, -1) - alpha * grad
        return theta, J_history
hermagrini/pochoclo-system
api/models/movimiento_model.py
from google.appengine.ext import ndb
from google.appengine.ext.ndb.key import Key

from api.lib.custom_handler import improve
from api.lib.date_handler import date_handler
from api.lib.bottle import json_dumps


class Movimiento(ndb.Model):
    """A single accounting movement, embedded in a Cuenta or an Asiento."""

    tipo = ndb.StringProperty()    # movement type (e.g. debit/credit) -- TODO confirm with callers
    monto = ndb.IntegerProperty()  # amount
    date = ndb.DateTimeProperty(auto_now_add=True)
    cuenta = ndb.StringProperty()  # account name, set only when embedded in an Asiento

    # NOTE: these are @classmethods; first parameter renamed from `self`
    # to the idiomatic `cls` (not a caller-visible change).
    @classmethod
    def put_movimiento_cuenta(cls, id_query, tipo, monto):
        """Append a movement to the Cuenta with the given id; return its Key."""
        cuenta = Cuenta.get_by_id(id_query)
        cuenta.movimientos.append(cls(tipo=tipo, monto=monto))
        return cuenta.put()

    @classmethod
    def put_movimiento_asiento(cls, id_query, tipo, monto, cuenta):
        """Append a movement (tagged with the account name) to an Asiento."""
        asiento = Asiento.get_by_id(id_query)
        asiento.movimientos.append(cls(tipo=tipo, monto=monto, cuenta=cuenta))
        return asiento.put()


class Asiento(ndb.Model):
    """An accounting entry: a dated collection of movements."""

    movimientos = ndb.StructuredProperty(Movimiento, repeated=True)
    date = ndb.DateTimeProperty(auto_now_add=True)

    @classmethod
    def get_all_json(cls):
        """Return all Asientos as a JSON array (each dict carries its entity id)."""
        return json_dumps(
            [improve(asiento.to_dict(), asiento.key.id()) for asiento in cls.query()],
            default=date_handler)

    @classmethod
    def get_id_json(cls, id_query):
        """Return the Asiento with the given id as JSON."""
        asiento = cls.get_by_id(id_query)
        return json_dumps(
            improve(asiento.to_dict(), identificador=asiento.key.id()),
            default=date_handler)

    @classmethod
    def put_asiento(cls):
        """Create an empty Asiento; return its Key."""
        return cls().put()


class Cuenta(ndb.Model):
    """An account: belongs to a Rubro and accumulates movements."""

    idRubro = ndb.StringProperty(required=True)
    nombre = ndb.StringProperty(required=True)
    movimientos = ndb.StructuredProperty(Movimiento, repeated=True)

    @classmethod
    def get_all_json(cls):
        """Return all Cuentas as a JSON array (each dict carries its entity id)."""
        return json_dumps(
            [improve(cuenta.to_dict(), cuenta.key.id()) for cuenta in cls.query()],
            default=date_handler)

    @classmethod
    def get_id_json(cls, id_query):
        """Return the Cuenta with the given id as JSON."""
        cuenta = cls.get_by_id(id_query)
        return json_dumps(
            improve(cuenta.to_dict(), identificador=cuenta.key.id()),
            default=date_handler)

    @classmethod
    def put_cuenta(cls, id_query, nombre, idRubro):
        """Create or overwrite the Cuenta with the given id; return its Key."""
        return cls(
            id=id_query,
            idRubro=idRubro,
            nombre=nombre
        ).put()

    @classmethod
    def remove_cuenta(cls, id_query):
        """Delete the Cuenta with the given id."""
        return Key("Cuenta", id_query).delete()
hermagrini/pochoclo-system
api/lib/custom_handler.py
from google.appengine.ext import ndb


def improve(result, identificador = ""):
    # Python 2 / App Engine code (iteritems, unicode).
    #
    # Recursively "improves" an entity's to_dict() output for JSON output:
    # stamps the entity id into the dict and replaces every ndb.Key
    # (top-level value or list element) with the referenced entity's own
    # improved dict, adding that entity's id under "id".
    #
    # Original author note (translated from Spanish): blows up with sessions.
    result["id"] = identificador
    for key, value in result.iteritems():
        if type(value) is list:
            i = 0
            for item in value:
                if isinstance(item, ndb.Key):
                    # Dereference the key and expand the target entity in place.
                    value[i] = improve(item.get().to_dict())
                    value[i]["id"] = item.id()
                elif type(item) is not unicode:
                    # Non-string list items are assumed to be nested dicts
                    # (e.g. StructuredProperty values) -- recurse into them.
                    # NOTE(review): anything that is neither a Key, a unicode
                    # string, nor a dict would break here; confirm callers.
                    value[i] = improve(result = item)
                i = i + 1
        if isinstance(value, ndb.Key):
            # Same expansion for a single (non-list) Key value; mutates
            # result[key] in place.
            result[key] = improve(result[key].get().to_dict())
            result[key]["id"] = value.id()
    return result
hermagrini/pochoclo-system
api/tipo.py
# coding: utf-8 --
from api.lib import bottle
from api.lib.bottle import *
from api.models.tipo_model import Tipo
from json import dumps


def initRoutes(app=None):
    """Register the /tipo REST endpoints on *app* (bottle's default app if omitted)."""
    if not app:
        app = bottle.default_app()

    # All endpoints are POST and exchange JSON, mirroring the rubro/cuenta routes.

    def list_tipos():
        return Tipo.get_all_json()

    def fetch_tipo():
        return Tipo.get_id_json(request.json["id"])

    def store_tipo():
        payload = request.json
        Tipo.put_tipo(id_query=payload["id"], nombre=payload["nombre"])
        return "Tipo agregado exitosamente!"

    def update_tipo():
        # Same upsert as store_tipo; kept as a separate route for the client.
        payload = request.json
        Tipo.put_tipo(id_query=payload["id"], nombre=payload["nombre"])
        return "Tipo editado exitosamente!"

    def delete_tipo():
        Tipo.remove_tipo(request.json["id"])
        return "Tipo eliminado exitosamente!"

    app.route('/tipo/traerListaTipos', method='POST')(list_tipos)
    app.route('/tipo/traerTipo', method='POST')(fetch_tipo)
    app.route('/put/tipo', method='POST')(store_tipo)
    app.route('/edit/tipo', method='POST')(update_tipo)
    app.route('/remove/tipo', method='POST')(delete_tipo)
hermagrini/pochoclo-system
api/rubro.py
# coding: utf-8 --
from api.lib import bottle
from api.lib.bottle import *
from api.models.rubro_model import Rubro
from json import dumps


def initRoutes(app=None):
    """Register the /rubro REST endpoints on *app* (bottle's default app if omitted)."""
    if not app:
        app = bottle.default_app()

    def list_rubros():
        return Rubro.get_all_json()

    def fetch_rubro():
        return Rubro.get_id_json(request.json["id"])

    def store_rubro():
        payload = request.json
        Rubro.put_rubro(
            id_query=payload["id"],
            idTipo=payload["idTipo"],
            nombre=payload["nombre"],
        )
        return "Rubro agregado exitosamente!"

    def update_rubro():
        # Same upsert as store_rubro; kept as a separate route for the client.
        payload = request.json
        Rubro.put_rubro(
            id_query=payload["id"],
            idTipo=payload["idTipo"],
            nombre=payload["nombre"],
        )
        return "Rubro editado exitosamente!"

    def delete_rubro():
        Rubro.remove_rubro(request.json["id"])
        return "Rubro eliminado exitosamente!"

    app.route('/rubro/traerListaRubros', method='POST')(list_rubros)
    app.route('/rubro/traerRubro', method='POST')(fetch_rubro)
    app.route('/put/rubro', method='POST')(store_rubro)
    app.route('/edit/rubro', method='POST')(update_rubro)
    app.route('/remove/rubro', method='POST')(delete_rubro)
hermagrini/pochoclo-system
api/asientos.py
# coding: utf-8 --
from api.lib import bottle
from api.lib.bottle import *
from api.models.movimiento_model import Cuenta, Movimiento, Asiento


def initRoutes(app=None):
    """Register the /cuenta and /asiento REST endpoints on *app*."""
    if not app:
        app = bottle.default_app()

    # --- Cuenta (account) endpoints -------------------------------------

    def list_cuentas():
        return Cuenta.get_all_json()

    def fetch_cuenta():
        return Cuenta.get_id_json(request.json["id"])

    def store_cuenta():
        payload = request.json
        Cuenta.put_cuenta(
            id_query=payload["id"],
            idRubro=payload["idRubro"],
            nombre=payload["nombre"],
        )
        return "Cuenta agregado exitosamente!"

    def update_cuenta():
        # Same upsert as store_cuenta; kept as a separate route for the client.
        payload = request.json
        Cuenta.put_cuenta(
            id_query=payload["id"],
            idRubro=payload["idRubro"],
            nombre=payload["nombre"],
        )
        return "Cuenta editado exitosamente!"

    def delete_cuenta():
        Cuenta.remove_cuenta(request.json["id"])
        return "Cuenta eliminado exitosamente!"

    # --- Asiento (accounting entry) endpoints ---------------------------

    def list_asientos():
        return Asiento.get_all_json()

    def fetch_asiento():
        return Asiento.get_id_json(request.json["id"])

    def store_asiento():
        # Create an empty Asiento, then record each movement twice: once on
        # the Asiento itself and once on the affected Cuenta.
        asiento_key = Asiento.put_asiento()
        for mov in request.json['movimientos']:
            cuenta = Cuenta.get_by_id(mov['id'])
            Movimiento.put_movimiento_asiento(
                id_query=asiento_key.id(),
                tipo=mov['tipo'],
                monto=mov['monto'],
                cuenta=cuenta.nombre,
            )
            Movimiento.put_movimiento_cuenta(
                id_query=mov['id'],
                tipo=mov['tipo'],
                monto=mov['monto'],
            )
        return "Asiento guardado exitosamente"

    app.route('/cuenta/traerListaCuentas', method='POST')(list_cuentas)
    app.route('/cuenta/traerCuenta', method='POST')(fetch_cuenta)
    app.route('/put/cuenta', method='POST')(store_cuenta)
    app.route('/edit/cuenta', method='POST')(update_cuenta)
    app.route('/remove/cuenta', method='POST')(delete_cuenta)
    app.route('/asiento/traerListaAsientos', method='POST')(list_asientos)
    app.route('/asiento/traerAsiento', method='POST')(fetch_asiento)
    app.route('/put/asiento', method='POST')(store_asiento)
hermagrini/pochoclo-system
api/models/tipo_model.py
from google.appengine.ext import ndb
from google.appengine.ext.ndb.key import Key

from api.lib.custom_handler import improve
from api.lib.bottle import json_dumps


class Tipo(ndb.Model):
    """Datastore entity for an account type ('tipo')."""

    nombre = ndb.StringProperty(required=True)

    # NOTE: these are @classmethods; first parameter renamed from `self`
    # to the idiomatic `cls` (not a caller-visible change).
    @classmethod
    def get_all_json(cls):
        """Return all Tipos as a JSON array (each dict carries its entity id)."""
        return json_dumps([improve(tipo.to_dict(), tipo.key.id()) for tipo in cls.query()])

    @classmethod
    def get_id_json(cls, id_query):
        """Return the Tipo with the given id as JSON."""
        tipo = cls.get_by_id(id_query)
        return json_dumps(improve(tipo.to_dict(), identificador=tipo.key.id()))

    @classmethod
    def put_tipo(cls, id_query, nombre):
        """Create or overwrite the Tipo with the given id; return its Key."""
        return cls(
            id=id_query,
            nombre=nombre
        ).put()

    @classmethod
    def remove_tipo(cls, id_query):
        """Delete the Tipo with the given id."""
        return Key("Tipo", id_query).delete()
hermagrini/pochoclo-system
api/lib/date_handler.py
def date_handler(obj):
    """JSON serialization fallback.

    Renders date/datetime-like values via their isoformat() method; any
    other value is passed through unchanged.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
hermagrini/pochoclo-system
api/models/__init__.py
# Package marker for api.models.
__author__ = 'Hernan'
hermagrini/pochoclo-system
api/lib/__init__.py
# Package marker for api.lib.
__author__ = 'heborlz'
hermagrini/pochoclo-system
main.py
# coding: utf-8 --
# Application entry point: builds the bottle app and wires up the per-domain
# route modules (tipo, rubro, asientos).
from api.lib import bottle
from api.lib.bottle import *
from api import tipo, asientos, rubro
# If handlers live in separate modules, models shouldn't be imported here :P
# from api.models import *

app = bottle.Bottle({'mode': 'development'})
# NOTE(review): assigning to the global `response` at import time -- bottle
# normally resets the response object per request, so this may have no
# effect on actual responses; confirm intended behavior.
response.content_type = 'application/json'


@app.route('/', method='GET')
def index():
    # Serve the single-page client shell.
    return static_file('client/index.html', root='./')


tipo.initRoutes(app)
rubro.initRoutes(app)
asientos.initRoutes(app)
Huanyu2019/Seedsortnet
model/seedsortnet.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 15:44:11 2021

@author: lihuanyu

SeedsortNet: a lightweight CNN classifier built from "shield" blocks, with
subspace spatial attention (SFSAM) and anti-aliased (max-pool + blur-pool)
downsampling.
"""
import torch.nn as nn
import torch
import math
import torchsummary as summary
from torchstat import stat
import torch.nn.functional as F
from blurpool import BlurPool

__all__ = ["seedsortnet","seedsortnet75"]


class SubSpace_SFSAM(nn.Module):
    """Spatial attention over one channel sub-group.

    Builds a single-channel spatial map from the per-pixel channel max and
    mean, softmax-normalizes it over all spatial positions, and reweights
    the input with it (plus a residual add).
    """

    def __init__(self, nin):
        super(SubSpace_SFSAM, self).__init__()
        # 7x7 conv fuses the stacked [max, mean] maps into one attention map.
        self.conv_7x7 = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, groups=1)
        self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
        self.relu_point = nn.ReLU(inplace=False)
        # dim=2 operates on the flattened H*W axis (see forward).
        self.softmax = nn.Softmax(dim=2)

    def forward(self, x):
        out_mean = torch.mean(x, dim=1, keepdim=True)
        out_max, _ = torch.max(x, dim=1, keepdim=True)
        out = [out_max, out_mean]
        out = torch.cat(out, dim=1)
        out = self.conv_7x7(out)
        out = self.bn_point(out)
        out = self.relu_point(out)
        m, n, p, q = out.shape
        # Normalize the attention map over all spatial positions.
        out = self.softmax(out.view(m, n, -1))
        out = out.view(m, n, p, q)
        out = out.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
        out = torch.mul(out, x)
        out = out + x  # residual connection
        return out


class SFSAM(nn.Module):
    """Subspace Feature Spatial Attention Module.

    Splits the channels into ``num_splits`` equal groups and applies an
    independent SubSpace_SFSAM to each, then concatenates the results.
    The h/w/nout arguments are stored but not used by the computation.
    """

    def __init__(self, nin, nout, h, w, num_splits):
        super(SFSAM, self).__init__()
        assert nin % num_splits == 0
        self.nin = nin
        self.nout = nout
        self.h = h
        self.w = w
        self.num_splits = num_splits
        self.subspaces = nn.ModuleList(
            [SubSpace_SFSAM(int(self.nin / self.num_splits)) for i in range(self.num_splits)]
        )

    def forward(self, x):
        group_size = int(self.nin / self.num_splits)
        sub_feat = torch.chunk(x, self.num_splits, dim=1)
        out = []
        for idx, l in enumerate(self.subspaces):
            out.append(self.subspaces[idx](sub_feat[idx]))
        out = torch.cat(out, dim=1)
        return out


class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm -> ReLU6 convenience unit."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu6 = nn.ReLU6(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return self.relu6(x)


def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of `divisor`, never below `min_value`,
    # and never dropping more than 10% below the original value
    # (MobileNet-style channel rounding).
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class subsampled(nn.Module):
    """Anti-aliased 2x downsampling: stride-1 max-pool followed by BlurPool."""

    def __init__(self, in_channels, out_channels, filter_size=2, **kwargs):
        super(subsampled, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=1)
        self.blurpool = BlurPool(in_channels, filt_size=filter_size, stride=2)

    def forward(self, x):
        x = self.maxpool(x)
        x = self.blurpool(x)
        return x


class Root_module(nn.Module):
    """Stem: a 3x3 conv followed by two parallel stride-2 branches
    (conv path and pooling path), concatenated on the channel axis.

    NOTE(review): pool_proj is accepted but unused, and branch2's
    `subsampled(16, 16)` hard-codes 16 channels -- this matches
    ch3x3_conv=16 as used by Seedsortnet below; confirm before reusing
    with other widths.
    """

    def __init__(self, in_channels, ch3x3_conv, ch1x1_first, ch3x3, pool_proj):
        super(Root_module, self).__init__()
        self.conv1 = BasicConv2d(in_channels, ch3x3_conv, kernel_size=3, stride=1, padding=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(ch3x3_conv, ch1x1_first, kernel_size=3, padding=1, stride=2),
            BasicConv2d(ch1x1_first, ch3x3, kernel_size=1)
        )
        self.branch2 = nn.Sequential(
            subsampled(16, 16)
        )

    def forward(self, x):
        x = self.conv1(x)
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        outputs = [branch1, branch2]
        return torch.cat(outputs, 1)


class shield_block(nn.Module):
    """Building block of SeedsortNet.

    Two configurations, selected by the channel counts:
      * inp != oup ("expansion" block): a 1x1 bottleneck mapping inp -> oup.
      * inp == oup ("identity" block): a depthwise/bottleneck path plus a
        parallel depthwise branch; their concatenation is added to the
        input as a residual.
    """

    def __init__(self, inp, oup, expand_ratio, expand_channel):
        # Flag read in forward(); assigned before super().__init__() in the
        # original -- harmless for a plain bool attribute.
        self.identity_map = False
        super(shield_block, self).__init__()
        hidden_dim = inp // expand_ratio
        if hidden_dim < oup / 6.:
            hidden_dim = math.ceil(oup / 6.)
        # Round the bottleneck width to a multiple of 16.
        # NOTE(review): source indentation was lost; the rounding is applied
        # unconditionally here -- confirm against upstream.
        hidden_dim = _make_divisible(hidden_dim, 16)
        # Split the output channels between the two parallel paths.
        oup1 = math.ceil((oup/6.) * expand_channel)
        oup2 = oup - oup1
        if inp != oup:
            self.conv = nn.Sequential(
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU6(inplace=True),
            )
        if inp == oup:
            self.identity_map = True
            self.conv = nn.Sequential(
                nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU6(inplace=True),
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.Conv2d(hidden_dim, oup1, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup1),
                nn.ReLU6(inplace=True),
                nn.Conv2d(oup1, oup1, 3, 1, 1, groups=oup1, bias=False),
                nn.BatchNorm2d(oup1),
            )
            # NOTE(review): branch1 is only used on the identity path, so it
            # is defined inside this branch; the original indentation was
            # ambiguous -- confirm parameter layout against checkpoints.
            self.branch1 = nn.Sequential(
                nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU6(inplace=True),
                nn.Conv2d(inp, oup2, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup2),
                nn.ReLU6(inplace=True),
            )

    def forward(self, x):
        out = self.conv(x)
        if self.identity_map == True:
            identity = x
            branch1 = self.branch1(x)
            out = [out, branch1]
            out = torch.cat(out, 1)
            out += identity
        return out


class Seedsortnet(nn.Module):
    """Full classifier: stem, four stages of shield blocks (each followed by
    SFSAM attention and anti-aliased downsampling), then GAP + linear head.

    Args:
        num_classes: output classes.
        width: channel-width multiplier for stages 1-4.
        groups: channel-group count for every SFSAM module.
        expand_channel: channel-split factor passed to shield blocks.
        init_weights: apply truncated-normal / BN initialization when True.
    """

    def __init__(self, num_classes=2, width=1, groups=4, expand_channel=4, init_weights=True):
        super(Seedsortnet, self).__init__()
        self.root_module = Root_module(3, 16, 32, 16, 16)  # [-1, 32, 112, 112]
        out1 = int(64 * width)
        out2 = int(128 * width)
        out3 = int(192 * width)
        out4 = int(256 * width)
        # Stage 1: 112x112
        self.stage1_up = shield_block(32, out1, 2, 1)
        self.stage1_1 = shield_block(out1, out1, 6, expand_channel)
        self.sfsam1 = SFSAM(out1, out1, 112, 112, groups)
        self.translayer1 = subsampled(out1, out1)
        # Stage 2: 56x56
        self.stage2_up = shield_block(out1, out2, 2, 1)
        self.stage2_1 = shield_block(out2, out2, 6, expand_channel)
        self.stage2_2 = shield_block(out2, out2, 6, expand_channel)
        self.stage2_3 = shield_block(out2, out2, 6, expand_channel)
        self.sfsam2 = SFSAM(out2, out2, 56, 56, groups)
        self.translayer2 = subsampled(out2, out2)
        # Stage 3: 28x28
        self.stage3_up = shield_block(out2, out3, 2, 1)
        self.stage3_1 = shield_block(out3, out3, 6, expand_channel)
        self.stage3_2 = shield_block(out3, out3, 6, expand_channel)
        self.stage3_3 = shield_block(out3, out3, 6, expand_channel)
        self.stage3_4 = shield_block(out3, out3, 6, expand_channel)
        self.sfsam3 = SFSAM(out3, out3, 28, 28, groups)
        self.translayer3 = subsampled(out3, out3)
        # Stage 4: 14x14
        self.stage4_up = shield_block(out3, out4, 2, 1)
        self.stage4_1 = shield_block(out4, out4, 6, expand_channel)
        self.stage4_2 = shield_block(out4, out4, 6, expand_channel)
        self.stage4_3 = shield_block(out4, out4, 6, expand_channel)
        self.sfsam4 = SFSAM(out4, out4, 14, 14, groups)
        self.translayer4 = subsampled(out4, out4)
        # Head.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(out4, num_classes))
        if init_weights == True:
            self._initialize_weights()

    def _initialize_weights(self):
        # Conv/Linear weights: truncated normal (scale 0.01) via scipy;
        # BatchNorm: gamma=1, beta=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                X = stats.truncnorm(-2, 2, scale=0.01)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.root_module(x)
        x = self.stage1_up(x)
        x = self.stage1_1(x)
        x = self.sfsam1(x)
        x = self.translayer1(x)
        x = self.stage2_up(x)
        x = self.stage2_1(x)
        x = self.stage2_2(x)
        x = self.stage2_3(x)
        x = self.sfsam2(x)
        x = self.translayer2(x)
        x = self.stage3_up(x)
        x = self.stage3_1(x)
        x = self.stage3_2(x)
        x = self.stage3_3(x)
        x = self.stage3_4(x)
        x = self.sfsam3(x)
        x = self.translayer3(x)
        x = self.stage4_up(x)
        x = self.stage4_1(x)
        x = self.stage4_2(x)
        x = self.stage4_3(x)
        x = self.sfsam4(x)
        x = self.translayer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x


def seedsortnet(**kwargs):
    """ Constructs a Seedsortnet model """
    return Seedsortnet(**kwargs)


def seedsortnet75(**kwargs):
    """ Constructs a Seedsortnet model (0.75x channel width) """
    return Seedsortnet(width=0.75, **kwargs)


if __name__ == '__main__':
    # Quick inspection: print the architecture and per-layer stats for a
    # 3x224x224 input.
    model = seedsortnet(groups=4)
    model.eval()
    print(model)
    stat(model, (3, 224, 224))
lopuhin/WSI-LDA
w2v.py
#!/usr/bin/env python
import argparse
from collections import defaultdict, Counter

import numpy as np
from gensim.models import Word2Vec

import kmeans
import utils


def word_clusters_neighbours(w2v, word, n_senses, *, window):
    """Cluster the word2vec vectors of `word`'s top-100 nearest neighbours
    into `n_senses` cosine-distance k-means clusters.

    Returns (words, km): the neighbour words (np.array of str) and the
    fitted kmeans.KMeans object. `window` is unused here beyond a sanity
    assert (kept for signature parity with word_clusters_ctx).
    """
    assert window
    similar = w2v.most_similar(positive=[word], topn=100)
    words = np.array([w for w, _ in similar])
    word_vectors = np.array([w2v[w] for w in words])
    km = kmeans.KMeans(word_vectors, k=n_senses, metric='cosine', verbose=0)
    return words, km
# Cluster-merge similarity threshold used by run_all for this method.
word_clusters_neighbours.threshold = 0.75


def word_clusters_ctx(w2v, word, n_senses, min_weight=1.5, min_count=10, *, window):
    """Cluster `word`'s frequent, high-weight context words (within the given
    window) by their word2vec vectors.

    Context words must occur at least `min_count` times, have a weight above
    `min_weight` (weights come from utils.weights_contexts), and be present
    in the w2v vocabulary. Returns (words, km) like word_clusters_neighbours.
    """
    weights, contexts = utils.weights_contexts(word, window)
    words = [
        w for w, cnt in Counter(w for ctx in contexts for w in ctx).items()
        if cnt >= min_count and weights.get(w, 0) > min_weight and w in w2v]
    print(len(words))
    w2v_vecs = np.array([w2v[w] for w in words])
    km = kmeans.KMeans(w2v_vecs, k=n_senses, metric='cosine', verbose=0)
    words = np.array(words)
    return words, km
word_clusters_ctx.threshold = 0.55


def run_all(*, clustering, model, word, n_runs, n_senses, window, compact):
    """Run the chosen clustering for one word (or all words) n_runs times,
    merge similar sense clusters, and print the (merged) senses.

    NOTE(review): the name `words` is rebound three times in this function
    (target word list, clustering output, dict-loop variable); iteration
    over the original list is unaffected, but tread carefully when editing.
    """
    clustering_fn = globals()['word_clusters_' + clustering]
    print('threshold', clustering_fn.threshold, sep='\t')
    w2v = Word2Vec.load(model)
    words = [word] if word else utils.all_words
    for word in words:
        print()
        print(word)
        for _ in range(n_runs):
            words, km = clustering_fn(w2v, word, n_senses, window=window)
            # sense_id -> [(word, corpus frequency), ...] per k-means cluster.
            sense_words = {sense_id: [
                (w, w2v.vocab[w].count) for w in words[km.Xtocentre == sense_id]]
                for sense_id in range(n_senses)}
            # Map near-duplicate clusters onto each other.
            mapping = utils.merge_clusters(
                km.centres, threshold=clustering_fn.threshold)
            if not compact:
                utils.print_senses(sense_words)
                utils.print_cluster_sim(km.centres)
                # NOTE(review): source indentation lost; assumed this print
                # belongs to the `if not compact` debug output -- confirm.
                print(mapping)
            merged_sense_words = defaultdict(list)
            for sense_id, words in sense_words.items():
                merged_sense_words[mapping[sense_id]].extend(words)
            utils.print_senses(merged_sense_words)


def main():
    """CLI entry point: parse arguments and delegate to run_all."""
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('clustering', choices=['ctx', 'neighbours'])
    arg('--n-senses', type=int, default=6)
    arg('--n-runs', type=int, default=1)
    arg('--model', default='model.pkl')
    arg('--window', type=int, default=10)
    arg('--compact', action='store_true')
    arg('--word')
    params = vars(parser.parse_args())
    print(params)
    run_all(**params)


if __name__ == '__main__':
    main()
lopuhin/WSI-LDA
run_adagram.py
#!/usr/bin/env python import json import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument('filename') args = parser.parse_args() with open(args.filename) as f: data = json.load(f) for word, senses in sorted(data.items()): print() print(word) for idx, sense in enumerate( sorted(senses, key=lambda s: s['weight'], reverse=True)): print(idx, '{:.2f}'.format(sense['weight']), ' '.join(w for w, _, _ in sense['neighbours']), sep='\t') if __name__ == '__main__': main()
lopuhin/WSI-LDA
kmeans.py
# kmeans.py using any of the 20-odd metrics in scipy.spatial.distance
# kmeanssample 2 pass, first sample sqrt(N)

import random
import logging

import numpy as np
from scipy.spatial.distance import cdist  # $scipy/spatial/distance.py
    # http://docs.scipy.org/doc/scipy/reference/spatial.html
from scipy.sparse import issparse  # $scipy/sparse/csr.py

# X sparse, any cdist metric: real app ?
# centres get dense rapidly, metrics in high dim hit distance whiteout
# vs unsupervised / semi-supervised svm


def kmeans(X, centres, delta=.001, maxiter=10, metric="euclidean", p=2,
           verbose=1):
    """ centres, Xtocentre, distances = kmeans( X, initial centres ... )
    in:
        X N x dim  may be sparse
        centres k x dim: initial centres, e.g. random.sample( X, k )
        delta: relative error, iterate until the average distance to centres
            is within delta of the previous average distance
        maxiter
        metric: any of the 20-odd in scipy.spatial.distance
            "chebyshev" = max, "cityblock" = L1, "minkowski" with p=
            or a function( Xvec, centrevec ), e.g. Lqmetric below
        p: for minkowski metric -- local mod cdist for 0 < p < 1 too
        verbose: 0 silent, 2 prints running distances
    out:
        centres, k x dim
        Xtocentre: each X -> its nearest centre, ints N -> k
        distances, N
    see also: kmeanssample below, class Kmeans below.
    """
    if not issparse(X):
        X = np.asanyarray(X)  # ?
    # Work on a copy so the caller's initial centres are not mutated.
    centres = centres.todense() if issparse(centres) \
        else centres.copy()
    N, dim = X.shape
    k, cdim = centres.shape
    if dim != cdim:
        raise ValueError(
            "kmeans: X %s and centres %s must have the same number of columns"
            % (X.shape, centres.shape))
    if verbose:
        logging.debug(
            'kmeans: X %s centres %s delta=%.2g maxiter=%d metric=%s',
            X.shape, centres.shape, delta, maxiter, metric)
    allx = np.arange(N)
    prevdist = 0
    for jiter in range(1, maxiter + 1):
        # Assignment step: distance of every point to every centre.
        D = cdist_sparse(X, centres, metric=metric, p=p)  # |X| x |centres|
        xtoc = D.argmin(axis=1)  # X -> nearest centre
        distances = D[allx, xtoc]
        avdist = distances.mean()  # median ?
        if verbose >= 2:
            logging.debug('kmeans: av |X - nearest centre| = %.4g', avdist)
        # Converged when the average distance improved by less than delta
        # (relative), or when the iteration budget is spent.
        if (1 - delta) * prevdist <= avdist <= prevdist \
                or jiter == maxiter:
            break
        prevdist = avdist
        # Update step: each centre becomes the mean of its members.
        for jc in range(k):  # (1 pass in C)
            c = np.where(xtoc == jc)[0]
            if len(c) > 0:
                centres[jc] = X[c].mean(axis=0)
    if verbose:
        logging.debug('kmeans: %d iterations  cluster sizes: %s',
                      jiter, np.bincount(xtoc))
    if verbose >= 2:
        # Per-cluster 50th / 90th percentile radii, for diagnostics only.
        r50 = np.zeros(k)
        r90 = np.zeros(k)
        for j in range(k):
            dist = distances[xtoc == j]
            if len(dist) > 0:
                r50[j], r90[j] = np.percentile(dist, (50, 90))
        logging.debug('kmeans: cluster 50 %% radius %s', r50.astype(int))
        logging.debug('kmeans: cluster 90 %% radius %s', r90.astype(int))
        # scale L1 / dim, L2 / sqrt(dim) ?
    return centres, xtoc, distances


def kmeanssample(X, k, nsample=0, **kwargs):
    """ 2-pass kmeans, fast for large N:
        1) kmeans a random sample of nsample ~ sqrt(N) from X
        2) full kmeans, starting from those centres
    """
    # merge w kmeans ? mttiw
    # v large N: sample N^1/2, N^1/2 of that
    # seed like sklearn ?
    N, dim = X.shape
    if nsample == 0:
        nsample = max(2 * np.sqrt(N), 10 * k)
    Xsample = randomsample(X, int(nsample))
    pass1centres = randomsample(X, int(k))
    samplecentres = kmeans(Xsample, pass1centres, **kwargs)[0]
    return kmeans(X, samplecentres, **kwargs)


def cdist_sparse(X, Y, **kwargs):
    """ -> |X| x |Y| cdist array, any cdist metric
        X or Y may be sparse -- best csr
    """
    # todense row at a time, v slow if both v sparse
    sxy = 2 * issparse(X) + issparse(Y)
    if sxy == 0:
        if kwargs.get('metric') == 'cosine':
            # otherwise, there are some NaNs (???)
            # Compute cosine *distance* as 1 - similarity via sklearn,
            # which avoids the NaNs cdist produces here.
            from sklearn.metrics.pairwise import cosine_similarity
            z = cosine_similarity(X, Y)
            z *= -1
            z += 1
            return z
        else:
            return cdist(X, Y, **kwargs)
    # At least one operand is sparse: densify one row/column at a time.
    d = np.empty((X.shape[0], Y.shape[0]), np.float64)
    if sxy == 2:
        for j, x in enumerate(X):
            d[j] = cdist(x.todense(), Y, **kwargs) [0]
    elif sxy == 1:
        for k, y in enumerate(Y):
            d[:, k] = cdist(X, y.todense(), **kwargs) [0]
    else:
        for j, x in enumerate(X):
            for k, y in enumerate(Y):
                d[j, k] = cdist(x.todense(), y.todense(), **kwargs) [0]
    return d


def randomsample(X, n):
    """ random.sample of the rows of X
        X may be sparse -- best csr
    """
    sampleix = random.sample(range(X.shape[0]), int(n))
    return X[sampleix]


def nearestcentres(X, centres, metric="euclidean", p=2):
    """ each X -> nearest centre, any metric
            euclidean2 (~ withinss) is more sensitive to outliers,
            cityblock (manhattan, L1) less sensitive
    """
    D = cdist(X, centres, metric=metric, p=p)  # |X| x |centres|
    return D.argmin(axis=1)


def Lqmetric(x, y=None, q=.5):
    # yes a metric, may increase weight of near matches; see...
    return (np.abs(x - y) ** q).mean() if y is not None \
        else (np.abs(x) ** q).mean()


class KMeans(object):
    """ km = KMeans( X, k= or centres=, ... )
        in: either initial centres= for kmeans
            or k= [nsample=] for kmeanssample
        out: km.centres, km.Xtocentre, km.distances
        iterator:
            for jcentre, J in km:
                clustercentre = centres[jcentre]
                J indexes e.g. X[J], classes[J]
    """
    def __init__(self, X, k=0, centres=None, nsample=0, **kwargs):
        self.X = X
        if centres is None:
            # No initial centres given: use the 2-pass sampled kmeans.
            self.centres, self.Xtocentre, self.distances = kmeanssample(
                X, k=k, nsample=nsample, **kwargs)
        else:
            self.centres, self.Xtocentre, self.distances = kmeans(
                X, centres, **kwargs)

    def __iter__(self):
        # Yields (cluster index, boolean membership mask over X).
        for jc in range(len(self.centres)):
            yield jc, (self.Xtocentre == jc)
lopuhin/WSI-LDA
tf_embeddings.py
"""Export AdaGram sense vectors as TensorBoard embeddings.

Writes a checkpoint plus ``labels.tsv`` metadata into the output directory so
the vectors can be explored with the TensorBoard embedding projector.
"""
import argparse
from pathlib import Path
from typing import List

import adagram
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector


def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('model')
    arg('output')
    arg('--anchor-words', help='File with anchor words')
    arg('--anchor-add', type=int, default=500,
        help='Number of (closest) words to add to anchor words')
    arg('--freq-start', type=int, default=0)
    arg('--freq-stop', type=int, default=1000)
    args = parser.parse_args()
    model = adagram.VectorModel.load(args.model)
    # Either expand a given anchor-word list, or take a frequency slice.
    if args.anchor_words:
        words = get_anchor_words(model, args.anchor_words, args.anchor_add)
    else:
        words = get_freq_words(model, args.freq_start, args.freq_stop)
    save_embeddings(model, Path(args.output), words)


def get_freq_words(model, freq_start, freq_stop):
    # id2word is frequency-ordered, so a slice is a frequency band.
    return model.dictionary.id2word[freq_start:freq_stop]


def get_anchor_words(
        model: adagram.VectorModel,
        anchor_words_file: str,
        n_add: int,
        ) -> List[str]:
    """ Return anchor words and words closest to them
    (from n_add closest senses).
    """
    with open(anchor_words_file, 'rt') as f:
        anchor_words = [line.strip() for line in f]
    # model.In holds per-word, per-sense vectors; flattening gives one row
    # per (word, sense) pair indexed as word_id * n_senses + sense_idx.
    n_senses = model.In.shape[1]
    dim = model.In.shape[-1]
    anchor_indices = []
    for w in anchor_words:
        idx = model.dictionary.word2id.get(w)
        if idx is not None:
            for sense_idx, _ in model.word_sense_probs(w):
                anchor_indices.append(idx * n_senses + sense_idx)
    # Normalize all sense vectors, then score each sense by its best cosine
    # similarity to any anchor sense.
    senses = model.In.reshape(-1, dim) / model.InNorms.reshape(-1)[:, None]
    anchor_senses = senses[anchor_indices]
    closenesses = senses @ anchor_senses.T
    closenesses[np.isnan(closenesses)] = 0  # zero-norm rows produce NaNs
    closenesses = closenesses.max(axis=1)
    add_indices = np.argpartition(closenesses, -n_add)[-n_add:]
    # Set comprehension de-duplicates words whose several senses were picked.
    return list({model.dictionary.id2word[idx // n_senses]
                 for idx in add_indices})


def save_embeddings(model: adagram.VectorModel, output: Path,
                    words: List[str]):
    # One embedding row (and one label line) per (word, sense) pair.
    labels = []
    senses = []
    for word in words:
        for sense, _ in model.word_sense_probs(word):
            labels.append('{} #{}'.format(word, sense))
            v = model.sense_vector(word, sense)
            senses.append(v / np.linalg.norm(v))
    output.mkdir(exist_ok=True)
    labels_path = output.joinpath('labels.tsv')
    labels_path.write_text('\n'.join(labels))
    senses = np.array(senses)
    with tf.Session() as session:
        embedding_var = tf.Variable(senses, trainable=False, name='senses')
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.save(session, str(output.joinpath('model.ckpt')))
        # NOTE(review): tf.train.SummaryWriter is the pre-1.0 TF API
        # (tf.summary.FileWriter later); this file targets an old TF version.
        summary_writer = tf.train.SummaryWriter(str(output))
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name
        embedding.metadata_path = str(labels_path)
        projector.visualize_embeddings(summary_writer, config)


if __name__ == '__main__':
    main()
lopuhin/WSI-LDA
lda.py
#!/usr/bin/env python
"""Word-sense induction with LDA topic models, one model per target word.

Senses are LDA topics over a word's context corpus; close topics are merged,
and results are scored against labeled contexts (ARI / V-measure) when a
labeled file is available.
"""
import logging
import argparse
import os.path

import numpy as np
from gensim.models import LdaModel
import rl_wsd_labeled
from sklearn.metrics import v_measure_score, adjusted_rand_score

import utils


def word_lda(word, num_topics, window, limit=None):
    # Build a context corpus for ``word`` and fit LDA on it.
    dictionary, corpus, weights_flt = \
        utils.prepare_corpus(word, window=window, limit=limit)
    lda = LdaModel(
        corpus, id2word=dictionary, num_topics=num_topics,
        passes=4, iterations=100, alpha='auto')
    return lda, dictionary, weights_flt


def get_scores(lda, dictionary, word, weights_flt, mapping=None):
    """Score predicted topics against labeled contexts.

    Returns (ARI, V-measure) or None when no labeled file exists for
    ``word``. ``mapping`` optionally remaps topic ids (after merging).
    """
    labeled_fname = rl_wsd_labeled.contexts_filename('nouns', 'RuTenTen', word)
    if os.path.exists(labeled_fname):
        _senses, contexts = rl_wsd_labeled.get_contexts(labeled_fname)
        documents = [dictionary.doc2bow(weights_flt(utils.normalize(ctx)))
                     for ctx, _ in contexts]
        # inference() returns per-document topic weights; argmax = top topic.
        gamma, _ = lda.inference(documents)
        pred_topics = gamma.argmax(axis=1)
        if mapping:
            pred_topics = np.array([mapping[t] for t in pred_topics])
        true_labels = np.array([int(ans) for _, ans in contexts])
        ari = adjusted_rand_score(true_labels, pred_topics)
        v_score = v_measure_score(true_labels, pred_topics)
        return ari, v_score


def lda_centers(lda, dictionary):
    # Dense (num_topics x vocab) matrix of topic-word probabilities,
    # used as cluster centres for merging/similarity.
    topics = []
    for topic_id in range(lda.num_topics):
        topic = np.zeros(len(dictionary))
        topics.append(topic)
        for idx, v in lda.get_topic_terms(topic_id, topn=len(dictionary)):
            topic[idx] = v
    return np.array(topics)


def run_all(*, word, n_runs, limit, n_senses, window):
    """Train/evaluate LDA for one word (or all of utils.all_words)."""
    words = [word] if word else utils.all_words
    results_by_word = utils.apply_to_words(
        word_lda, words, n_runs,
        num_topics=n_senses, limit=limit, window=window)
    merge_threshold = 0.2
    print('threshold', merge_threshold, sep='\t')
    aris, v_scores = [], []
    for word, results in sorted(results_by_word.items()):
        print()
        print(word)
        word_aris, word_v_scores = [], []
        for lda, dictionary, weights_flt in results:
            sense_words = {sense_id: [
                (dictionary[w], v) for w, v in lda.get_topic_terms(
                    sense_id, topn=len(dictionary))]
                for sense_id in range(lda.num_topics)}
            utils.print_senses(sense_words)
            centres = lda_centers(lda, dictionary)
            utils.print_cluster_sim(centres)
            mapping = utils.merge_clusters(centres, threshold=merge_threshold)
            # Sum centres of merged topics into their new cluster id.
            new_centers = {}
            for old_id, new_id in mapping.items():
                if new_id in new_centers:
                    new_centers[new_id] += centres[old_id]
                else:
                    new_centers[new_id] = centres[old_id]
            utils.print_senses(
                {sense_id: [(dictionary.id2token[idx], v)
                            for idx, v in enumerate(center)]
                 for sense_id, center in new_centers.items()})
            # Scores before and after topic merging (merged scores are the
            # ones aggregated below).
            scores = get_scores(lda, dictionary, word, weights_flt)
            if scores:
                ari, v_score = scores
                print('ARI: {:.3f}, V-score: {:.3f}'.format(ari, v_score))
                m_ari, m_v_score = get_scores(
                    lda, dictionary, word, weights_flt, mapping=mapping)
                print('ARI: {:.3f}, V-score: {:.3f}'.format(m_ari, m_v_score))
                word_aris.append(m_ari)
                word_v_scores.append(m_v_score)
        if len(word_aris) > 1 or len(word_v_scores) > 1:
            print('ARI: {:.3f}, V-score: {:.3f}'.format(
                np.mean(word_aris), np.mean(word_v_scores)))
        aris.extend(word_aris)
        v_scores.extend(word_v_scores)
    if len(aris) > 1 or len(v_scores) > 1:
        print()
        print('ARI: {:.3f}, V-score: {:.3f}'.format(
            np.mean(aris), np.mean(v_scores)))


def main():
    logging.basicConfig(level=logging.WARNING)
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--n-senses', type=int, default=6)
    arg('--limit', type=int)
    arg('--n-runs', type=int, default=3)
    arg('--window', type=int, default=10)
    arg('--word')
    params = vars(parser.parse_args())
    print(params)
    run_all(**params)


if __name__ == '__main__':
    main()
lopuhin/WSI-LDA
lsi.py
<reponame>lopuhin/WSI-LDA #!/usr/bin/env python import logging import argparse from gensim.models.lsimodel import LsiModel import utils def word_lsi(word, num_topics, window, limit=None): dictionary, corpus, _ = \ utils.prepare_corpus(word, window=window, limit=limit) lsi = LsiModel(corpus, id2word=dictionary, num_topics=num_topics) return lsi def run_all(*, word, n_runs, limit, n_senses, window): words = [word] if word else utils.all_words results_by_word = utils.apply_to_words( word_lsi, words, n_runs, n_senses=n_senses, limit=limit, window=window) for word, results in sorted(results_by_word.items()): print() print(word) for lsi in results: sense_words = {sense_id: lsi.show_topic(sense_id) for sense_id in range(lsi.num_topics)} utils.print_senses(sense_words) def main(): logging.basicConfig(level=logging.WARNING) parser = argparse.ArgumentParser() arg = parser.add_argument arg('--n-senses', type=int, default=6) arg('--limit', type=int) arg('--n-runs', type=int, default=1) arg('--window', type=int, default=10) arg('--word') params = vars(parser.parse_args()) print(params) run_all(**params) if __name__ == '__main__': main()
lopuhin/WSI-LDA
utils.py
"""Shared helpers: corpus loading, normalization, sense printing and
cluster merging for the WSI experiments."""
import os.path
import random
import re
from functools import partial
from concurrent.futures import ProcessPoolExecutor
from collections import defaultdict

from gensim import corpora
import pymystem3
from sklearn.metrics.pairwise import cosine_similarity


# Target nouns used across all experiments.
all_words = [
    # multiple dictionary senses
    'альбом', 'билет', 'блок', 'вешалка', 'вилка', 'винт', 'горшок',
    # single sense in the dictionary
    'вата', 'бык', 'байка', 'баян', 'бомба',
    # really single sense
    'борщ', 'воск', 'бухгалтер',
]

# Shared lemmatizer instance (Mystem startup is expensive).
MyStem = pymystem3.Mystem()


def load_stopwords():
    # One stopword per line; only the first whitespace-separated token
    # of each non-blank line is used.
    with open('stopwords.txt') as f:
        return {line.strip().split()[0] for line in f if line.strip()}

stopwords = load_stopwords()


def load_contexts(root, word, window=None):
    """Load contexts for ``word`` from ``root/<word>.txt``.

    Each line is "left<TAB>word<TAB>right"; ``window`` trims each side to
    that many tokens. Stopwords and the target word itself are dropped.
    """
    with open(os.path.join(root, '{}.txt'.format(word))) as f:
        contexts = []
        for line in f:
            left, _, right = line.split('\t')
            left, right = [x.strip().split() for x in [left, right]]
            if window:
                left = left[-window:]
                right = right[:window]
            ctx = left + right
            contexts.append(
                [w for w in ctx if w not in stopwords and w != word])
    return contexts


def weights_contexts(word, window):
    # Paths are relative to the working directory — TODO confirm layout.
    weights = load_weights('../corpora/ad-nouns/cdict/', word)
    contexts = load_contexts('../corpora/ad-nouns-contexts-100k', word,
                             window)
    return weights, contexts


word_re = re.compile(r'\w+', re.U)

def normalize(ctx):
    """Lemmatize a (left, word, right) context triple into a token list,
    dropping stopwords; digits are collapsed to '2' first."""
    left, _, right = ctx
    text = ' '.join([left, right]).strip()
    text = re.sub(r'\d', '2', text)
    return [w for w in MyStem.lemmatize(' '.join(word_re.findall(text)))
            if w not in stopwords and w.strip()]


def load_weights(root, word):
    # File format: "<word> <float weight>" per line.
    with open(os.path.join(root, word + '.txt')) as f:
        return {w: float(weight) for w, weight in (l.split() for l in f)}


def print_senses(sense_words, topn=5):
    """Print the ``topn`` highest-weighted words of each sense, one line
    per sense id."""
    for sense_id, words in sorted(sense_words.items()):
        words = list(words)
        words.sort(key=lambda x: x[1], reverse=True)
        print(sense_id, ' '.join(w for w, _ in words[:topn]), sep='\t')


def print_cluster_sim(centers):
    # Print the upper triangle of the pairwise cosine-similarity matrix.
    sim_matrix = cosine_similarity(centers, centers)
    print('\t'.join('{}'.format(j) for j, _ in enumerate(sim_matrix)))
    for i, row in enumerate(sim_matrix):
        print('\t'.join(
            ('{:.2f}'.format(x) if i < j else ' ')
            for j, x in enumerate(row)), i, sep='\t')


def merge_clusters(centers, threshold):
    ''' Merge clusters that are closer than the given threshold.
    Return mapping: old clusters -> new clusters
    (new ids are renumbered to be contiguous from 0).
    '''
    sim_matrix = cosine_similarity(centers, centers)
    mapping = {i: i for i, _ in enumerate(centers)}
    id_gen = len(mapping)
    for i, row in enumerate(sim_matrix):
        for j, sim in enumerate(row):
            if i > j and sim >= threshold:
                # merge (i, j): move every member of both clusters'
                # current groups into one fresh id (transitive merge).
                new_id = id_gen
                id_gen += 1
                for id_old in [i, j]:
                    old_new = mapping[id_old]
                    for old, new in list(mapping.items()):
                        if new == old_new:
                            mapping[old] = new_id
    # Renumber the surviving group ids to 0..n-1.
    remap = {new: i for i, new in enumerate(set(mapping.values()))}
    return {old: remap[new] for old, new in mapping.items()}


def weights_flt(weights, min_weight, ctx):
    # Keep only context words whose weight exceeds the cutoff.
    return [w for w in ctx if weights.get(w, 0) > min_weight]


def prepare_corpus(word, *, window, min_weight=1.0, limit=None):
    """Build (dictionary, bow corpus, filter fn) for ``word``'s contexts.

    Contexts are weight-filtered, shuffled, and optionally truncated to
    ``limit``. The returned filter fn is reused for scoring new contexts.
    """
    weights, contexts = weights_contexts(word, window)
    _weights_flt = partial(weights_flt, weights, min_weight)
    contexts = [ctx for ctx in map(_weights_flt, contexts) if ctx]
    random.shuffle(contexts)
    if limit:
        contexts = contexts[:limit]
    print(len(contexts))
    dictionary = corpora.Dictionary(contexts)
    corpus = [dictionary.doc2bow(ctx) for ctx in contexts]
    return dictionary, corpus, _weights_flt


def apply_to_words(fn, words, n_runs, **kwargs):
    """Run ``fn(word, **kwargs)`` ``n_runs`` times per word in a process
    pool; return {word: [result, ...]}. Propagates worker exceptions via
    ``Future.result()``."""
    futures = []
    with ProcessPoolExecutor(max_workers=4) as e:
        for word in words:
            futures.extend(
                (word, e.submit(fn, word, **kwargs)) for _ in range(n_runs))
    results_by_word = defaultdict(list)
    for word, f in futures:
        results_by_word[word].append(f.result())
    return results_by_word
lopuhin/WSI-LDA
hdp.py
#!/usr/bin/env python import logging import argparse from gensim.models import HdpModel import utils def word_hdp(word, *, window, limit=None): dictionary, corpus, _ = \ utils.prepare_corpus(word, window=window, limit=limit) hdp = HdpModel(corpus, id2word=dictionary) return hdp def run_all(*, word, n_runs, limit, n_senses, window): words = [word] if word else utils.all_words results_by_word = utils.apply_to_words( word_hdp, words, n_runs, limit=limit, window=window) for word, results in sorted(results_by_word.items()): print() print(word) for hdp in results: sense_words = dict(hdp.show_topics( topics=n_senses, topn=5, formatted=False)) utils.print_senses(sense_words) def main(): logging.basicConfig(level=logging.WARNING) parser = argparse.ArgumentParser() arg = parser.add_argument arg('--n-senses', type=int, default=6) arg('--limit', type=int) arg('--n-runs', type=int, default=1) arg('--window', type=int, default=10) arg('--word') params = vars(parser.parse_args()) print(params) run_all(**params) if __name__ == '__main__': main()
Egolas/gpu-statistics
mysql_utils.py
"""Helpers for inserting arbitrary dict records into SQLite/MySQL,
auto-creating tables and columns from the record's keys and value types."""
import sqlite3
import time
# NOTE(review): the ``time`` *module* import above is immediately shadowed
# by ``time`` (the datetime.time *type*) imported below; only the type is
# usable in this module.
from datetime import date, datetime, time
from typing import Union

import mysql.connector


def insert_database(path: str, table: str, data: dict):
    """Insert one record (dict) into an existing SQLite table.

    Best-effort: execution errors are printed, not raised.
    """
    database = sqlite3.connect(path)
    keys = ', '.join(data.keys())
    holder = ', '.join(['?'] * len(data.keys()))
    # NOTE(review): table/column names are interpolated directly —
    # only safe for trusted identifiers.
    command = f'insert into {table} ({keys}) values ({holder})'
    values = list(data.values())
    try:
        database.execute(command, values)
    except Exception as error:
        print(error)
    database.commit()
    database.close()


def auto_insert_database(config: dict, data: dict,
                         table: Union[str, None] = None):
    """Insert one record into a MySQL table, creating the table and any
    missing columns on the fly from the record's keys/value types.

    Keys are lowercased; None values are inserted as SQL NULL.
    """
    data = key_to_lower(data)
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    # check table
    table_query = "show tables"
    cursor.execute(table_query)
    tables = set(cursor)  # rows are 1-tuples of table names
    if (table,) not in tables:
        auto_create_table(cursor, data, table)
    # check column
    else:
        column_query = f'desc `{table}`'
        cursor.execute(column_query)
        columns = set(map(lambda c: c[0].lower(), cursor))
        keys = set(data.keys())
        diff = keys - columns
        len_diff = len(diff)
        if len_diff > 0:
            auto_add_column(cursor, diff, data, table)
    # insert data: named placeholders for real values, literal null for None
    keys = ', '.join(map(lambda k: f'`{k}`', data.keys()))
    holder = ', '.join(map(
        lambda k: f'%({k})s' if data[k] is not None else 'null',
        data.keys()))
    insert_query = f'insert into `{table}` ({keys}) values ({holder})'
    data = convert_unknown_type_to_str(data)
    cursor.execute(insert_query, data)
    cnx.commit()
    cursor.close()
    cnx.close()


def key_to_lower(data: dict):
    # Return a copy of ``data`` with all keys lowercased.
    new_dict = {}
    for k, v in data.items():
        new_dict[k.lower()] = v
    return new_dict


def convert_unknown_type_to_str(data: dict):
    # Stringify values that would map to the 'text' column type; drop None
    # values (their placeholders were already emitted as literal nulls).
    new_dict = {}
    for k, v in data.items():
        if v is None:
            continue
        new_dict[k] = str(v) if convert_type(type(v)) == 'text' else v
    return new_dict


def convert_type(python_type: type) -> str:
    """Map a Python type to a MySQL column type name ('text' by default)."""
    if python_type == int:
        mysql_type = 'int'
    elif python_type == float:
        mysql_type = 'double'
    elif python_type == str:
        mysql_type = 'text'
    elif python_type == bool:
        mysql_type = 'boolean'
    elif python_type == date:
        mysql_type = 'date'
    elif python_type == datetime:
        mysql_type = 'datetime'
    elif python_type == time:
        mysql_type = 'time'
    else:
        mysql_type = 'text'
    return mysql_type


def auto_create_table(cursor, data: dict, table: str):
    # Create ``table`` with an auto-increment primary key ``table_no`` and
    # one nullable column per record key, typed from the record's values.
    field_defines = ','.join(map(
        lambda item: f'`{item[0]}` {convert_type(type(item[1]))} null',
        data.items()))
    create_query = f"create table `{table}`(`table_no` int NOT NULL AUTO_INCREMENT, {field_defines}, " \
                   f"PRIMARY KEY (`table_no`))"
    cursor.execute(create_query)


def auto_add_column(cursor, diff_keys: set, data: dict, table: str):
    # Add one nullable column per key missing from the existing table.
    add_query = "alter table `{}` add `{}` {} null"
    for key in diff_keys:
        cursor.execute(
            add_query.format(table, key, convert_type(type(data[key]))))


def get_table_count(config: dict, table: str):
    """Return the row holding max(table_no) for ``table``.

    NOTE(review): returns the raw one-element row tuple, not a scalar.
    """
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    # check table
    table_query = f"select max(`table_no`) from `{table}`"
    cursor.execute(table_query)
    count = list(cursor)[0]
    cursor.close()
    cnx.close()
    return count


def query_table_where_table_no_greater_than(config: dict, table: str,
                                            table_no: int):
    """Return all rows of ``table`` with table_no greater than the given
    value (used for incremental reads)."""
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    query = f"select * from `{table}` where `table_no` > {table_no}"
    cursor.execute(query)
    result = list(cursor)
    cursor.close()
    cnx.close()
    return result
Egolas/gpu-statistics
gpu-statistics.py
<reponame>Egolas/gpu-statistics import os import json import time from mysql_utils import auto_insert_database host_name = 'hostname' # input your hostname # input database config database_config = { 'user': 'user', 'password': 'password', 'host': '0.0.0.0', 'database': 'gpustat', 'raise_on_warnings': True, 'auth_plugin': 'caching_sha2_password' } def read_stat(): cmd = 'gpustat -cu --no-color --json' stat = os.popen(cmd).read() stat_json = json.loads(stat) return stat_json def get_records(stats): records = [] timestamp = time.time() for gpu in stats['gpus']: for process in gpu['processes']: record = { 'timestamp':timestamp, 'hostname':host_name, 'username':process['username'], 'memory.usage':process['gpu_memory_usage'], 'gpu.index':gpu['index'], 'gpu.name':gpu['name'], 'gpu.memory.total':gpu['memory.total'] } records.append(record) return records def main(): stats = read_stat() stats_dict = get_records(stats) for record in stats_dict: auto_insert_database(database_config, record, table='gpustatistics') if __name__ == '__main__': main()
Benjamin-Lee/branchpro
branchpro/tests/__init__.py
<gh_stars>1-10 # # Test module for branchpro. # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # # To run all tests, use ``python -m unittest discover``. # # To run a particular test, use e.g. # ``python -m unittest branchpro.tests.test_models``.
Benjamin-Lee/branchpro
branchpro/__init__.py
<reponame>Benjamin-Lee/branchpro # # Root of the branchpro module. # Provides access to all shared functionality (models, simulation, etc.). # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # """branchpro is a Branching Processes modelling library. It contains functionality for modelling, simulating, and visualising the number of cases of infections by day during an outbreak of the influenza virus. """ # Import version info from .version_info import VERSION_INT, VERSION # noqa # Import main classes from .models import ForwardModel # noqa
Benjamin-Lee/branchpro
run-tests.py
# This file contains code for running all tests. # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # import unittest import os import sys import argparse import datetime def run_unit_tests(): """ This function runs our unit tests. """ tests = os.path.join('branchpro', 'tests') tests_suite = unittest.defaultTestLoader.discover(tests, pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests_suite) sys.exit(0 if result.wasSuccessful() else 1) # This function is from the Pints library # https://github.com/pints-team/pints/blob/master/run-tests.py def run_copyright_checks(): """ Checks that the copyright year in LICENSE.md is up-to-date and that each file contains the copyright header """ print('\nChecking that copyright is up-to-date and complete.') year_check = True current_year = str(datetime.datetime.now().year) with open('LICENSE.md', 'r') as license_file: license_text = license_file.read() if 'Copyright (c) ' + current_year in license_text: print("Copyright notice in LICENSE.md is up-to-date.") else: print('Copyright notice in LICENSE.md is NOT up-to-date.') year_check = False # Recursively walk the pints directory and check copyright header is in # each checked file type header_check = True checked_file_types = ['.py'] copyright_header = """# # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. 
#""" for dirname, subdir_list, file_list in os.walk('branchpro'): for f_name in file_list: if any([f_name.endswith(x) for x in checked_file_types]): path = os.path.join(dirname, f_name) with open(path, 'r') as f: if copyright_header not in f.read(): print('Copyright blurb missing from ' + path) header_check = False if header_check: print('All files contain copyright header.') if not year_check or not header_check: print('FAILED') sys.exit(1) if __name__ == '__main__': # Set up argument parsing parser = argparse.ArgumentParser( description='Run unit test for branchpro', epilog='To run individual unit tests, use e.g.' ' $ python3 branchpro/tests/test_dummy.py', ) # Unit tests parser.add_argument( '--unit', action='store_true', help='Run all unit tests using `python` interpretor.', ) parser.add_argument( '--copyright', action='store_true', help='Check that copyright license info is up to date.', ) # Parse! args = parser.parse_args() # Run tests has_run = False # Unit tests if args.unit: has_run = True run_unit_tests() if args.copyright: has_run = True run_copyright_checks() if not has_run: parser.print_help()
Benjamin-Lee/branchpro
branchpro/tests/test_models.py
<reponame>Benjamin-Lee/branchpro # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # import unittest import branchpro as bp class TestForwardModelClass(unittest.TestCase): """ Test the 'ForwardModel' class. """ def test__init__(self): bp.ForwardModel() def test_simulate(self): forward_model = bp.ForwardModel() with self.assertRaises(NotImplementedError): forward_model.simulate(0, 1)
Benjamin-Lee/branchpro
branchpro/models.py
<filename>branchpro/models.py # # ForwardModel Class # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # class ForwardModel(object): """ForwardModel Class: Base class for the model classes included in the branchpro package. Classes inheriting from ``ForwardModel`` class can implement the methods directly in Python. Methods ------- simulate: return model output for specified parameters and times. """ def __init__(self): super(ForwardModel, self).__init__() def simulate(self, parameters, times): """ Runs a forward simulation with the given ``parameters`` and returns a time-series with data points corresponding to the given ``times``. Returns a sequence of length ``n_times`` (for single output problems) or a NumPy array of shape ``(n_times, n_outputs)`` (for multi-output problems), representing the values of the model at the given ``times``. Parameters ---------- parameters An ordered sequence of parameter values. times The times at which to evaluate. Must be an ordered sequence, without duplicates, and without negative values. All simulations are started at time 0, regardless of whether this value appears in ``times``. """ raise NotImplementedError
Benjamin-Lee/branchpro
branchpro/version_info.py
<filename>branchpro/version_info.py # # Version information for pkmodel. # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # # See: https://packaging.python.org/guides/single-sourcing-package-version/ # # This file is part of BRANCHPRO # (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released # under the BSD 3-clause license. See accompanying LICENSE.md for copyright # notice and full license details. # # Version as a tuple (major, minor, revision) # - Changes to major are rare # - Changes to minor indicate new features, possible slight backwards # incompatibility # - Changes to revision indicate bugfixes, tiny new features VERSION_INT = 0, 0, 1 # String version of the version number VERSION = '.'.join([str(x) for x in VERSION_INT])
angelosalton/pySGS
sgs/common.py
<reponame>angelosalton/pySGS<filename>sgs/common.py """ Shared functions. """ from datetime import datetime import locale import re from typing import Union import os LRU_CACHE_SIZE = 32 MAX_ATTEMPT_NUMBER = 5 def to_datetime(date_string: str, language: str) -> Union[datetime, str]: """ Converts a date string to a datetime object """ locales = {"pt": "pt_BR.utf-8", "en": "en_US.utf-8"} """ correct problem with locale in Windows platform """ if os.name == 'nt': locales = {"pt": "Portuguese_Brazil.1252", "en": "Portuguese_Brazil.1252"} locale.setlocale(locale.LC_TIME, locales[language]) dd_mm_aaaa = "%d/%m/%Y" mmm_aaaa = "%b/%Y" formats = [dd_mm_aaaa, mmm_aaaa] for fmt in formats: try: date = datetime.strptime(date_string, fmt) break except ValueError: continue else: yyyy = "[0-9]{4}" if re.match(yyyy, date_string): year = int(date_string) month = 12 day = 31 date = datetime(year, month, day) else: return date_string # returns original value if cant parse return date
Yu-Chuan/Survival-and-death-model
survial_death_model_conc.py
# -*- coding: utf-8 -*- """ Created on Fri Nov 27 15:32:42 2020 @author: b308 <NAME> """ import numpy as np import matplotlib.pyplot as plt import os #variable name si = ['TNF', 'TNFR1', 'TNFR1a' , 'TRADD', 'TNFR1a_TRADD', 'TRAF2', 'early_complex', 'RIPK1','early_complex_RIPK1', 'IKK', 'early_complex_RIPK1_IKK', 'IKKa', 'IκB_NFκB', 'IκB_NFκB_IKKa', 'IκBp', 'NFκB', 'FADD', 'early_complex_RIPK1_FADD', 'TRADD_TRAF2_RIPK1_FADD', 'Caspase8', 'TRADD_TRAF2_RIPK1_FADD_Caspase8', 'Caspase8a', 'Caspase3', 'Caspase8a_Caspase3', 'Caspase3a', 'DNA_fragmentation', 'cIAP', 'Caspase3a_cIAP' ,'DNA', 'Caspase3a_DNA', 'IκB'] #%% create data fold path = 'D:/Yu-Chuan/ForYuChuan/python program/survial_death_model/ODE/conc/' def createFold(f_dir): try: os.makedirs(f_dir) except FileExistsError: print("The directory has been created on %s" % f_dir) except OSError: print ("Creation of the directory %s failed" % f_dir) else: print ("Successfully created the directory %s" % f_dir) # f_dir = path + folder f_dir = path + 'result/' createFold(f_dir) # npy saving save_name = path + 'SDM_ODE_conc' #%% kinetic Parameter and initial value k1 = 0.185 *1e-3 # sce-1*nM-1 k2 = 0.00125 *1e-3 k3 = 0.185 *1e-3 # sce-1*nM-1 k4 = 0.00125 *1e-3 k5 = 0.185 *1e-3 # sce-1*nM-1 k6 = 0.00125 *1e-3 k7 = 0.185 *1e-3 # sce-1*nM-1 k8 = 0.00125 *1e-3 k9 = 0.185 *1e-3 # sce-1*nM-1 k10 = 0.00125 *1e-3 k11 = 0.37 *1e-3 k12 = 0.014 *1e-3 # sce-1*nM-1 k13 = 0.00125 *1e-3 k14 = 0.37 *1e-3 k15 = 0.185 *1e-3 # sce-1*nM-1 k16 = 0.00125 *1e-3 k17 = 0.37 *1e-3 k18 = 0.5 *1e-3 # sce-1*nM-1 k19 = 0.2 *1e-3 k20 = 0.1 *1e-3 k21 = 0.1 *1e-3 # sce-1*nM-1 k22 = 0.06 *1e-3 k23 = 100 *1e-3 k24 = 0.185 *1e-3 # sce-1*nM-1 k25 = 0.00125 *1e-3 k26 = 0.37 *1e-3 k27 = 0.37 *1e-3 k28 = 0.5 *1e-3 # sce-1*nM-1 k29 = 750 *1e-3 # sce-1*nM-1 p = 1.75 *1e-3 #initial value a = 10. TNF = a #1 TNFR1 = 100. #2 TNFR1a = 0. #3 TRADD = 150. #4 TNFR1a_TRADD = 0. #5 TRAF2 = 100. #6 early_complex = 0. #7 TNFR1a_TRADD_TRAF2 RIPK1 = 100. 
#8 early_complex_RIPK1 = 0. #9 # early complex IKK = 100. #10 early_complex_RIPK1_IKK = 0. #11 # survival complex IKKa = 0. #12 IκB_NFκB = 250. #13 IκB_NFκB_IKKa = 0. #14 IκBp = 0. #15 NFκB = 0. #16 FADD = 100. #17 early_complex_RIPK1_FADD = 0. #18 TRADD_TRAF2_RIPK1_FADD = 0. #19 (compleII) Caspase8 = 80. #20 TRADD_TRAF2_RIPK1_FADD_Caspase8 = 0. #21 Caspase8a = 0. #22 Caspase3 = 200. #23 Caspase8a_Caspase3 = 0. #24 Caspase3a = 0. #25 DNA_fragmentation = 0. #26 cIAP = 0. #27 Caspase3a_cIAP = 0. #28 DNA = 800. #29 Caspase3a_DNA = 0. #30 IκB = 0. #31 #%% functions def stoichoi_M (var): rxn = 2*var V = np.zeros((var,rxn)) for i in range(var): V[i, 2*i] = 1 V[i, 2*i+1] = -1 return V def model(P, dt, NFkB_delay): [TNF, TNFR1, TNFR1a , TRADD, TNFR1a_TRADD, TRAF2, early_complex, RIPK1,early_complex_RIPK1, IKK, early_complex_RIPK1_IKK, IKKa, IκB_NFκB, IκB_NFκB_IKKa, IκBp, NFκB, FADD, early_complex_RIPK1_FADD, TRADD_TRAF2_RIPK1_FADD, Caspase8, TRADD_TRAF2_RIPK1_FADD_Caspase8, Caspase8a, Caspase3, Caspase8a_Caspase3, Caspase3a, DNA_fragmentation, cIAP, Caspase3a_cIAP, DNA, Caspase3a_DNA, IκB ] = P NFkB_delay = NFkB_delay D = np.array([ # TNF c1 k2*TNFR1a, k1*TNF*TNFR1, # TNFR1 c2 (k2*TNFR1a + k17*early_complex_RIPK1_FADD + k11* early_complex_RIPK1_IKK)*10**(-1.7), k1*TNF*TNFR1a, #TNFR1a c3 k1*TNF*TNFR1 + k4* TNFR1a_TRADD, k2*TNFR1a + k3* TNFR1a * TRADD, # TRADD c4 k4* TNFR1a_TRADD + k11* early_complex_RIPK1_IKK + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8, k3* TNFR1a* TRADD, # TNFR1a_TRADD c5 k3* TNFR1a* TRADD + k6* early_complex, k4* TNFR1a_TRADD + k5* TNFR1a_TRADD* RIPK1, # RIPK1 c6 k6* early_complex + k11* early_complex_RIPK1_IKK + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8, k5* TNFR1a_TRADD * RIPK1, # early_complex c7 k5* TNFR1a_TRADD * RIPK1 + k8* early_complex_RIPK1, k6* early_complex + k7* early_complex* RIPK1, # RIPK1 c8 k8* early_complex_RIPK1 + k11* early_complex_RIPK1_IKK+ k20* TRADD_TRAF2_RIPK1_FADD_Caspase8, k7* early_complex* RIPK1, # early_complex_RIPK1 c9 k7* 
early_complex* RIPK1 + k10* early_complex_RIPK1_IKK + k16* early_complex_RIPK1_FADD, k8* early_complex_RIPK1 + k9* early_complex_RIPK1*IKK + k15* early_complex_RIPK1 * FADD, # IKK c10 k10* early_complex_RIPK1_IKK + k14* IκB_NFκB_IKKa, k9* early_complex_RIPK1* IKK, # early_complex_RIPK1_IKK c11 k9* early_complex_RIPK1* IKK, k10* early_complex_RIPK1_IKK + k11* early_complex_RIPK1_IKK, # IKKa c12 k11* early_complex_RIPK1_IKK + k13* IκB_NFκB_IKKa, k12* IKKa * IκB_NFκB, # IκB_NFκB c13, k13* IκB_NFκB_IKKa + k29* NFκB * IκB, k12* IKKa * IκB_NFκB, # IκB_NFκB_IKKa c14 k12* IKKa * IκB_NFκB, k13* IκB_NFκB_IKKa + k14* IκB_NFκB_IKKa, # IκBp c15 k14* IκB_NFκB_IKKa, 0, # NFκB c16 k14* IκB_NFκB_IKKa, k29* NFκB* IκB, # FADD c17 k16* early_complex_RIPK1_FADD + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8, k15* early_complex_RIPK1* FADD, # early_complex_RIPK1_FADD c18 k15* early_complex_RIPK1* FADD, k16* early_complex_RIPK1_FADD + k17* early_complex_RIPK1_FADD, # TRADD_TRAF2_RIPK1_FADD c19 k17* early_complex_RIPK1_FADD + k19* TRADD_TRAF2_RIPK1_FADD_Caspase8, k18* TRADD_TRAF2_RIPK1_FADD* Caspase8, # Caspase8 c20 k19* TRADD_TRAF2_RIPK1_FADD_Caspase8, k18* TRADD_TRAF2_RIPK1_FADD* Caspase8, # TRADD_TRAF2_RIPK1_FADD_Caspase8 c21 k18* TRADD_TRAF2_RIPK1_FADD* Caspase8, k19* TRADD_TRAF2_RIPK1_FADD_Caspase8 + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8, # Caspase8a c22 k20* TRADD_TRAF2_RIPK1_FADD_Caspase8 + k22* Caspase8a_Caspase3 + k23 * Caspase8a_Caspase3, k21* Caspase8a* Caspase3, # Caspase3 c23 k22* Caspase8a_Caspase3 + k26* Caspase3a_DNA, k21* Caspase8a* Caspase3, # Caspase8a_Caspase3 c24 k21* Caspase8a* Caspase3, k22* Caspase8a_Caspase3 + k23* Caspase8a_Caspase3, # Caspase3a c25 k23* Caspase8a_Caspase3 + k25* Caspase3a_DNA, k28*cIAP* Caspase3a + k24* DNA* Caspase3a, # DNA_fragmentation c26 k26* Caspase3a_DNA, 0, # cIAP c27 p* NFkB_delay, k28* cIAP* Caspase3a, # Caspase3a_cIAP c28 k28* cIAP* Caspase3a, 0, # DNA c29 k25* Caspase3a_DNA, k24* Caspase3a* DNA, # Caspase3a_DNA c30 k24* Caspase3a* DNA, k25* 
Caspase3a_DNA + k26* Caspase3a_DNA, # IkB c31 p* NFkB_delay, k29* NFκB* IκB ]).reshape(2*var,1) VD = np.matmul(V,D)*dt return VD.reshape(len(P)) # Euler method def euler_claculate(model, P, dt, delay_time): P[:,0] = x0 delay_index = int(delay_time/dt) for i in range(t_step): NFkB_delay = P[15][max(0,i-delay_index)] P[:,i+1] = P[:,i] + model(P[:,i], dt ,NFkB_delay) return P #%% program runnig # varibles x0 = np.array([ TNF, TNFR1, TNFR1a , TRADD, TNFR1a_TRADD, TRAF2, early_complex, RIPK1,early_complex_RIPK1, IKK, early_complex_RIPK1_IKK, IKKa, IκB_NFκB, IκB_NFκB_IKKa, IκBp, NFκB, FADD, early_complex_RIPK1_FADD, TRADD_TRAF2_RIPK1_FADD, Caspase8, TRADD_TRAF2_RIPK1_FADD_Caspase8, Caspase8a, Caspase3, Caspase8a_Caspase3, Caspase3a, DNA_fragmentation, cIAP, Caspase3a_cIAP ,DNA, Caspase3a_DNA, IκB ]) # initial condition t = 3600*12 t_step = 100000 t_interval = np.linspace(0, t, t_step) dt = t_interval[-1]- t_interval[-2] X = np.zeros((len(x0), t_step+1)) var = len(x0) X[:,0] = x0 V = stoichoi_M(var) delay_time = 60*20 # calculate the result result = euler_claculate(model, X, dt, delay_time) np.save(save_name, result) #%% plot process XX = result[:, :-1] for i , x in enumerate(XX) : plt.figure(figsize=(8.5,6), linewidth = 1.5) plt.plot(t_interval/60, x) #plt.legend([si[i]]) plt.xlabel('time (min)', fontsize = 18) plt.ylabel('concentration (nM)', fontsize = 18) file_name = f_dir + si[i] + '.png' plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.savefig(file_name , dpi= 1500)
Yu-Chuan/Survival-and-death-model
survival_death_model_CLE_multiple.py
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 15:32:42 2020

@author: b308
"""
import numpy as np
import multiprocessing as mp
from functools import partial
import time

# Chemical-Langevin-equation (CLE) version of the TNF survival/death model,
# run many times in parallel with multiprocessing. State is in molecule
# counts (1 nM in 1 pl = 600 molecules); each reaction gets its own
# propensity entry plus a Gaussian noise term.

# NOTE(review): this `si` list has fewer entries than the 31 state variables
# used below (IκBp, DNA_fragmentation, DNA, Caspase3a_DNA are missing) —
# confirm before using it to label per-species output.
si = ['TNF', 'TNFR1', 'TNFR1a' , 'TRADD', 'TNFR1a_TRADD', 'TRAF2',
      'early_complex', 'RIPK1', 'early_complex_RIPK1', 'IKK',
      'early_complex_RIPK1_IKK', 'IKKa', 'IκB_NFκB', 'IκB_NFκB_IKKa',
      'NFκB', 'FADD', 'early_complex_RIPK1_FADD', 'TRADD_TRAF2_RIPK1_FADD',
      'Caspase8', 'TRADD_TRAF2_RIPK1_FADD_Caspase8', 'Caspase8a',
      'Caspase3', 'Caspase8a_Caspase3', 'Caspase3a', 'cIAP',
      'Caspase3a_cIAP' , 'IκB']

#%% kinetic Parameter and initial value
# 1nM in 1 pl = 600 molecules
nM2molecule = 600.
# Bimolecular rate constants are divided by nM2molecule to convert from
# per-nM to per-molecule units.
k1 = 0.185 *1e-3 /nM2molecule # sce-1*nM-1
k2 = 0.00125 *1e-3
k3 = 0.185 *1e-3 /nM2molecule # sce-1*nM-1
k4 = 0.00125 *1e-3
k5 = 0.185 *1e-3 /nM2molecule # sce-1*nM-1
k6 = 0.00125 *1e-3
k7 = 0.185 *1e-3 /nM2molecule# sce-1*nM-1
k8 = 0.00125 *1e-3
k9 = 0.185 *1e-3 /nM2molecule # sce-1*nM-1
k10 = 0.00125 *1e-3
k11 = 0.37 *1e-3
k12 = 0.014 *1e-3 /nM2molecule# sce-1*nM-1
k13 = 0.00125 *1e-3
k14 = 0.37 *1e-3
k15 = 0.185 *1e-3 /nM2molecule# sce-1*nM-1
k16 = 0.00125 *1e-3
k17 = 0.37 *1e-3
k18 = 0.5 *1e-3 /nM2molecule # sce-1*nM-1
k19 = 0.2 *1e-3
k20 = 0.1 *1e-3
k21 = 0.1 *1e-3 /nM2molecule# sce-1*nM-1
k22 = 0.06 *1e-3
k23 = 100 *1e-3
k24 = 0.185 *1e-3 /nM2molecule # sce-1*nM-1
k25 = 0.00125 *1e-3
k26 = 0.37 *1e-3
k27 = 0.37 *1e-3
k28 = 0.5 *1e-3 /nM2molecule # sce-1*nM-1
k29 = 750 *1e-3 /nM2molecule # sce-1*nM-1
p = 1.75 *1e-3

#initial value (molecule counts)
a = 10.
TNF = a *nM2molecule #1
TNFR1 = 100. *nM2molecule#2
TNFR1a = 0. #3
TRADD = 150. *nM2molecule#4
TNFR1a_TRADD = 0. #5
TRAF2 = 100. *nM2molecule#6
early_complex = 0. #7 TNFR1a_TRADD_TRAF2
RIPK1 = 100.*nM2molecule #8
early_complex_RIPK1 = 0. #9 # early complex
IKK = 100. *nM2molecule#10
early_complex_RIPK1_IKK = 0. #11 # survival complex
IKKa = 0. #12
IκB_NFκB = 250.*nM2molecule #13
IκB_NFκB_IKKa = 0. #14
IκBp = 0. #15
NFκB = 0. #16
FADD = 100. *nM2molecule#17
early_complex_RIPK1_FADD = 0. #18
TRADD_TRAF2_RIPK1_FADD = 0. *nM2molecule #19 (compleII)
Caspase8 = 80. *nM2molecule#20
TRADD_TRAF2_RIPK1_FADD_Caspase8 = 0. #21
Caspase8a = 0. #22
Caspase3 = 200.*nM2molecule#23
Caspase8a_Caspase3 = 0. #24
Caspase3a = 0. #25
DNA_fragmentation = 0. #26
cIAP = 0. #27
Caspase3a_cIAP = 0. #28
DNA = 800.*nM2molecule#29
Caspase3a_DNA = 0. #30
IκB = 0. #31

#%% functions
def stoichoi_M (Z, eq_n, Rxn):
    # Build the stoichiometry matrix. `eq_n[i]` = (number of production
    # reactions, number of consumption reactions) for species i; each row
    # gets a run of +1s followed by -1s in the global reaction ordering.
    V = np.zeros([len(Z), Rxn])
    j = 0
    for i, e in enumerate(eq_n):
        V[i, j: j + sum(e)] = np.array([1]*e[0] + [-1]*e[1])
        j += sum(e)
    return V

def fixedNoise (Rxn, swap):
    # Identity-like matrix where each row s[0] is replaced by row s[1], so
    # that paired reactions (forward/backward of the same event) share one
    # noise term. NOTE(review): defined but not called in this script.
    I1 = np.eye(Rxn)
    I2 = np.copy(I1)
    for s in swap:
        I2[s[0]] = I1[s[1]]
    return I2

def model(P, dt, NFkB_delay):
    # One CLE step: drift V@D*dt plus diffusion V@(sqrt(D)*sqrt(dt)*N)
    # with independent standard-normal N per reaction.
    [TNF, TNFR1, TNFR1a , TRADD, TNFR1a_TRADD, TRAF2, early_complex,
     RIPK1, early_complex_RIPK1, IKK, early_complex_RIPK1_IKK, IKKa,
     IκB_NFκB, IκB_NFκB_IKKa, IκBp, NFκB, FADD, early_complex_RIPK1_FADD,
     TRADD_TRAF2_RIPK1_FADD, Caspase8, TRADD_TRAF2_RIPK1_FADD_Caspase8,
     Caspase8a, Caspase3, Caspase8a_Caspase3, Caspase3a, DNA_fragmentation,
     cIAP, Caspase3a_cIAP, DNA, Caspase3a_DNA, IκB ] = P
    # translation time-delay of IkB & cIAP
    NFkB_delay = NFkB_delay
    # propensity array: per species, production reactions, then "## decay"
    # marks the start of its consumption reactions (ordering must match eq_n).
    D = np.array([
        # TNF c1
        k2*TNFR1a,
        ## decay
        k1*TNF*TNFR1,
        # TNFR1 c2
        k2*TNFR1a,
        k17*early_complex_RIPK1_FADD,
        k11* early_complex_RIPK1_IKK,
        ## decay
        k1*TNF*TNFR1a,
        #TNFR1a c3
        k1*TNF*TNFR1,
        k4* TNFR1a_TRADD,
        ## decay
        k2*TNFR1a,
        k3*TNFR1a * TRADD,
        # TRADD c4
        k4* TNFR1a_TRADD,
        k11* early_complex_RIPK1_IKK,
        k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        ## decay
        k3* TNFR1a* TRADD,
        # TNFR1a_TRADD c5
        k3* TNFR1a* TRADD,
        k6* early_complex,
        ## decay
        k4* TNFR1a_TRADD,
        k5* TNFR1a_TRADD* RIPK1,
        # TRAF2 c6
        k6* early_complex,
        k11* early_complex_RIPK1_IKK,
        k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        ## decay
        k5* TNFR1a_TRADD * TRAF2,
        # early_complex c7
        k5* TNFR1a_TRADD * RIPK1,
        k8* early_complex_RIPK1,
        ## decay
        k6* early_complex,
        k7* early_complex* RIPK1,
        # RIPK1 c8
        k8* early_complex_RIPK1,
        k11* early_complex_RIPK1_IKK,
        k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        ## decay
        k7* early_complex* RIPK1,
        # early_complex_RIPK1 c9
        k7* early_complex* RIPK1,
        k10* early_complex_RIPK1_IKK,
        k16* early_complex_RIPK1_FADD,
        ## decay
        k8* early_complex_RIPK1,
        k9* early_complex_RIPK1*IKK,
        k15* early_complex_RIPK1 * FADD,
        # IKK c10
        k10* early_complex_RIPK1_IKK,
        k14* IκB_NFκB_IKKa,
        ## decay
        k9* early_complex_RIPK1* IKK,
        # early_complex_RIPK1_IKK c11
        k9* early_complex_RIPK1* IKK,
        ## decay
        k10* early_complex_RIPK1_IKK,
        k11* early_complex_RIPK1_IKK,
        # IKKa c12
        k11* early_complex_RIPK1_IKK/2,
        k13* IκB_NFκB_IKKa,
        ## decay
        k12* IKKa * IκB_NFκB,
        # IκB_NFκB c13,
        k13* IκB_NFκB_IKKa,
        k29* NFκB * IκB,
        ## decay
        k12* IKKa * IκB_NFκB,
        # IκB_NFκB_IKKa c14
        k12* IKKa * IκB_NFκB,
        ## decay
        k13* IκB_NFκB_IKKa,
        k14* IκB_NFκB_IKKa,
        # IκBp c15
        k14* IκB_NFκB_IKKa,
        ## decay
        0,
        # NFκB c16
        k14* IκB_NFκB_IKKa,
        ## decay
        k29* NFκB* IκB,
        # FADD c17
        k16* early_complex_RIPK1_FADD,
        k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        ## decay
        k15* early_complex_RIPK1* FADD,
        # early_complex_RIPK1_FADD c18
        k15* early_complex_RIPK1* FADD,
        ## decay
        k16* early_complex_RIPK1_FADD,
        k17* early_complex_RIPK1_FADD,
        # TRADD_TRAF2_RIPK1_FADD c19
        k17* early_complex_RIPK1_FADD,
        k19* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        ## decay
        k18* TRADD_TRAF2_RIPK1_FADD* Caspase8,
        # Caspase8 c20
        k19* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        ## decay
        k18* TRADD_TRAF2_RIPK1_FADD* Caspase8,
        # TRADD_TRAF2_RIPK1_FADD_Caspase8 c21
        k18* TRADD_TRAF2_RIPK1_FADD* Caspase8,
        ## decay
        k19* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        # Caspase8a c22
        k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,
        k22* Caspase8a_Caspase3,
        k23 * Caspase8a_Caspase3,
        ## decay
        k21* Caspase8a* Caspase3,
        # Caspase3 c23
        k22* Caspase8a_Caspase3,
        k26* Caspase3a_DNA,
        ## decay
        k21* Caspase8a* Caspase3,
        # Caspase8a_Caspase3 c24
        k21* Caspase8a* Caspase3,
        ## decay
        k22* Caspase8a_Caspase3,
        k23* Caspase8a_Caspase3,
        # Caspase3a c25
        k23* Caspase8a_Caspase3,
        k25* Caspase3a_DNA,
        ## decay
        k28*cIAP* Caspase3a,
        k24* DNA* Caspase3a,
        # DNA_fragmentation c26
        k26* Caspase3a_DNA,
        ## decay
        0,
        # cIAP c27
        p* NFkB_delay,
        ## decay
        k28* cIAP* Caspase3a,
        # Caspase3a_cIAP c28
        k28* cIAP* Caspase3a,
        ## decay
        0,
        # DNA c29
        k25* Caspase3a_DNA,
        ## decay
        k24* Caspase3a* DNA,
        # Caspase3a_DNA c30
        k24* Caspase3a* DNA,
        ## decay
        k25* Caspase3a_DNA,
        k26* Caspase3a_DNA,
        # IkB c31
        p* NFkB_delay,
        ## decay
        k29* NFκB* IκB
        ]).reshape(Rxn,1)
    sqdt = np.sqrt(dt)
    G = np.sqrt(D)
    N = np.random.randn(Rxn).reshape(Rxn,1)
    VD = np.matmul(V,D)*dt +np.matmul(V,G*sqdt*N)
    return VD.reshape(len(P))

def main(model, P, dt, delay_time):
    # One full Euler-Maruyama trajectory; negative counts produced by the
    # noise are clipped to 0. Relies on module globals x0 and t_step.
    P[:,0] = x0
    delay_index = int(delay_time/dt)
    for i in range(t_step):
        NFkB_delay = P[15][max(0,i-delay_index)]
        change = np.array([P[:,i] + model(P[:,i], dt ,NFkB_delay)])
        change[np.where(change <= 0)] = 0
        P[:,i+1] = change
    return P

def async_multicore(main, pool_n):
    # Submit `n_step` independent trajectories to a worker pool; returns the
    # list of AsyncResult handles (call .get() on each to collect data).
    pool = mp.Pool(processes = pool_n) # Open multiprocessing pool
    result = []
    #do computation
    for i in range(n_step):
        res = pool.apply_async(main, args = (model, X, dt, delay_time,))
        result.append(res)
    pool.close()
    pool.join()
    return result

def select_min(data, t_interval, t_step, slices):
    # Downsample every trajectory (and the time axis) by keeping one column
    # every `slices` steps, to shrink the saved file.
    t_n = np.arange(0, t_step, slices)
    t_new = t_interval[t_n]
    d_new = []
    for d in data:
        dd = d[:, t_n]
        d_new.append(dd)
    return t_new, d_new

# initial condition
x0 = np.array([ TNF, TNFR1, TNFR1a , TRADD, TNFR1a_TRADD, TRAF2,
                early_complex, RIPK1, early_complex_RIPK1, IKK,
                early_complex_RIPK1_IKK, IKKa, IκB_NFκB, IκB_NFκB_IKKa,
                IκBp, NFκB, FADD, early_complex_RIPK1_FADD,
                TRADD_TRAF2_RIPK1_FADD, Caspase8,
                TRADD_TRAF2_RIPK1_FADD_Caspase8, Caspase8a, Caspase3,
                Caspase8a_Caspase3, Caspase3a, DNA_fragmentation, cIAP,
                Caspase3a_cIAP ,DNA, Caspase3a_DNA, IκB ])
# (production, consumption) reaction counts per species, in x0 order
eq_n = np.array([[1,1], [3,1], [2,2], [3,1], [2,2], [3,1], [2,2], [3,1],
                 [3,3], [2,1], [1,2], [2,1], [2,1], [1,2], [1,1], [1,1],
                 [2,1], [1,2], [2,1], [1,1], [1,2], [3,1], [2,1], [1,2],
                 [2,2], [1,1], [1,1], [1,1], [1,1], [1,2], [1,1]])
# reaction-index pairs that represent the same physical event (for fixedNoise)
swap = np.array([[2,0], [5,1], [6,1], [8,0], [13, 9], [10, 7], [11, 4],
                 [14, 9], [16, 7], [21, 17], [18, 15], [19, 4], [20, 12],
                 [22, 17], [24, 15], [29,25], [26,23], [27, 4], [28, 12],
                 [30, 25], [33, 23], [38, 34], [36,31], [39,34], [40, 31],
                 [41, 4], [42, 4], [45, 43], [48, 47], [49, 43], [50, 37],
                 [51, 37], [52, 37], [53, 46], [56, 35], [57, 35], [58, 54],
                 [60, 59], [64, 62], [63, 61], [65, 62], [66, 61], [67, 55],
                 [68, 55], [74, 71], [72, 69], [75, 71], [76, 69], [77, 70],
                 [78, 70], [83, 82], [84, 81], [85, 81], [87, 80], [86, 79],
                 [88, 80], [89, 79], [90, 82], [91, 83], [92, 46] ])
t = 3600*12
t_step = 100000
t_interval = np.linspace(0, t, t_step)
dt = t_interval[-1]- t_interval[-2]
X = np.zeros((len(x0), t_step + 1))
var = len(x0)
X[:,0] = x0
Rxn = np.apply_along_axis(sum, 0 ,np.apply_along_axis(sum, 0 ,eq_n))
V = stoichoi_M(X, eq_n, Rxn)
delay_time = 60*20

if __name__ == "__main__" :
    name = "SDM_CLE_multiple.npy"
    pool_n = 64
    n_step = 1000
    slices = 100
    print('Opening {0} cpus for simulation...'.format(pool_n))
    print('Preparing parameter sets...')
    print('Preparing simulation space...')
    print('Loading all simulations...')
    start_time = time.time()
    holder = partial(main, )
    result = async_multicore(holder,pool_n)
    end_time = time.time()
    print('Finished all simulations with {0} sec'.format(end_time - start_time))
    print('Saving data...')
    data = [p.get() for p in result]
    # selected saving in linus system
    data = select_min(data, t_interval, t_step, slices)
    np.save(name, data)
    print("Saved successfully!")
Yu-Chuan/Survival-and-death-model
read_survial_death _model_multiple.py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 09:21:46 2020

@author: b308
"""
import numpy as np
import matplotlib.pyplot as plt
import os

# Post-processing for SDM_CLE_multiple.npy: the file holds (time axis,
# list of per-trajectory state arrays) as saved by the CLE simulation
# script's select_min().

# create the folder for npy and results
path = "/Yu-Chuan/ForYuChuan/python program/survial_death_model/CLE/multiple/"
f_dir = path + 'result/'
try:
    os.makedirs(f_dir)
except FileExistsError:
    print("The directory has been created on %s" % f_dir)
except OSError:
    print ("Creation of the directory %s failed" % f_dir)
else:
    print ("Successfully created the directory %s" % f_dir)

#%% data loading
file_name = 'SDM_CLE_multiple.npy'
data = np.load(path + file_name, allow_pickle=True)
t_sample = data[0]   # downsampled time axis (seconds)
n_sample = data[1]   # list of (species x time) trajectories

#%% histogram of death time of cell
# thershold is half of initial [caspase3] (ausumed)
# Per trajectory: the last time Caspase3a (row 24) is still <= 60000
# molecules is taken as that cell's death time.
sample_t = []
for n in n_sample:
    n_50000 = np.where(n[24] <= 60000)
    t_50000 = t_sample[n_50000[0][-1]]
    sample_t.append(t_50000)
sample_hr =np.array(sample_t)/3600 # unit hour
bins_hr = np.arange(0,13)
plt.hist(sample_hr, density = True, bins =bins_hr)
plt.xlabel('time (hours)', fontsize = 14)
plt.ylabel('Prob density', fontsize = 14)
file_name = f_dir + 'death_distribution' + '.png'
plt.savefig(file_name)

#%% all trajectaries of cascase3a
plt.figure()
for s in n_sample:
    plt.plot(np.array(t_sample)/3600, s[24])
plt.xlabel('time (hours)', fontsize = 14)
plt.ylabel('molecules', fontsize = 14)

#%% satistics analysis
# mean and std
data_mean = np.mean(n_sample, axis = 0)
data_std = np.std(n_sample, axis = 0)

#%% plot each mean trajetories with std
# all varibles
si = ['TNF', 'TNFR1', 'TNFR1a' , 'TRADD', 'TNFR1a_TRADD', 'TRAF2',
      'early_complex', 'RIPK1', 'early_complex_RIPK1', 'IKK',
      'early_complex_RIPK1_IKK', 'IKKa', 'IκB_NFκB', 'IκB_NFκB_IKKa',
      'IκBp', 'NFκB', 'FADD', 'early_complex_RIPK1_FADD',
      'TRADD_TRAF2_RIPK1_FADD', 'Caspase8',
      'TRADD_TRAF2_RIPK1_FADD_Caspase8', 'Caspase8a', 'Caspase3',
      'Caspase8a_Caspase3', 'Caspase3a', 'DNA_fragmentation', 'cIAP',
      'Caspase3a_cIAP' ,'DNA', 'Caspase3a_DNA', 'IκB']
for i, d in enumerate(data_mean):
    plt.figure(figsize=(8.5,6), linewidth = 1.5)
    # mean +/- one standard deviation band, then the mean itself
    plt.plot((t_sample/60),d + data_std[i], '#EDBB99')
    plt.plot((t_sample/60),d - data_std[i], '#EDBB99')
    plt.plot((t_sample/60),d)
    plt.xlabel('time (min)', fontsize = 18)
    plt.ylabel('molecular numbers', fontsize = 18)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    film_name = f_dir+ si[i] + '.png'
    plt.savefig(film_name, dpi= 1500)

#%% NFkB, IKB, NFkB_IkB
plt.figure(figsize=(8.5,6), linewidth = 1.5)
file_name = f_dir + 'NFkB_IkB_comp.png'
plt.plot(t_sample/60, data_mean[12,:]) #NFkB_IkB
plt.plot(t_sample/60, data_mean[15,:]) #NFkB
plt.plot(t_sample/60, data_mean[30,:]) #IkB
plt.legend(['NF-$\kappa$B_I$\kappa$B','NF-$\kappa$B', 'I$\kappa$B'], fontsize = 16)
plt.xlabel('time (min)', fontsize = 18)
plt.ylabel('molecular numbers', fontsize = 18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig(file_name , dpi= 1500)

#%% TNF, NFkB, IKB
plt.figure(figsize=(8.5,6), linewidth = 1.5)
file_name = f_dir + 'TNF_NFkB_IkB.png'
plt.plot(t_sample/60, data_mean[0,:]) #TNF
plt.plot(t_sample/60, data_mean[15,:]) #NFkB
plt.plot(t_sample/60, data_mean[30,:]) #IkB
plt.legend(['TNF','NF-$\kappa$B', 'I$\kappa$B'], fontsize = 16)
plt.xlabel('time (min)', fontsize = 18)
plt.ylabel('molecular numbers', fontsize = 18)
#plt.ylim([0, 500])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig(file_name , dpi= 1500)

#%% survival complex and death complex
file_name = f_dir + 'survival_and_death_complex.png'
plt.figure(figsize=(8.5,6), linewidth = 1.5)
plt.plot(t_sample/60, data_mean[10,:]) #survival
plt.plot(t_sample/60, data_mean[20,:]) #death
plt.legend(['survival complex','death comoplex'], fontsize = 16)
plt.xlabel('time (min)', fontsize = 18)
plt.ylabel('molecular number', fontsize = 18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig(file_name , dpi= 1500)

#%% Caspase3, Caspase3_IAP
file_name = f_dir + 'caspase3_Caspase3_IAP.png'
plt.figure(figsize=(8.5,6), linewidth = 1.5)
plt.plot(t_sample/60, data_mean[24,:]) #capase3a
plt.plot(t_sample/60, data_mean[27,:]) #capase3a/cIAP
plt.legend(['capase3a','capase3a_cIAP'], fontsize = 16)
plt.xlabel('time (min)', fontsize = 18)
plt.ylabel('molecular number', fontsize = 18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim(0, 10000)
plt.savefig(file_name , dpi= 1500)
WilliamOnVoyage/MontyHallSimulation
goat.py
<reponame>WilliamOnVoyage/MontyHallSimulation import random from argparse import ArgumentParser import sys DEFAULT_TRIES = 100000 DEFAULT_SEED = 1234 def setup_args(): parser = ArgumentParser(description="Simulates the probability of getting car in Monty Hall problem") parser.add_argument("-n", "--number-of-tries", dest="iterations", type=int, default=DEFAULT_TRIES, help=f"number of tries to simulate in this run, default is {DEFAULT_TRIES}") parser.add_argument("-s", "--seed", dest="seed", type=int, default=DEFAULT_SEED, help=f"random seed to use in the simulation, default is {DEFAULT_SEED}") return parser if __name__ == "__main__": arg_parser = setup_args() args = arg_parser.parse_args(sys.argv[1:]) random.seed(args.seed) iterations = args.iterations switch_hit = 0 no_switch_hit = 0 for _ in range(iterations): doors = set(range(1, 4)) car_door = random.randint(1, 3) no_switch_choice = random.randint(1, 3) # initial choice before switch switch_choice = random.randint(1, 3) remain_doors = doors.copy() # remove the chosen door and car door for host to open remain_doors.remove(switch_choice) try: remain_doors.remove(car_door) except KeyError: pass # host choose a random door to open from the remaining host_door = random.choice(list(remain_doors)) remain_doors.remove(host_door) # remaining doors to choose, need to add car door back when initial choice is not the car if switch_choice != car_door: remain_doors.add(car_door) switch_choice = random.choice(list(remain_doors)) if switch_choice == car_door: switch_hit += 1 if no_switch_choice == car_door: no_switch_hit += 1 print("Switch hits: {switch_hit} / {iterations} = {percentage}%".format(switch_hit=switch_hit, iterations=iterations, percentage=100.0 * float( switch_hit) / iterations)) print("No-switch hits: {no_switch_hit} / {iterations} = {percentage}%".format(no_switch_hit=no_switch_hit, iterations=iterations, percentage=100.0 * float( no_switch_hit) / iterations))
iafisher/precommit
precommit.py
<gh_stars>1-10 from precommitlib import checks def init(precommit): # Generic checks precommit.check(checks.NoStagedAndUnstagedChanges()) precommit.check(checks.NoWhitespaceInFilePath()) precommit.check(checks.DoNotSubmit()) # Language-specific checks precommit.check(checks.PythonFormat(exclude=["test_repo/*"])) precommit.check(checks.PythonLint(exclude=["test_repo/*"])) precommit.check(checks.PythonTypes(exclude=["test_repo/*", "setup.py"])) precommit.check(checks.PipFreeze(venv=None)) # Test suite precommit.check( checks.Command( "FunctionalTests", ["./functional_test"], exclude=["*.md"], slow=True ) )
iafisher/precommit
test_repo/bad_python_format.py
print( "hello" )
iafisher/precommit
precommitlib/lib.py
"""
The main library for the precommit tool. Holds the machinery for running
pre-commit checks and fixes and reporting the results. The checks themselves
are defined in checks.py.

Author: <NAME> (<EMAIL>)
Version: May 2020
"""
import ast
import fnmatch
import subprocess
import sys
import time
from collections import namedtuple
from typing import List, Optional, Union

from . import utils


class Precommit:
    def __init__(
        self, checks: List["BaseCheck"], *, check_all: bool, working: bool
    ) -> None:
        """
        Parameters:
          checks: The list of checks to run.
          check_all: Whether to run all checks, including slow ones.
          working: Whether to check the working directory as well as staged files.
        """
        # Calling it `self._checks` instead of `self.checks` avoids giving a confusing
        # error message for the common typo of `precommit.checks(...)` instead of
        # `precommit.check(...)`.
        self._checks = checks
        self.check_all = check_all
        self.working = working
        self.num_of_checks = 0
        self.num_of_skipped_checks = 0
        self.num_of_problems = 0
        self.num_of_fixable_problems = 0

    def check(self) -> bool:
        """
        Finds problems and print a message for each.

        Returns True if any problems were found.
        """
        if not self._checks:
            print("No checks were registered.")
            return False

        self.start = time.monotonic()
        repository = self.get_repository()
        files = (
            repository.unstaged + repository.staged
            if self.working
            else repository.staged
        )
        deleted_files = (
            repository.unstaged_deleted + repository.staged_deleted
            if self.working
            else repository.staged_deleted
        )
        if not (files or deleted_files):
            print("No files to check.")
            return False

        for check in self._checks:
            if not self.should_run(check):
                self.num_of_skipped_checks += 1
                if utils.VERBOSE:
                    self.print_check_header_and_status(check, "skipped")
                continue

            if not check.filter(files):
                if utils.VERBOSE:
                    self.print_check_header_and_status(check, "skipped")
                continue

            self.pre_check(check)
            problem = check.check(check.filter(files), stream_output=True)
            status = utils.red("failed!") if problem else utils.green("passed!")
            self.post_check(check, status, problem)

        self.print_summary_for_check()
        return self.num_of_problems > 0

    def fix(self) -> None:
        """Finds problems and fixes the ones that can be fixed automatically."""
        if not self._checks:
            print("No checks were registered.")
            return

        self.start = time.monotonic()
        repository = self.get_repository()
        files = (
            repository.unstaged + repository.staged
            if self.working
            else repository.staged
        )
        deleted_files = (
            repository.unstaged_deleted + repository.staged_deleted
            if self.working
            else repository.staged_deleted
        )
        if not (files or deleted_files):
            print("No files to fix.")
            # BUG FIX: previously fell through here, which pointlessly looped
            # over the checks and then ran `git add` with no file arguments
            # (an error in git). `check()` already returns in the parallel
            # case; now `fix()` does too.
            return

        for check in self._checks:
            if not check.is_fixable():
                continue

            if not self.should_run(check):
                self.num_of_skipped_checks += 1
                if utils.VERBOSE:
                    self.print_check_header_and_status(check, "skipped")
                continue

            if not check.filter(files):
                if utils.VERBOSE:
                    self.print_check_header_and_status(check, "skipped")
                continue

            self.pre_check(check)
            problem = check.check(check.filter(files), stream_output=False)
            if problem and problem.autofix:
                run(problem.autofix, stream_output=True)
            status = utils.green("fixed!") if problem else utils.green("passed!")
            self.post_check(check, status, problem)

        # Re-stage the files so the fixes are included in the commit.
        run(["git", "add"] + files, stream_output=False)
        self.print_summary_for_fix()

    def print_summary_for_check(self) -> None:
        print()
        print("Ran", utils.blue(utils.plural(self.num_of_checks, "check")), end=". ")
        if self.num_of_problems > 0:
            print(
                f"Detected {utils.red(utils.plural(self.num_of_problems, 'issue'))}",
                end=". ",
            )
            if self.num_of_fixable_problems > 0:
                if self.num_of_fixable_problems == self.num_of_problems:
                    n = utils.green("all of them")
                else:
                    n = utils.blue(f"{self.num_of_fixable_problems} of them")
                print(f"Fix {n} with `{utils.blue('precommit fix')}`.", end="")
            print()
        else:
            print(f"{utils.green('No issues')} detected.")

        if self.num_of_skipped_checks > 0:
            n = utils.yellow(utils.plural(self.num_of_skipped_checks, "check"))
            print(f"Skipped {n}", end=". ")
            print(f"Run all checks with `{utils.blue('precommit --all')}`.")

    def print_summary_for_fix(self) -> None:
        print()
        print(
            "Ran",
            utils.blue(utils.plural(self.num_of_checks, "fixable check")),
            end=". ",
        )
        print(
            "Detected", utils.red(utils.plural(self.num_of_problems, "issue")), end=". "
        )
        print("Fixed " + utils.green(f"{self.num_of_fixable_problems} of them") + ".")

    def pre_check(self, check: "BaseCheck") -> None:
        # Book-keeping done before every check that actually runs.
        if utils.VERBOSE:
            print(f"Running {check.get_name()}")
            self.check_start = time.monotonic()

        self.num_of_checks += 1
        self.print_check_header(check)

    def post_check(
        self, check: "BaseCheck", status: str, problem: Optional["Problem"]
    ) -> None:
        # Tally the result and print the status footer (plus timing in
        # verbose mode).
        if problem is not None:
            self.num_of_problems += 1
            if check.is_fixable():
                self.num_of_fixable_problems += 1

        self.print_check_status(status)

        if utils.VERBOSE:
            self.check_end = time.monotonic()
            elapsed = self.check_end - self.check_start
            elapsed_since_start = self.check_end - self.start
            print(f"Finished in {elapsed:.2f}s. ", end="")
            print(f"{elapsed_since_start:.2f}s since start.")
        print()

    def print_check_header_and_status(self, check: "BaseCheck", status: str) -> None:
        self.print_check_header(check)
        self.print_check_status(status)
        print()

    def print_check_header(self, check: "BaseCheck") -> None:
        print(utils.blue("o--[ " + check.get_name() + " ]"))

    def print_check_status(self, status: str) -> None:
        print(utils.blue("o--[ ") + status + utils.blue(" ]"))

    def should_run(self, check: "BaseCheck") -> bool:
        # Slow checks only run when --all was given.
        return not check.slow or self.check_all

    def get_repository(self) -> "Repository":
        staged = get_staged_files()
        staged_deleted = get_staged_deleted_files()
        unstaged = get_unstaged_files()
        unstaged_deleted = get_unstaged_deleted_files()
        return Repository(
            staged=staged,
            staged_deleted=staged_deleted,
            unstaged=unstaged,
            unstaged_deleted=unstaged_deleted,
        )


class Checklist:
    def __init__(self) -> None:
        self._checks: List["BaseCheck"] = []

    def check(self, check: "BaseCheck") -> None:
        """Registers the pre-commit check."""
        if not isinstance(check, BaseCheck):
            raise UsageError("check must be a subclass of BaseCheck")

        self._checks.append(check)


class BaseCheck:
    def __init__(
        self,
        slow: bool = False,
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
    ) -> None:
        """
        Parameters:
          slow: Whether the check is slow and should not be run by default.
          include: A list of patterns for file paths that the check should run
            on. If left as None, then the check runs on all files.
          exclude: A list of patterns for file paths that the check should NOT
            run on. Takes precedence over `include`, i.e. if a file path
            matches a pattern in `include` and in `exclude`, the file path
            will be excluded.
        """
        # FIX: defaults were the mutable literal `[]`; None is the safe
        # equivalent and the None-handling below already existed.
        if isinstance(include, str):
            raise UsageError("include should be a list of strings")

        if isinstance(exclude, str):
            raise UsageError("exclude should be a list of strings")

        self.slow = slow
        self.include = include if include is not None else []
        self.exclude = exclude if exclude is not None else []

    def check(self, files: List[str], *, stream_output: bool) -> Optional["Problem"]:
        raise NotImplementedError

    def get_name(self) -> str:
        return self.__class__.__name__

    def is_fixable(self) -> bool:
        return False

    def filter(self, paths: List[str]) -> List[str]:
        # Apply `include` first, then `exclude` (exclude wins on conflicts).
        if self.include:
            filtered = [
                p
                for p in paths
                if any(fnmatch.fnmatch(p, pattern) for pattern in self.include)
            ]
        else:
            filtered = paths

        if self.exclude:
            filtered = [
                p
                for p in filtered
                if not any(fnmatch.fnmatch(p, pattern) for pattern in self.exclude)
            ]

        return filtered


def decode_git_path(path: str) -> str:
    """
    Converts a path string as Git displays it to a UTF-8 encoded string.

    If the file path contains a non-ASCII character or a literal double quote,
    Git backslash-escapes the offending character and encloses the whole path
    in double quotes. This function reverses that transformation and decodes
    the resulting bytes as UTF-8.
    """
    if path.startswith('"') and path.endswith('"'):
        # TODO(2020-04-16): Do I need to add "b" and then decode, or can I just eval?
        return ast.literal_eval("b" + path).decode("utf-8")
    else:
        return path


class Problem:
    def __init__(
        self, autofix: Optional[List[str]] = None, message: Optional[str] = None
    ) -> None:
        # autofix: command (argv list) that fixes the problem, if any.
        # message: human-readable description, if any.
        self.autofix = autofix
        self.message = message


def get_staged_files() -> List[str]:
    return _read_files_from_git(["--cached", "--diff-filter=d"])


def get_staged_deleted_files() -> List[str]:
    return _read_files_from_git(["--cached", "--diff-filter=D"])


def get_unstaged_files() -> List[str]:
    return _read_files_from_git(["--diff-filter=d"])


def get_unstaged_deleted_files() -> List[str]:
    return _read_files_from_git(["--diff-filter=D"])


def _read_files_from_git(args: List[str]) -> List[str]:
    result = run(["git", "diff", "--name-only"] + args, stream_output=False)
    return [decode_git_path(p) for p in result.stdout.decode("ascii").splitlines()]


CommandResult = namedtuple("CommandResult", ["returncode", "stdout"])


def run(
    cmd: Union[List[str], str],
    *,
    shell: bool = False,
    stream_output: bool,
    working_directory: Optional[str] = None,
) -> CommandResult:
    """
    Runs a shell command.

    If `stream_output` is True, then the output is streamed to the console
    rather than captured and suppressed.

    Due to inconsistencies with the Python subprocess API, this function
    returns an object of type `CommandResult`.
    """
    if utils.VERBOSE:
        cmd_as_string = " ".join(cmd) if isinstance(cmd, list) else cmd
        print("Running command: " + cmd_as_string)

    if not stream_output:
        r = subprocess.run(
            cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        return CommandResult(returncode=r.returncode, stdout=r.stdout)
    else:
        # Normally this isn't necessary, but sometimes when you pipe precommit
        # itself to another command or to a file (as the functional test does), then
        # it will print all the output of the command below before any of
        # precommit's output, for reasons that remain obscure to me.
        sys.stdout.flush()
        # Print the prefix before each line of the command's output by piping it to
        # sed.
        ps = subprocess.Popen(
            cmd,
            shell=shell,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=working_directory,
        )
        subprocess.run(
            ["sed", "-e", "s/^/" + utils.blue("| ") + "/"],
            stdin=ps.stdout,
            stderr=subprocess.STDOUT,
        )
        returncode = ps.wait()
        return CommandResult(returncode=returncode, stdout=None)


class Repository:
    def __init__(
        self,
        staged: List[str],
        staged_deleted: List[str],
        unstaged: List[str],
        unstaged_deleted: List[str],
    ) -> None:
        self.staged = staged
        self.staged_deleted = staged_deleted
        self.unstaged = unstaged
        self.unstaged_deleted = unstaged_deleted


class UsageError(Exception):
    """Exception for incorrect usage of the precommit API."""
iafisher/precommit
precommitlib/checks.py
""" A suite of useful pre-commit checks. If you want to write your own check, you'll need to create a new class that inherits from `BaseCheck` and defines a `check` method that returns a `Problem` object if it finds any issues, or `None` otherwise. Read through the existing checks in this module for inspiration. If your check can be formulated as a shell command, you can just write a function that wraps the `Command` class. This module contains many examples of that. Author: <NAME> (<EMAIL>) Version: May 2020 """ import os import shlex import textwrap from typing import List, Optional, Union from . import utils from .lib import ( BaseCheck, Problem, UsageError, get_staged_files, get_unstaged_files, run, ) class NoStagedAndUnstagedChanges(BaseCheck): """Checks that each staged file doesn't also have unstaged changes.""" def check(self, files: List[str], *, stream_output: bool) -> Optional[Problem]: # This check is highly unusual in that it ignores the `files` parameter and # instead queries the state of the repository itself. Almost all other checks # should NOT do this. staged = get_staged_files() unstaged = get_unstaged_files() both = set(staged).intersection(set(unstaged)) if both: message = "\n".join(sorted(both)) if stream_output: _stream(message) return Problem(autofix=["git", "add"] + list(both)) return None def is_fixable(self) -> bool: return True # We construct it like this so the string literal doesn't trigger the check itself. 
DO_NOT_SUBMIT = "DO NOT " + "SUBMIT" class DoNotSubmit(BaseCheck): f"""Checks that files do not contain the string '{DO_NOT_SUBMIT}'.""" def check(self, files: List[str], *, stream_output: bool) -> Optional[Problem]: bad_paths = [] for path in files: with open(path, "rb") as f: if DO_NOT_SUBMIT.encode("ascii") in f.read().upper(): bad_paths.append(path) if bad_paths: message = "\n".join(sorted(bad_paths)) if stream_output: _stream(message) return Problem(message=f"file contains '{DO_NOT_SUBMIT}'") return None class NoWhitespaceInFilePath(BaseCheck): """Checks that file paths do not contain whitespace.""" def check(self, files: List[str], *, stream_output: bool) -> Optional[Problem]: bad_paths = [] for path in files: if any(c.isspace() for c in path): bad_paths.append(path) if bad_paths: message = "\n".join(sorted(bad_paths)) if stream_output: _stream(message) return Problem(message="file path contains whitespace") return None class Command(BaseCheck): def __init__( self, name: str, cmd: Union[List[str], str], fix: Optional[List[str]] = None, shell: bool = False, pass_files: bool = False, separately: bool = False, working_directory: str = None, **kwargs, ) -> None: super().__init__(**kwargs) self.name = name self.cmd = cmd self.fix = fix self.working_directory = working_directory if separately is True and pass_files is False: raise UsageError("if `separately` is True, `pass_files` must also be True") self.shell = shell self.pass_files = pass_files self.separately = separately def check(self, files: List[str], *, stream_output: bool) -> Optional[Problem]: if self.separately: problem = False for path in files: cmd: Union[List[str], str] if isinstance(self.cmd, str): cmd = self.cmd + " " + shlex.quote(path) else: cmd = self.cmd + [path] r = run( cmd, shell=self.shell, stream_output=stream_output, working_directory=self.working_directory, ) if r.returncode != 0: problem = True if problem: # TODO(2020-04-23): There should be a separate fix command for each # file path. 
return Problem(autofix=self.fix) else: args = files if self.pass_files else [] if isinstance(self.cmd, str): cmd = self.cmd + " " + " ".join(map(shlex.quote, args)) else: cmd = self.cmd + args r = run( cmd, shell=self.shell, stream_output=stream_output, working_directory=self.working_directory, ) if r.returncode != 0: autofix = self.fix + args if self.fix else None return Problem(autofix=autofix) return None def get_name(self) -> str: return self.name def is_fixable(self) -> bool: return self.fix is not None def PythonFormat( args: List[str] = [], *, include: List[str] = [], **kwargs ) -> BaseCheck: return Command( "PythonFormat", ["black", "--check"] + args, pass_files=True, include=["*.py"] + include, fix=["black"] + args, **kwargs, ) def PythonLint(args: List[str] = [], *, include: List[str] = [], **kwargs) -> BaseCheck: return Command( "PythonLint", ["flake8", "--max-line-length=88"] + args, pass_files=True, include=["*.py"] + include, **kwargs, ) def PythonImportOrder( args: List[str] = [], *, include: List[str] = [], **kwargs ) -> BaseCheck: return Command( "PythonImportOrder", ["isort", "-c"] + args, pass_files=True, include=["*.py"] + include, fix=["isort"] + args, **kwargs, ) def PythonTypes( args: List[str] = [], *, include: List[str] = [], **kwargs ) -> BaseCheck: return Command( "PythonTypes", ["mypy"] + args, pass_files=True, include=["*.py"] + include, **kwargs, ) def PipFreeze(venv, **kwargs): if venv is None: pip = "pip" else: pip = os.path.join(venv, "bin", "pip") return Command( "PipFreeze", f"[ ! 
-e requirements.txt ] || {pip} freeze | diff - requirements.txt", shell=True, **kwargs, ) def JavaScriptLint( args: List[str] = [], *, include: List[str] = [], **kwargs ) -> BaseCheck: return Command( "JavaScriptLint", ["npx", "eslint", "--max-warnings", "0"] + args, pass_files=True, include=["*.js"] + include, fix=["npx", "eslint", "--fix"], **kwargs, ) def JavaScriptPrettierFormat( args: List[str] = [], *, local_install=False, include: List[str] = [], **kwargs ) -> BaseCheck: cmd = ["npx", "prettier", "--check"] if local_install else ["prettier", "--check"] return Command( "JavaScriptPrettierFormat", cmd + args, pass_files=True, include=["*.js"] + include, fix=["npx", "prettier", "--write"], **kwargs, ) def RustFormat(args: List[str] = [], *, include: List[str] = [], **kwargs) -> BaseCheck: return Command( "RustFormat", ["cargo", "fmt", "--", "--check"] + args, pass_files=True, include=["*.rs"] + include, fix=["cargo", "fmt", "--"] + args, **kwargs, ) def TypeScriptFormat( args: List[str] = [], *, include: List[str] = [], **kwargs ) -> BaseCheck: return Command( "TypeScriptFormat", ["tsfmt", "--verify"] + args, pass_files=True, include=["*.ts"] + include, fix=["tsfmt", "-r"], **kwargs, ) def _stream(msg: str) -> None: """ Prints the message. This is the function that all checks should use to emit output, like this: if stream_output: _stream(msg) """ print(textwrap.indent(msg, utils.blue("| ")))
iafisher/precommit
precommitlib/__init__.py
from .lib import BaseCheck, Precommit, Problem, Repository  # noqa: F401