import sklearn.datasets
import sklearn.ensemble
import sklearn.model_selection
import sklearn.svm
class Objective(object):
    def __init__(self, boston):
        self.boston = boston
def __call__(self, trial):
x, y = self.boston.data, self.boston.target
classifier_name = trial.suggest_categorical('classifier', ['SVR', 'RandomForestRegressor'])
if classifier_name == 'SVR':
svc_c = trial.suggest_loguniform('svc_c', 1e-10, 1e10)
classifier_obj = sklearn.svm.SVR(C=svc_c, gamma='auto')
else:
rf_max_depth = int(trial.suggest_loguniform('rf_max_depth', 2, 32))
classifier_obj = sklearn.ensemble.RandomForestRegressor(
max_depth=rf_max_depth, n_estimators=10)
score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)
accuracy = score.mean()
return accuracy
if __name__ == '__main__':
import optuna
# Load the dataset in advance for reusing it each trial execution.
boston = sklearn.datasets.load_boston()
objective = Objective(boston)
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=100)
print(study.best_trial)
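    # The tuned values can also be read directly from Optuna's study attributes, e.g.:
    # print(study.best_trial.params)   # best hyperparameters found
    # print(study.best_value)          # best cross-validation score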
|
import math
import matplotlib.pyplot as plt
fname = 'test'
f1 = open(fname, "r")
temp = list(map(int, f1.read().split()))
N = temp[0]
Px = []
Py = []
for i in range(1, len(temp)):
if i%2 == 1:
Px.append(temp[i])
else:
Py.append(temp[i])
f2 = open('circle', "r")
tmp = list(map(float, f2.read().split()))
N = len(tmp) // 3
C = []
for i in range(0, 3 * N, 3):
C.append([tmp[i], tmp[i+1], tmp[i+2]])
W = max(C[N-1][0] + C[N-1][2] + 1, C[N-1][1] + C[N-1][2] + 1)
Q = max(C[N-1][0] - C[N-1][2] - 1, C[N-1][1] - C[N-1][2] - 1)
for i in range(1,N):
fig, ax = plt.subplots()
ax.plot(Px[:i], Py[:i], "ro")
# hack to scale the graph :P
ax.plot([W], [Q], "wo")
ax.plot([W], [W], "wo")
ax.plot([Q], [W], "wo")
ax.plot([Q], [Q], "wo")
ax.plot([Px[i]], [Py[i]], "bo")
ax.set_xlim(Q, W)
ax.set_ylim(Q, W)
ax.axis('scaled')
C1 = plt.Circle((C[i-1][0], C[i-1][1]), C[i-1][2], color='blue', fill=False)
ax.add_artist(C1)
fig.savefig('./images/' + 'file' + str(i) + 'a')
C1.remove()
C1 = plt.Circle((C[i][0], C[i][1]), C[i][2], color='blue', fill=False)
ax.add_artist(C1)
fig.savefig('./images/' + 'file' + str(i) + 'b')
# plt.show()
"""
W = max(C[N-1][0] + C[N-1][2] + 1, C[N-1][1] + C[N-1][2] + 1)
Q = max(C[N-1][0] - C[N-1][2] - 1, C[N-1][1] - C[N-1][2] - 1)
W = max(C[N-1]+R+1, C[N-1]+R+1)
C1 = plt.Circle(C, R, color='blue', fill=False)
fig, ax = plt.subplots()
ax.plot(Px, Py, "ro")
ax.axis('scaled')
ax.add_artist(C1)
W = max(C[0]+R+1, C[1]+R+1)
Q = min(C[0]-R-1,C[1]-R-1 )
ax.set_xlim(Q, W)
ax.set_ylim(Q, W)
# ax.gca().set_aspect('equal', adjustable='box')
plt.show()
"""
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import multiprocessing
import time
import os
datalist=['+++']
# Child-process worker: appends items to the module-level datalist
def adddata():
global datalist
datalist.append(1)
datalist.append(2)
datalist.append(3)
print("sub process",os.getpid(),datalist);
if __name__=="__main__":
p=multiprocessing.Process(target=adddata,args=())
p.start()
p.join()
datalist.append("a")
datalist.append("b")
datalist.append("c")
print("main process",os.getpid(),datalist)
|
n = map(str, sorted(map(int, list(input())), reverse=True))
print(''.join(n))
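# Reads the digits of the input number and prints them in descending order,
# e.g. an input of 2736 prints 7632.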
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a collection of distinct numbers, return all possible permutations.
#
# For example,
# [1,2,3] have the following permutations:
# [
# [1,2,3],
# [1,3,2],
# [2,1,3],
# [2,3,1],
# [3,1,2],
# [3,2,1]
# ]
# 25 / 25 test cases passed.
# Status: Accepted
# Runtime: 75 ms
# Your runtime beats 34.78 % of python submissions.
class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = [[]]
for num in nums:
dummy = []
for perm in res:
for i in range(len(perm)+1):
dummy.append(perm[:i] + [num] + perm[i:])
res = dummy
return res
class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
from itertools import permutations
l = list(permutations(nums))
return l
# If you're using an older Python (<2.6) for some reason or are just curious to know how it works,
# here's one nice approach, taken from http://code.activestate.com/recipes/252178/:
def all_perms(elements):
if len(elements) <=1:
yield elements
else:
for perm in all_perms(elements[1:]):
for i in range(len(elements)):
# nb elements[0:1] works in both string and list contexts
yield perm[:i] + elements[0:1] + perm[i:]
# A couple of alternative approaches are listed in the documentation of itertools.permutations. Here's one:
def permutations(iterable, r=None):
# permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
# permutations(range(3)) --> 012 021 102 120 201 210
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
    indices = list(range(n))
    cycles = list(range(n, n - r, -1))
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
# And another, based on itertools.product:
from itertools import product
def permutations(iterable, r=None):
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
def permute(xs, low=0):
if low + 1 >= len(xs):
yield xs
else:
for p in permute(xs, low + 1):
yield p
for i in range(low + 1, len(xs)):
xs[low], xs[i] = xs[i], xs[low]
for p in permute(xs, low + 1):
yield p
xs[low], xs[i] = xs[i], xs[low]
# 25 / 25 test cases passed.
# Status: Accepted
# Runtime: 68 ms
# Your runtime beats 59.84 % of python submissions.
class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums:
return []
def permute(nums, begin):
if begin >= len(nums):
self.result += nums[:],
for i in range(begin, len(nums)):
nums[begin], nums[i] = nums[i], nums[begin]
permute(nums, begin + 1)
nums[begin], nums[i] = nums[i], nums[begin]
self.result = []
permute(nums, 0)
return self.result
if __name__ == '__main__':
print(Solution().permute([x for x in range(3)]))
|
# interface, controller and database access
print("Hola mundo")
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# File Name: insert_db.py
# By: Daniel Lamothe
#
# Purpose: Inserts the demo data for the Dire TunaFish Creature for the first Prototype. Beginnings of the Data Access
# Layer.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__ = 'User'
__name__ = "insert_db"
import sqlite3
def create_tuna_fish():
    db = sqlite3.connect(r'db\cst8333.db')
print("Opened database successfully.")
# Creates the Dire Tuna Fish creature
db.execute('''
INSERT INTO ACTIONS (name, description, attack, hit)
VALUES ("Slam", "Melee natural attack", "+7 to hit, reach 5ft., one target", "13 (2d8+4) bludgeoning damage" )
''')
db.execute('''
INSERT INTO ACTION_COLLECTION (action_ref)
VALUES (1)
''')
db.execute('''
INSERT INTO SENSES (darkvision)
VALUES ("Darkvision 40ft.")
''')
db.execute('''
INSERT INTO ATTRIBUTES (strength, dexterity, constitution, intelligence, wisdom, charisma)
VALUES (17, 14, 15, 5, 10, 5 )
''')
db.execute('''
INSERT INTO CREATURE (name, size, type, alignment, ac, hp, speed, attribute_ref, senses_ref, languages, challenge_rating, action_collection_ref)
VALUES ("Dire TunaFish", "Large", "beast", "true neutral", 12, "59 (7d10+21)", "70ft. (swim)", 1, 1, NULL, "2 (450xp)", 1)
''')
db.commit()
print("Changes saved.")
db.close()
# Creates the Dire Tuna Fish creature
def create_creature(creature):
    db = sqlite3.connect(r'db\cst8333.db')
print("Opened database successfully.")
cursor = db.cursor()
# Loops through the list of action dictionaries and persists them
    rowid_action_list = []
    for action in creature.actionSet:
        cursor.execute('''
            INSERT INTO ACTIONS (name, description, attack, hit)
            VALUES (?, ?, ?, ?)
            ''', (action.name, action.description, action.attack, action.hit))
        rowid_action_list.append(cursor.lastrowid)
    # Loops and registers each action record to an action_collection
    for action_rowid in rowid_action_list:
        cursor.execute('''
            INSERT INTO ACTION_COLLECTION (action_ref)
            VALUES (?)
            ''', (action_rowid,))
    rowid_action_collection = cursor.lastrowid
    # Inserts a record into the Senses table
    cursor.execute('''
        INSERT INTO SENSES (darkvision, tremorsense, blindsense)
        VALUES (?, ?, ?)
        ''', (creature.senses.get('DarkVision'), creature.senses.get('TremorSense'),
              creature.senses.get('BlindSense')))
    rowid_sense = cursor.lastrowid
    # Inserts a record into the Attributes table
    cursor.execute('''
        INSERT INTO ATTRIBUTES (strength, dexterity, constitution, intelligence, wisdom, charisma)
        VALUES (?, ?, ?, ?, ?, ?)
        ''', (creature.attributes.get('STR'),
              creature.attributes.get('DEX'),
              creature.attributes.get('CON'),
              creature.attributes.get('INT'),
              creature.attributes.get('WIS'),
              creature.attributes.get('CHA')))
    rowid_attributes = cursor.lastrowid
    # Inserts the creature object into the Creature table
    # (the original statement omitted the alignment value; creature.alignment is assumed here)
    cursor.execute('''
        INSERT INTO CREATURE (name, size, type, alignment, ac, hp, speed, attribute_ref, senses_ref, languages, challenge_rating, actions_ref)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ''', (creature.name, creature.size, creature.type, creature.alignment, creature.ac, creature.hp,
              creature.speed, rowid_attributes, rowid_sense, creature.languages, creature.challenge,
              rowid_action_collection))
db.commit()
print("Changes saved.")
db.close()
# Creates the basic dire tunafish as seen in Exercise 04
create_tuna_fish()
|
import json
a = json.dumps([{"a": 1, "b": 2}, {"a": 4, "b": 8}])
print([item["a"] for item in json.loads(a)])
|
class Solution(object):
def backspaceCompare(self, S, T):
"""
:type S: str
:type T: str
:rtype: bool
"""
return self.backspace(S) == self.backspace(T)
def backspace(self, s):
stack = []
for i in s:
if stack and i == "#":
stack.pop()
elif not stack and i == "#":
continue
else:
stack.append(i)
return stack
    # Alternative implementation; defining it again overrides the version above.
    def backspace(self, s):
stack = []
for i in s:
if i != "#":
stack.append(i)
elif stack:
stack.pop()
else:
continue
return stack
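if __name__ == '__main__':
    # Quick sanity check (example inputs assumed, not from the original):
    # "ab#c" and "ad#c" both reduce to "ac", so this prints True.
    print(Solution().backspaceCompare("ab#c", "ad#c"))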
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
    path('', views.root_method),  # note: shadowed by the '' route above, so views.index handles the root URL
path('another_route', views.another_method),
path('redirected_route', views.redirected_method)
]
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Link_Section)
admin.site.register(Pokraska)
admin.site.register(Uteplenie)
admin.site.register(Design)
admin.site.register(Otoplenie)
admin.site.register(Osteklenie_balkonov)
admin.site.register(Redecorating)
admin.site.register(Plitka)
admin.site.register(Krovlya)
admin.site.register(Poli)
admin.site.register(Raboti_pod_kluch)
admin.site.register(Santehnika)
admin.site.register(Potolki)
admin.site.register(Gipsokarton_peregorodki)
admin.site.register(Remont_vannoy)
admin.site.register(Reshetki)
admin.site.register(Oboi)
admin.site.register(Beton)
admin.site.register(Natyazhnoi_potolok)
admin.site.register(Workers)
|
#!/usr/bin/env python
from scapy.all import *
import sys
import argparse
import gzip
import dpkt
def generate(args, pcap_filename):
with gzip.open(pcap_filename, 'rb') if pcap_filename.endswith('.gz') else open(pcap_filename, 'rb') as pcap:
pcap_reader = dpkt.pcap.Reader(pcap)
packet_number = 0
for ts, p in pcap_reader:
packet_number += 1
sendp(p, iface = args.interface)
def main():
parser = argparse.ArgumentParser(description='FIX message generator')
parser.add_argument("-i", "--interface", default='veth4', help="bind to specified interface")
parser.add_argument("pcapfile", help="Name of the pcap file to process")
args = parser.parse_args()
generate(args, args.pcapfile)
if __name__=='__main__':
main()
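# Example invocation (hypothetical script, interface and capture file names):
#   python replay_script.py -i veth4 capture.pcap.gz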
|
from tkinter import *
root = Tk()
root.title('SIMPLE CALCULATOR')
frame = LabelFrame(root, text="CALCULATOR", padx=20, pady=10)
frame.pack(padx=100, pady=50)
def my_Click():
button0.configure(text = name.get())
button1.configure(text = name.get())
button2.configure(text=name.get())
button3.configure(text=name.get())
button4.configure(text = name.get())
button5.configure(text = name.get())
button6.configure(text = name.get())
button7.configure(text = name.get())
button8.configure(text = name.get())
button9.configure(text = name.get())
name = StringVar()
name_entered = Entry(root, width=30, textvariable=name)
name_entered.pack()
name_entered.focus()
button0 = Button(frame, text='0', command=my_Click)
button0.grid(row=0, column=0)
button1 = Button(frame, text='1', command=my_Click)
button1.grid(row=0, column=1)
button2 = Button(frame, text='2', command=my_Click)
button2.grid(row=0, column=2)
button3 = Button(frame, text='3', command=my_Click)
button3.grid(row=1, column=0)
button4 = Button(frame, text='4', command=my_Click)
button4.grid(row=1, column=1)
button5 = Button(frame, text='5', command=my_Click)
button5.grid(row=1, column=2)
button6 = Button(frame, text='6', command=my_Click)
button6.grid(row=2, column=0)
button7 = Button(frame, text='7', command=my_Click)
button7.grid(row=2, column=1)
button8 = Button(frame, text='8', command=my_Click)
button8.grid(row=2, column=2)
button9 = Button(frame, text='9', command=my_Click)
button9.grid(row=3, column=1)
root.mainloop()
|
from functools import reduce
import operator
def multi(lst):
return reduce(operator.mul, lst)
def add(lst):
return sum(lst)
def reverse(string):
return string[::-1]
'''
here are three functions:
Multiplication (x)
Addition (+)
and
Reverse (!esreveR)
first two use lists as input, last one a string.
'''
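# A minimal usage sketch (example values assumed, not from the original):
if __name__ == '__main__':
    print(multi([2, 3, 4]))      # 24
    print(add([2, 3, 4]))        # 9
    print(reverse('!esreveR'))   # Reverse!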
|
import json
import unittest
import responses
import pyyoutube
class ApiCommentTest(unittest.TestCase):
BASE_PATH = "testdata/apidata/comments/"
BASE_URL = "https://www.googleapis.com/youtube/v3/comments"
with open(BASE_PATH + "comments_single.json", "rb") as f:
COMMENTS_INFO_SINGLE = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "comments_multi.json", "rb") as f:
COMMENTS_INFO_MULTI = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "comments_by_parent_paged_1.json", "rb") as f:
COMMENTS_PAGED_1 = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "comments_by_parent_paged_2.json", "rb") as f:
COMMENTS_PAGED_2 = json.loads(f.read().decode("utf-8"))
def setUp(self) -> None:
self.api = pyyoutube.Api(api_key="api key")
def testGetCommentById(self) -> None:
# test parts
with self.assertRaises(pyyoutube.PyYouTubeException):
self.api.get_comment_by_id(comment_id="id", parts="id,not_part")
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.COMMENTS_INFO_SINGLE)
m.add("GET", self.BASE_URL, json=self.COMMENTS_INFO_MULTI)
res_by_single = self.api.get_comment_by_id(
comment_id="UgyUBI0HsgL9emxcZpR4AaABAg",
parts=["id", "snippet"],
return_json=True,
)
self.assertEqual(res_by_single["kind"], "youtube#commentListResponse")
self.assertEqual(len(res_by_single["items"]), 1)
self.assertEqual(
res_by_single["items"][0]["id"], "UgyUBI0HsgL9emxcZpR4AaABAg"
)
res_by_multi = self.api.get_comment_by_id(
comment_id=["UgyUBI0HsgL9emxcZpR4AaABAg", "Ugzi3lkqDPfIOirGFLh4AaABAg"],
parts=("id", "snippet"),
)
self.assertEqual(len(res_by_multi.items), 2)
self.assertEqual(res_by_multi.items[1].id, "Ugzi3lkqDPfIOirGFLh4AaABAg")
def testGetCommentsByParentId(self) -> None:
# test parts
with self.assertRaises(pyyoutube.PyYouTubeException):
self.api.get_comments(parent_id="id", parts="id,not_part")
# test paged
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.COMMENTS_PAGED_1)
m.add("GET", self.BASE_URL, json=self.COMMENTS_PAGED_2)
res_by_parent = self.api.get_comments(
parent_id="Ugw5zYU6n9pmIgAZWvN4AaABAg",
parts="id,snippet",
limit=2,
)
self.assertEqual(res_by_parent.kind, "youtube#commentListResponse")
self.assertEqual(len(res_by_parent.items), 3)
self.assertEqual(
res_by_parent.items[0].id,
"Ugw5zYU6n9pmIgAZWvN4AaABAg.91zT3cYb5B291za6voUoRh",
)
# test count
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.COMMENTS_PAGED_1)
res_by_parent = self.api.get_comments(
parent_id="Ugw5zYU6n9pmIgAZWvN4AaABAg",
parts="id,snippet",
count=2,
limit=2,
return_json=True,
)
self.assertEqual(len(res_by_parent["items"]), 2)
self.assertEqual(
res_by_parent["items"][0]["id"],
"Ugw5zYU6n9pmIgAZWvN4AaABAg.91zT3cYb5B291za6voUoRh",
)
# test get all comments
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.COMMENTS_PAGED_1)
m.add("GET", self.BASE_URL, json=self.COMMENTS_PAGED_2)
res_by_parent = self.api.get_comments(
parent_id="Ugw5zYU6n9pmIgAZWvN4AaABAg", parts="id,snippet", count=None
)
self.assertEqual(len(res_by_parent.items), 3)
# test use page token
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.COMMENTS_PAGED_2)
res_by_parent = self.api.get_comments(
parent_id="Ugw5zYU6n9pmIgAZWvN4AaABAg",
parts="id,snippet",
count=None,
page_token="R0FJeVZnbzBJTl9zNXRxNXlPWUNNaWtRQUJpQ3RNeW4wcFBtQWlBQktBTXdDam9XT1RGNlZETmpXV0kxUWpJNU1YcGhOV1ZLZUhwek1SSWVDQVVTR2xWbmR6VjZXVlUyYmpsd2JVbG5RVnBYZGs0MFFXRkJRa0ZuT2lBSUFSSWNOVHBWWjNjMWVsbFZObTQ1Y0cxSlowRmFWM1pPTkVGaFFVSkJadw==",
)
self.assertEqual(len(res_by_parent.items), 1)
|
t_api = '1272215182:AAHjwYdyiDyyW_rs4yuFIlRG_f5-ekK9O98'
w_api = 'fdd26f8f1df5fc938fc402b59ee614c1'
|
import flask, flask.views
import os
import functools
from flask import jsonify, request
from pathlib import Path
import librosa
import numpy as np
import pickle
import shutil
import socket
import re
import sys
from statistics import mean
from scipy.spatial.distance import euclidean
from keras.models import model_from_json
###################
# FLASK WEBAPP CODE
###################
app = flask.Flask(__name__)
app.secret_key = 'SECRET KEY' # PROVIDE YOUR SECRET KEY
# login credential database
users = {'SOME USER NAME #1': 'SOME PASSWORD #1', 'SOME USER NAME #2': 'SOME PASSWORD #2'} # PROVIDE LOGIN CREDENTIALS, ADD MORE IF NECESSARY
# Login page definition
class Main(flask.views.MethodView):
def get(self):
return flask.render_template('index.html')
def post(self):
if 'logout' in flask.request.form:
flask.session.pop('username', None)
return flask.redirect(flask.url_for('index'))
required = ['username', 'passwd']
for r in required:
if r not in flask.request.form:
flask.flash("Error: {} is required.".format(r))
return flask.redirect(flask.url_for('index'))
username = flask.request.form['username']
passwd = flask.request.form['passwd']
if username in users and users[username] == passwd:
flask.session['username'] = username
else:
flask.flash("Username doesn't exist or incorrect password")
return flask.redirect(flask.url_for('index'))
def login_required(method):
@functools.wraps(method)
def wrapper(*args, **kwargs):
if 'username' in flask.session:
return method(*args, **kwargs)
else:
flask.flash("A login is required to see the page!")
return flask.redirect(flask.url_for('index'))
return wrapper
# Music page definition
class Music(flask.views.MethodView):
@login_required
def get(self):
abs_songs = [] # to save absolute paths of recommended songs in original locations
lib_songs = [] # to save relative paths of recommended songs in music library
root = Path('.') # root directory of web server
# Load the dictionary of previously sorted distances
dist_sorted = pickle.load(open("dist_sorted.pkl", "rb"))
print(dist_sorted)
# Populate abs_songs with original absolute paths
for k in dist_sorted.keys():
abs_songs.append(song_path[k])
# Populate lib_songs with relative paths of songs from the music library
for s in abs_songs:
            assert (sys.platform == 'darwin' or sys.platform == 'win32'), "Unsuitable OS. Please use either macOS or Windows."
# MacOS
if sys.platform == 'darwin':
song = str(s).split('/')[-1] # extract file name from full path (original location)
track = list(root.glob("**/" + song))[0]
m = re.search(r'^.*\/(.*\/.*)$', str(track))
lib_songs.append(m.group(1))
# Windows
else:
song = str(s).split('\\')[-1] # extract file name from full path (original location)
track = list(root.glob("**\\" + song))[0]
m = re.search(r'^.*\\(.*\\.*)$', str(track))
lib_songs.append(m.group(1).replace('\\', '/'))
# Send recommended song list for rendering
return flask.render_template('music.html', songs=lib_songs)
app.add_url_rule('/', view_func=Main.as_view('index'), methods=['GET', 'POST'])
app.add_url_rule('/music/', view_func=Music.as_view('music'), methods=['GET'])
########################
# NON-FLASK RELATED CODE
########################
#
# ************************************************
# Step 1: Load Previously trained Genre Classifier
# ************************************************
# Load the json file that contains the model's structure
f = Path("music_genre_classifier_structure.json")
model_structure = f.read_text()
# Recreate music genre classifier model from json data
model = model_from_json(model_structure)
# Re-load the model's trained weights
model.load_weights("music_genre_classifier_weights.h5")
# *****************************************************
# Step 2: Create Feature Vectors of Unclassified Songs
# *****************************************************
# Create a list of absolute paths for all music tracks
assert (sys.platform == 'darwin' or sys.platform == 'win32'), "Unsuitable OS used!!"
# MacOS
if sys.platform == 'darwin': # MacOS
source = Path('[ROOT PATH TO YOUR OWN MUSIC LIBRARY]') # <--- UPDATE THIS LINE B4 USING!!
file_extension = "**/*.m4a" # <--- UPDATE THIS LINE WITH THE RIGHT FILE EXTENSION B4 USING!!
else: # Windows
source = Path('[ROOT PATH TO YOUR OWN MUSIC LIBRARY]') # <--- UPDATE THIS LINE B4 USING!!
file_extension = "**\*.m4a" # <--- UPDATE THIS LINE WITH THE RIGHT FILE EXTENSION B4 USING!!
song_path = [file for file in source.glob(file_extension)]
# Load the standardizer (scaler) from previously trained model
standardizer = pickle.load(open("scaler.pkl", "rb"))
def feature_extraction(song_path, scaler):
"""
ACTION:
-- Create an array of 26-feature vectors (standardized) from all songs listed in the song_path
INPUT:
-- song_path = a list of absolute file paths of songs
-- scaler = standardizer previously created from model training
OUTPUT:
-- X_scaled = an array of 26-feature vectors (standardized)
"""
array = []
count = 0
total = len(song_path)
for song in song_path:
count += 1
print(f">>> Song #{count} of {total} <<<")
print(song)
# Extract 26 features (individual arrays) from each song
y, sr = librosa.load(song) # full length of song
chroma_stft = librosa.feature.chroma_stft(y) # chromagram
rmse = librosa.feature.rmse(y) # root-mean-square error (RMSE) value for each frame
spec_cent = librosa.feature.spectral_centroid(y)
spec_bw = librosa.feature.spectral_bandwidth(y)
rolloff = librosa.feature.spectral_rolloff(y)
zcr = librosa.feature.zero_crossing_rate(y)
mfcc = librosa.feature.mfcc(y) # 20 Mel-frequency cepstral coefficients (MFCCs)
# Create a 26-feature vector from the mean value of each feature array
vector = []
vector.append(np.mean(chroma_stft))
vector.append(np.mean(rmse))
vector.append(np.mean(spec_cent))
vector.append(np.mean(spec_bw))
vector.append(np.mean(rolloff))
vector.append(np.mean(zcr))
for e in mfcc:
vector.append(np.mean(e))
array.append(vector)
# Standardize 26-feature vectors with the trained scaler
X = np.array(array)
X_scaled = scaler.transform(X)
return X_scaled
# "vec_library.npy" is an exported copy of an array that contains all 26-feature vectors
# generated from the local music library. The file may exist.
my_library = Path("vec_library.npy")
# If the feature array doesn't exist or isn't up-to-date, create it (this may be time-consuming)
if not (my_library.is_file() and len(np.load(my_library, allow_pickle=True))==len(song_path)):
X = feature_extraction(song_path, standardizer)
np.save(my_library, X)
else:
X = np.load(my_library, allow_pickle=True)
# *********************************************
# Step 3: Predict Genres of Unclassified Songs
# *********************************************
# Load the genre label encoder from previously trained model
encoder = pickle.load(open("encoder.pkl", "rb"))
def genre_prediction(X, classifier, labelcoder):
"""
ACTION:
-- Predict genre of each song based on its standardized 26-feature vector in the array (X)
INPUTS:
-- X = an array containing standardized 26-feature vectors of all the songs
-- classifier = genre classifier model (to predict genre)
-- labelcoder = genre label encoder (to convert genre code to genre name)
OUTPUT:
-- genres = an array containing genre names of all the songs
"""
prediction = classifier.predict(X)
# Predict genre of each song with the highest probability
g = [np.argmax(prediction[i]) for i in range(prediction.shape[0])]
# Convert genre code to genre name for each song
    # Use the label encoder passed in as a parameter rather than the module-level one
    genres = labelcoder.inverse_transform(g)
return genres
# "genres.npy" is an exported copy of an array that contains genres of all the songs in the local music library.
# This file may exist.
my_genres = Path("genres.npy")
# If the genre array doesn't exist or isn't up-to-date, create it
if not (my_genres.is_file() and len(np.load(my_genres, allow_pickle=True))==len(song_path)):
genres = genre_prediction(X, model, encoder)
np.save(my_genres, genres)
else:
genres = np.load(my_genres, allow_pickle=True)
# *****************************************
# Step 4: Create Music Library in Web Site
# *****************************************
parent = Path('.')
library = parent/"static/music"
def create_library(parent_folder, song_loc, genres):
"""
ACTION:
-- Create music library, if not exists, under the web server root directory
INPUTS:
-- parent_folder = the library folder
-- song_loc = a list of songs' absolute paths
-- genres = an array of genres identified by the <genre_prediction> function
OUTPUT:
-- <NULL>
"""
i = 0
files = [x for x in parent_folder.glob('**/*.m4a')] # UPDATE FILE EXTENSION IF NECESSARY
# Only copy files when the library is empty or not up-to-date
if len(song_loc) != len(files):
# Clear the entire library
shutil.rmtree(parent_folder, ignore_errors=True)
# Create library
for genre in genres:
p = parent_folder/genre
# Create folder if not exists
if not p.exists():
p.mkdir(parents=True)
# Copy songs to designated folders
shutil.copy(song_loc[i], str(p))
i += 1
create_library(library, song_path, genres)
# *******************************************************
# Step 5: Find Mid-point Feature Vector of a given Genre
# *******************************************************
def genre_midpoint_vector(genres, X):
"""
PURPOSE:
-- Mid-point vector will be used at the client side when requesting remote web servers for song recommendations.
-- When a reference track is not given, the client needs to find a mid-point feature vector from
-- all songs of that particular genre in the client's library.
ACTIONS:
-- Compute and return the 26-feature mid-point vector of each genre found in the client's machine
INPUTS:
-- genres = genre array returned by <genre_prediction> function
-- X = standardized feature array returned by <feature_extraction> function
Outputs:
-- mid_dict = dict{genres: mid-point vectors} for ease of reference
"""
# Initialize a dictionary to record the 26-feature mid-point vector for each genre in source
mid_dict = {}
for g in set(genres): # remove duplicate genres
# Find indices that correspond to the given genre (g) in the genre array (genres)
indices = np.where(genres==g)
# Compute the mid-point vector for each genre
mid = np.average(X[indices], axis=0)
# Save the mid-point vector to dictionary (mid_dict)
mid_dict[g] = mid
return mid_dict
midpoint = genre_midpoint_vector(genres, X)
def get_ip():
"""
ACTION:
-- Find IP address of the host (receiver) running this function
INPUT:
-- <NULL>
OUTPUT:
-- IP = Host IP address in a string
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
@app.route('/distance', methods=['POST', 'GET'])
def min_dest_distance():
"""
ACTION:
-- Given a genre (g), compute the Euclidean distance between a given feature vector (vec) and each vector of
the same genre from the music library of the receiving web server
-- Return a dictionary that contains Web Server's IP and Mean Euclidean Distance, where distance = -1
        implies this server doesn't have any track of the requested genre
VARIABLES:
-- g = genre name either passed from source chatbot or determined from source reference track by
<genre_prediction> function
-- vec = mid-point feature vector of the genre returned from the <genre_midpoint_vector> function or extracted
feature vector of a reference track from the <feature_extraction> function.
-- destX = Standardized feature vector array of all the tracks in destination web server
-- destGenres = genre array of all the tracks in destination web server
OUTPUT:
-- Dictionary {Web Server's IP: Mean Euclidean Distance}
"""
input_params = request.get_json()
g = input_params['g'] # passed from chatbot
vec = np.array(input_params["vec"]) # passed from chatbot
destX = X # generated by function "feature_extraction()"
destGenres = genres # generated by function "genre_prediction()"
# Initialization
dist = {} # dict for saving Euclidean distances
mean_dist = -1 # for indicating this web server has no song of the requested genre (default)
host_ip = get_ip() # IP address of this web server
# Obtain indices of all songs with the same genre (g)
indices = np.where(destGenres==g) # a one-element tuple containing indices as an array
# Select feature vectors of all songs with the same genre (g)
filtered_destX = destX[indices]
# Calculate Euclidean distance between given vector and each vector in the destination server and
    # Save the distance with its corresponding index in the dict (dist)
for i, v in zip(indices[0], filtered_destX):
dist[i] = dist.get(i, euclidean(v, vec))
# Sort dict by distance in ascending order
dist_sorted = dict(sorted(dist.items(), key=lambda kv: kv[1]))
# Save sorted dict as a pickle file to be used later for music recommendation
pickle.dump(dist_sorted, open("dist_sorted.pkl", "wb"))
# Find the mean Euclidean distance
if len(dist_sorted) > 0: # dist_sorted not empty
mean_dist = mean(dist_sorted.values())
# mean_dist = -1 if dist_sorted is empty
return jsonify({'host_ip': host_ip, 'mean_dist': mean_dist})
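# A minimal client-side sketch for the /distance endpoint (hypothetical host and genre,
# assuming the `requests` package is available; not part of the original app):
#
#   import requests
#   payload = {'g': 'rock', 'vec': midpoint['rock'].tolist()}
#   r = requests.post('http://<server-ip>:5001/distance', json=payload)
#   print(r.json())   # e.g. {'host_ip': '...', 'mean_dist': 1.23}; mean_dist is -1 if no match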
app.run(host='0.0.0.0', port=5001)
|
# Generated by Django 2.1.1 on 2018-09-27 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0009_remove_type_price_type'),
]
operations = [
migrations.AlterField(
model_name='product',
name='slug_product',
field=models.SlugField(blank=True, editable=False, max_length=128, null=True, unique=True),
),
migrations.AlterField(
model_name='type',
name='slug_type',
field=models.SlugField(blank=True, editable=False, max_length=128, null=True, unique=True),
),
]
|
import os
from dataloader import CIFAR10
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as datautil
from model import network
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')
print(device)
paths = ['./data/cifar-10-batches-py/data_batch_1',
'./data/cifar-10-batches-py/data_batch_2',
'./data/cifar-10-batches-py/data_batch_3',
'./data/cifar-10-batches-py/data_batch_4',
'./data/cifar-10-batches-py/data_batch_5']
def main():
data = CIFAR10(paths)
data_loader = datautil.DataLoader(dataset=data, batch_size=512, num_workers=8, shuffle=True)
epoch = 0
net = network().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
running_loss = 0
for ep in tqdm(range(epoch, 50)):
for i, (image,label) in tqdm(enumerate(data_loader)):
optimizer.zero_grad()
# print (repr(image))
output = net(image.to(device,dtype=torch.float32))
# print(label)
loss = criterion(output, label.to(device))
loss.backward()
optimizer.step()
# running_loss += loss.item()
            if i % 2000 == 0:
                print(loss.item())
torch.save(net.state_dict(), './model/model1.pt')
if __name__ == '__main__':
main()
|
"""
Tests of the neo.core.block.Block class
"""
from datetime import datetime
from copy import deepcopy
import unittest
import numpy as np
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.block import Block
from neo.core.container import filterdata
from neo.core import SpikeTrain, Unit, AnalogSignal
from neo.test.tools import (assert_neo_object_is_compliant,
assert_same_sub_schema)
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, clone_object,
get_annotations, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = {str(x): TEST_ANNOTATIONS[x] for x in
range(len(TEST_ANNOTATIONS))}
def test__get_fake_values(self):
self.annotations['seed'] = 0
file_datetime = get_fake_value('file_datetime', datetime, seed=0)
rec_datetime = get_fake_value('rec_datetime', datetime, seed=1)
index = get_fake_value('index', int, seed=2)
name = get_fake_value('name', str, seed=3, obj=Block)
description = get_fake_value('description', str, seed=4, obj='Block')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'file_datetime': file_datetime,
'rec_datetime': rec_datetime,
'index': index,
'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(Block, annotate=False, seed=0)
res12 = get_fake_values('Block', annotate=False, seed=0)
res21 = get_fake_values(Block, annotate=True, seed=0)
res22 = get_fake_values('Block', annotate=True, seed=0)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = 'Block'
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
for child in res.children_recur:
del child.annotations['i']
del child.annotations['j']
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 1)
seg = res.segments[0]
self.assertEqual(seg.annotations, self.annotations)
self.assertEqual(len(res.channel_indexes), 1)
chx = res.channel_indexes[0]
self.assertEqual(chx.annotations, self.annotations)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.irregularlysampledsignals), 1)
self.assertEqual(len(seg.spiketrains), 1)
self.assertEqual(len(seg.events), 1)
self.assertEqual(len(seg.epochs), 1)
self.assertEqual(seg.analogsignals[0].annotations,
self.annotations)
self.assertEqual(seg.analogsignals[0].annotations,
self.annotations)
self.assertEqual(seg.irregularlysampledsignals[0].annotations,
self.annotations)
self.assertEqual(seg.spiketrains[0].annotations,
self.annotations)
self.assertEqual(seg.events[0].annotations,
self.annotations)
self.assertEqual(seg.epochs[0].annotations,
self.annotations)
self.assertEqual(len(chx.units), 1)
unit = chx.units[0]
self.assertEqual(unit.annotations, self.annotations)
self.assertEqual(len(chx.analogsignals), 1)
self.assertEqual(chx.analogsignals[0].annotations,
self.annotations)
self.assertEqual(len(unit.spiketrains), 1)
self.assertEqual(unit.spiketrains[0].annotations,
self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = Block
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 0)
self.assertEqual(len(res.channel_indexes), 0)
class TestBlock(unittest.TestCase):
def setUp(self):
self.nchildren = 2
self.seed1 = 0
self.seed2 = 10000
self.blk1 = fake_neo(Block, seed=self.seed1, n=self.nchildren)
self.blk2 = fake_neo(Block, seed=self.seed2, n=self.nchildren)
self.targobj = self.blk1
self.segs1 = self.blk1.segments
self.segs2 = self.blk2.segments
self.chxs1 = self.blk1.channel_indexes
self.chxs2 = self.blk2.channel_indexes
self.units1 = [[unit for unit in chx.units] for chx in self.chxs1]
self.units2 = [[unit for unit in chx.units] for chx in self.chxs2]
self.units1 = sum(self.units1, [])
self.units2 = sum(self.units2, [])
self.sigarrs1 = [[sigarr for sigarr in chx.analogsignals]
for chx in self.chxs1]
self.sigarrs2 = [[sigarr for sigarr in chx.analogsignals]
for chx in self.chxs2]
self.trains1 = [[train for train in unit.spiketrains]
for unit in self.units1]
self.trains2 = [[train for train in unit.spiketrains]
for unit in self.units2]
self.irsigs1 = [[irsig for irsig in chx.irregularlysampledsignals]
for chx in self.chxs1]
self.irsigs2 = [[irsig for irsig in chx.irregularlysampledsignals]
for chx in self.chxs2]
self.epcs1 = [[epc for epc in seg.epochs]
for seg in self.segs1]
self.epcs2 = [[epc for epc in seg.epochs]
for seg in self.segs2]
self.evts1 = [[evt for evt in seg.events]
for seg in self.segs1]
self.evts2 = [[evt for evt in seg.events]
for seg in self.segs2]
self.img_seqs1 = [[imgseq for imgseq in seg.imagesequences]
for seg in self.segs1]
self.img_seqs2 = [[imgseq for imgseq in seg.imagesequences]
for seg in self.segs2]
self.sigarrs1 = sum(self.sigarrs1, [])
self.sigarrs2 = sum(self.sigarrs2, [])
self.trains1 = sum(self.trains1, [])
self.trains2 = sum(self.trains2, [])
self.irsigs1 = sum(self.irsigs1, [])
self.irsigs2 = sum(self.irsigs2, [])
self.epcs1 = sum(self.epcs1, [])
self.epcs2 = sum(self.epcs2, [])
self.evts1 = sum(self.evts1, [])
self.evts2 = sum(self.evts2, [])
self.img_seqs1 = sum(self.img_seqs1, [])
self.img_seqs2 = sum(self.img_seqs2, [])
def test_block_init(self):
blk = Block(name='a block')
assert_neo_object_is_compliant(blk)
self.assertEqual(blk.name, 'a block')
self.assertEqual(blk.file_origin, None)
def check_creation(self, blk):
assert_neo_object_is_compliant(blk)
seed = blk.annotations['seed']
targ0 = get_fake_value('file_datetime', datetime, seed=seed + 0)
self.assertEqual(blk.file_datetime, targ0)
targ1 = get_fake_value('rec_datetime', datetime, seed=seed + 1)
self.assertEqual(blk.rec_datetime, targ1)
targ2 = get_fake_value('index', int, seed=seed + 2, obj=Block)
self.assertEqual(blk.index, targ2)
targ3 = get_fake_value('name', str, seed=seed + 3, obj=Block)
self.assertEqual(blk.name, targ3)
targ4 = get_fake_value('description', str, seed=seed + 4, obj=Block)
self.assertEqual(blk.description, targ4)
targ5 = get_fake_value('file_origin', str)
self.assertEqual(blk.file_origin, targ5)
targ6 = get_annotations()
targ6['seed'] = seed
self.assertEqual(blk.annotations, targ6)
self.assertTrue(hasattr(blk, 'channel_indexes'))
self.assertTrue(hasattr(blk, 'segments'))
self.assertEqual(len(blk.channel_indexes), self.nchildren)
self.assertEqual(len(blk.segments), self.nchildren)
def test__creation(self):
self.check_creation(self.blk1)
self.check_creation(self.blk2)
def test__merge(self):
blk1a = fake_neo(Block,
seed=self.seed1, n=self.nchildren)
assert_same_sub_schema(self.blk1, blk1a)
blk1a.annotate(seed=self.seed2)
blk1a.segments.append(self.segs2[0])
blk1a.merge(self.blk2)
segs1a = clone_object(self.blk1).segments
chxs1a = clone_object(self.chxs1)
assert_same_sub_schema(chxs1a + self.chxs2,
blk1a.channel_indexes)
assert_same_sub_schema(segs1a + self.segs2,
blk1a.segments)
def test__children(self):
segs1a = clone_object(self.blk1).segments
chxs1a = clone_object(self.chxs1)
self.assertEqual(self.blk1._container_child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._data_child_objects, ())
self.assertEqual(self.blk1._single_parent_objects, ())
self.assertEqual(self.blk1._multi_child_objects, ())
self.assertEqual(self.blk1._multi_parent_objects, ())
self.assertEqual(self.blk1._child_properties,
('Unit',))
self.assertEqual(self.blk1._single_child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._container_child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._data_child_containers, ())
self.assertEqual(self.blk1._single_child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._single_parent_containers, ())
self.assertEqual(self.blk1._multi_child_containers, ())
self.assertEqual(self.blk1._multi_parent_containers, ())
self.assertEqual(self.blk1._child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._parent_objects, ())
self.assertEqual(self.blk1._parent_containers, ())
self.assertEqual(len(self.blk1._single_children), 2 * self.nchildren)
self.assertEqual(len(self.blk1._multi_children), 0)
self.assertEqual(len(self.blk1.data_children), 0)
self.assertEqual(len(self.blk1.data_children_recur),
1 * self.nchildren ** 3 + 5 * self.nchildren ** 2)
self.assertEqual(len(self.blk1.container_children), 2 * self.nchildren)
self.assertEqual(len(self.blk1.container_children_recur),
2 * self.nchildren + 1 * self.nchildren ** 2)
self.assertEqual(len(self.blk1.children), 2 * self.nchildren)
self.assertEqual(len(self.blk1.children_recur),
2 * self.nchildren +
6 * self.nchildren ** 2 +
1 * self.nchildren ** 3)
self.assertEqual(self.blk1._multi_children, ())
assert_same_sub_schema(list(self.blk1._single_children),
self.segs1 + self.chxs1)
assert_same_sub_schema(list(self.blk1.container_children),
self.segs1 + self.chxs1)
assert_same_sub_schema(list(self.blk1.container_children_recur),
self.segs1 + self.chxs1 +
self.units1[:2] +
self.units1[2:])
assert_same_sub_schema(list(self.blk1.data_children_recur),
self.sigarrs1[::2] +
self.epcs1[:2] + self.evts1[:2] +
self.irsigs1[::2] +
self.trains1[::2] +
self.img_seqs1[:2] +
self.sigarrs1[1::2] +
self.epcs1[2:] + self.evts1[2:] +
self.irsigs1[1::2] +
self.trains1[1::2] +
self.img_seqs1[2:],
exclude=['channel_index'])
assert_same_sub_schema(list(self.blk1.children),
segs1a + chxs1a)
assert_same_sub_schema(list(self.blk1.children_recur),
self.sigarrs1[::2] +
self.epcs1[:2] + self.evts1[:2] +
self.irsigs1[::2] +
self.trains1[::2] +
self.img_seqs1[:2] +
self.sigarrs1[1::2] +
self.epcs1[2:] + self.evts1[2:] +
self.irsigs1[1::2] +
self.trains1[1::2] +
self.img_seqs1[2:] +
self.segs1 + self.chxs1 +
self.units1[:2] +
self.units1[2:],
exclude=['channel_index'])
def test__size(self):
targ = {'segments': self.nchildren,
'channel_indexes': self.nchildren}
self.assertEqual(self.targobj.size, targ)
def test__filter_none(self):
targ = []
# collecting all data objects in target block
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
targ.extend(seg.epochs)
targ.extend(seg.events)
targ.extend(seg.irregularlysampledsignals)
targ.extend(seg.spiketrains)
targ.extend(seg.imagesequences)
res1 = self.targobj.filter()
res2 = self.targobj.filter({})
res3 = self.targobj.filter([])
res4 = self.targobj.filter([{}])
res5 = self.targobj.filter([{}, {}])
res6 = self.targobj.filter([{}, {}])
res7 = self.targobj.filter(targdict={})
res8 = self.targobj.filter(targdict=[])
res9 = self.targobj.filter(targdict=[{}])
res10 = self.targobj.filter(targdict=[{}, {}])
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
def test__filter_annotation_single(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]])
res0 = self.targobj.filter(j=1)
res1 = self.targobj.filter({'j': 1})
res2 = self.targobj.filter(targdict={'j': 1})
res3 = self.targobj.filter([{'j': 1}])
res4 = self.targobj.filter(targdict=[{'j': 1}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_single_annotation_nores(self):
targ = []
res0 = self.targobj.filter(j=5)
res1 = self.targobj.filter({'j': 5})
res2 = self.targobj.filter(targdict={'j': 5})
res3 = self.targobj.filter([{'j': 5}])
res4 = self.targobj.filter(targdict=[{'j': 5}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_attribute_single(self):
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_attribute_single_nores(self):
targ = []
name = self.trains2[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.trains1[0]])
name = self.trains1[0].name
res0 = self.targobj.filter(name=name, j=1)
res1 = self.targobj.filter({'name': name, 'j': 1})
res2 = self.targobj.filter(targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_nores(self):
targ = []
name0 = self.sigarrs2[0].name
res0 = self.targobj.filter([{'j': 5}, {}])
res1 = self.targobj.filter({}, j=5)
res2 = self.targobj.filter([{}], i=6)
res3 = self.targobj.filter({'name': name0}, j=1)
res4 = self.targobj.filter(targdict={'name': name0}, j=1)
res5 = self.targobj.filter(name=name0, targdict={'j': 1})
res6 = self.targobj.filter(name=name0, j=5)
res7 = self.targobj.filter({'name': name0, 'j': 5})
res8 = self.targobj.filter(targdict={'name': name0, 'j': 5})
res9 = self.targobj.filter({'name': name0}, j=5)
res10 = self.targobj.filter(targdict={'name': name0}, j=5)
res11 = self.targobj.filter(name=name0, targdict={'j': 5})
res12 = self.targobj.filter({'name': name0}, j=5)
res13 = self.targobj.filter(targdict={'name': name0}, j=5)
res14 = self.targobj.filter(name=name0, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filter_multi_partres_annotation_attribute(self):
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = self.targobj.filter(name=name, j=90)
res1 = self.targobj.filter({'name': name, 'j': 90})
res2 = self.targobj.filter(targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_partres_annotation_annotation(self):
targ = self.trains1[::2]
res0 = self.targobj.filter([{'j': 0}, {'i': 0}])
res1 = self.targobj.filter({'j': 0}, i=0)
res2 = self.targobj.filter([{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_no_annotation_but_object(self):
targ = []
for seg in self.targobj.segments:
targ.extend(seg.spiketrains)
res = self.targobj.filter(objects=SpikeTrain)
assert_same_sub_schema(res, targ)
targ = []
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
res = self.targobj.filter(objects=AnalogSignal)
assert_same_sub_schema(res, targ)
targ = []
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
targ.extend(seg.spiketrains)
res = self.targobj.filter(objects=[AnalogSignal, SpikeTrain])
assert_same_sub_schema(res, targ)
def test__filter_single_annotation_obj_single(self):
targ = self.trains1[1::2]
res0 = self.targobj.filter(j=1, objects='SpikeTrain')
res1 = self.targobj.filter(j=1, objects=SpikeTrain)
res2 = self.targobj.filter(j=1, objects=['SpikeTrain'])
res3 = self.targobj.filter(j=1, objects=[SpikeTrain])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
def test__filter_single_annotation_norecur(self):
targ = []
res0 = self.targobj.filter(j=1, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata(self):
targ = []
res0 = self.targobj.filter(j=1, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3]])
res0 = self.targobj.filter(j=1, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_data(self):
targ = [self.trains1[0]]
res0 = self.targobj.filter(name=self.trains1[0].name, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container_norecur(self):
targ = [self.segs1[1], self.chxs1[1]]
res0 = self.targobj.filter(j=1, container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container(self):
targ = [self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3]]
res0 = self.targobj.filter(j=1,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container_norecur(self):
targ = [self.segs1[1], self.chxs1[1]]
res0 = self.targobj.filter(j=1,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filterdata_multi(self):
data = self.targobj.children_recur
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3],
self.trains1[0]])
name = self.trains1[0].name
res0 = filterdata(data, name=name, j=1)
res1 = filterdata(data, {'name': name, 'j': 1})
res2 = filterdata(data, targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_nores(self):
data = self.targobj.children_recur
targ = []
name1 = self.sigarrs1[0].name
name2 = self.sigarrs2[0].name
res0 = filterdata(data, [{'j': 6}, {}])
res1 = filterdata(data, {}, i=6)
res2 = filterdata(data, [{}], i=6)
res3 = filterdata(data, name=name1, targdict={'j': 1})
res4 = filterdata(data, {'name': name1}, j=1)
res5 = filterdata(data, targdict={'name': name1}, j=1)
res6 = filterdata(data, name=name2, j=6)
res7 = filterdata(data, {'name': name2, 'j': 6})
res8 = filterdata(data, targdict={'name': name2, 'j': 6})
res9 = filterdata(data, {'name': name2}, j=6)
res10 = filterdata(data, targdict={'name': name2}, j=6)
res11 = filterdata(data, name=name2, targdict={'j': 6})
res12 = filterdata(data, {'name': name1}, j=6)
res13 = filterdata(data, targdict={'name': name1}, j=6)
res14 = filterdata(data, name=name1, targdict={'j': 6})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filterdata_multi_partres_annotation_attribute(self):
data = self.targobj.children_recur
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = filterdata(data, name=name, j=90)
res1 = filterdata(data, {'name': name, 'j': 90})
res2 = filterdata(data, targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_partres_annotation_annotation(self):
data = self.targobj.children_recur
targ = (self.trains1[::2] +
self.segs1[:1] + self.units1[::2])
res0 = filterdata(data, [{'j': 0}, {'i': 0}])
res1 = filterdata(data, {'j': 0}, i=0)
res2 = filterdata(data, [{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
# @unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
# def test__pretty(self):
# res = pretty(self.blk1)
# ann = get_annotations()
# ann['seed'] = self.seed1
# ann = pretty(ann).replace('\n ', '\n ')
#
# seg0 = pretty(self.segs1[0])
# seg1 = pretty(self.segs1[1])
# seg0 = seg0.replace('\n', '\n ')
# seg1 = seg1.replace('\n', '\n ')
#
# targ = ("Block with " +
# ("%s segments, %s channel_indexes\n" %
# (len(self.segs1), len(self.chxs1))) +
# ("name: '%s'\ndescription: '%s'\n" % (self.blk1.name,
# self.blk1.description)) +
# ("annotations: %s\n" % ann) +
# ("file_origin: '%s'\n" % self.blk1.file_origin) +
# ("file_datetime: %s\n" % repr(self.blk1.file_datetime)) +
# ("rec_datetime: %s\n" % repr(self.blk1.rec_datetime)) +
# ("index: %s\n" % self.blk1.index) +
#
#
# ("# segments (N=%s)\n" % len(self.segs1)) +
# ('%s: %s\n' % (0, seg0)) +
# ('%s: %s' % (1, seg1)))
#
# self.assertEqual(res, targ)
def test_block_list_units(self):
assert_same_sub_schema(self.units1, self.blk1.list_units)
assert_same_sub_schema(self.units2, self.blk2.list_units)
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class(Unit))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class(Unit))
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class('Unit'))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class('Unit'))
def test__deepcopy(self):
blk1_copy = deepcopy(self.blk1)
# Check links from parents to children
assert_same_sub_schema(blk1_copy, self.blk1)
# Check links from children to parents
for segment in blk1_copy.segments:
self.assertEqual(id(segment.block), id(blk1_copy))
for sig in segment.analogsignals:
self.assertEqual(id(sig.segment), id(segment))
for sptr in segment.spiketrains:
self.assertEqual(id(sptr.segment), id(segment))
for chidx in blk1_copy.channel_indexes:
self.assertEqual(id(chidx.block), id(blk1_copy))
for sig in chidx.analogsignals:
self.assertEqual(id(sig.channel_index), id(chidx))
for sig in chidx.irregularlysampledsignals:
self.assertEqual(id(sig.channel_index), id(chidx))
for unit in chidx.units:
self.assertEqual(id(unit.channel_index), id(chidx))
for sptr in unit.spiketrains:
self.assertEqual(id(sptr.unit), id(unit))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by misaka-10032 (longqic@andrew.cmu.edu).
All rights reserved.
Get gadgets from asm
"""
__author__ = 'misaka-10032'
import argparse
import re
import subprocess
def get_bytes(line):
try:
return re.search('.*:\t(([0-9a-f]{2,} )*).*', line).group(1)
    except AttributeError:
return ''
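# Illustrative only (not from the original script): on a hypothetical
# objdump-style line, get_bytes keeps just the raw hex byte column, e.g.
#   get_bytes('  4004d6:\t55 48 89 e5 \tpush %rbp')  ->  '55 48 89 e5 '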
def main(args):
f = open(args.asm, 'rb')
bytes = ''
for line in f:
bytes += get_bytes(line)
segs = filter(lambda s: len(s) > 0, bytes.split('c3 '))
out = ''
for seg in segs:
bytes = seg.split(' ')[:-1]
gs = [bytes[-i:] for i in xrange(1, len(bytes)+1)]
for g in gs:
bytes = ' '.join(g) + ' c3'
prog = ("echo '%s' | udcli -64 -x -att" % bytes)
popen = subprocess.Popen(prog, shell=True, stdout=subprocess.PIPE)
result = popen.stdout.read()
if 'invalid' not in result:
out += result
out += '\n'
print out
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('asm', help='Input asm')
main(parser.parse_args())
|
pref_cred = {}
|
from .abstract_repository_analyzer import AbstractRepositoryAnalyzer
import subprocess
import os
import logging
class MercurialRepositoryAnalyzer(AbstractRepositoryAnalyzer):
"""
Analysis plug-in for mercurial repositories.
"""
def count_repo_branches(self, repo_path: str, remote: str) -> None:
"""
Counts the repository's branches.
:param repo_path: path to the repository root.
:param remote: remote uri of the branches
:return: None
"""
branches = subprocess.check_output("cd " + repo_path + ";hg branches | wc -l", shell=True)
self.get_details(remote)["branch_count"] = int(branches)
def count_repo_contributors(self, repo_path: str, remote: str) -> None:
"""
Counts the repository's contributors.
:param repo_path: path to the repository root.
:param remote: remote uri of the branches
:return: None
"""
contributors = subprocess.check_output('cd ' + repo_path + ';hg log --template "{author|person}\n" | sort | uniq | wc -l', shell=True)
self.get_details(remote)["contributors"] = int(contributors)
def extract_repo_url(self, repo_path: str) -> str:
"""
        Extracts the Remote URL from a given Mercurial repository-path.
:param repo_path: path to the repository root.
:return: Remote URL
"""
try:
return subprocess.check_output("cd " + repo_path + ";hg paths default", shell=True).decode("utf-8").rstrip("\n")
except subprocess.CalledProcessError:
return ""
def extract_last_repo_update(self, repo_path: str, remote: str) -> None:
"""
Extracts the repository's last update-timestamp.
:param repo_path: path to the repository root.
:param remote: remote uri of the branches
:return: None
"""
timestamp = subprocess.check_output("cd " + repo_path + ";hg log --limit 1 --template '{date(date, \"%s\")}'", shell=True)
self.get_details(remote)["last_update"] = int(timestamp)
def _analyze(self, path: str, repo_details: dict) -> None:
self._repo_details = repo_details
for folder in os.listdir(path):
# Build path and inform user...
current_path = path + "/" + folder + ""
logging.info("[MercurialRepositoryAnalyzer]: Analyzing:" + current_path)
# Extract origin url.
origin_url = self.extract_repo_url(current_path)
# If origin_url is empty string, then this is not a valid mercurial-repository.
if origin_url != "":
# Mercurial analysis.
self.count_repo_contributors(current_path, origin_url)
self.count_repo_branches(current_path, origin_url)
self.extract_last_repo_update(current_path, origin_url)
yield (current_path, origin_url)
else:
logging.warning("[MercurialRepositoryAnalyzer]: " + current_path + " is not a valid repository...")
def analyzes(self):
return "hg"
|
import requests
import re
import json
import csv
from bs4 import BeautifulSoup
url = "http://catalog.gatech.edu/coursesaz/"
base_url = "http://catalog.gatech.edu"
def get_hrefs():
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
courses = soup.find('div', id="atozindex")
links = courses.find_all('a', href=True)
hrefs = [link['href'] for link in links]
hrefs = [base_url + href for href in hrefs]
return hrefs
def generate_course_data():
hrefs = get_hrefs()
# generate_json(hrefs)
generate_csv(hrefs)
def generate_json(hrefs):
courses_info = {}
for department in hrefs:
page = requests.get(department)
soup = BeautifulSoup(page.content, 'html.parser')
courses = soup.find_all(class_='courseblock')
dept_info = []
for course in courses:
course_title = course.find(class_='courseblocktitle').text.strip()
course_desc = course.find(class_='courseblockdesc').text
identifier = course_title.split('.')[0].strip()
identifier = identifier.replace('\xa0', ' ')
name = course_title.split('.')[1:-2]
course_name = ""
for n in name:
course_name += n
credits = course_title.split('.')[-2].split()[0]
description = re.sub(r'[^a-zA-Z0-9\s\.\\/\$\+\*\(\)\?\{\}-]', "", course_desc).strip()
description = description.replace("\'", r"\'")
course_info = {"course": identifier, "name": course_name, "credits": credits, "description": description}
dept_info.append(course_info)
courses_info[department[36:-1].upper()] = dept_info
json.dump(courses_info, open('course_data.json', 'w'))
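# For reference, the title-splitting convention both generators rely on, shown
# on a hypothetical catalog entry (not scraped from the live site):
#   "ACCT 2101. Accounting I. 3 Credit Hours." splits on '.' into
#   identifier "ACCT 2101", name " Accounting I", and credits "3".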
def generate_csv(hrefs):
course_info = []
for department in hrefs:
page = requests.get(department)
soup = BeautifulSoup(page.content, 'html.parser')
courses = soup.find_all(class_='courseblock')
for course in courses:
course_title = course.find(class_='courseblocktitle').text.strip()
course_desc = course.find(class_='courseblockdesc').text
identifier = course_title.split('.')[0].strip()
identifier = identifier.replace('\xa0', ' ')
name = course_title.split('.')[1:-2]
course_name = ""
for n in name:
course_name += n
course_name = course_name.replace("\'", r"\'").strip() # needed for sql
course_name = course_name.replace(",", " ") #remove , for sql bulk insert purposes
credits = course_title.split('.')[-2].split()[0]
credits = credits.replace(",", "-")
if not credits[0].isnumeric():
print(f"We have something weird here course title is: {course_title}")
description = re.sub(r'[^a-zA-Z0-9\s\.\\/\$\+\*\(\)\?\{\}-]', "", course_desc).strip()
description = description.replace("\'", r"\'")
description = description.replace(",", "")
course_info.append([identifier, course_name, credits, description])
    with open('course_data.csv', 'w', newline='') as fout:
writer = csv.writer(fout)
writer.writerows(course_info)
generate_course_data()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.desk.reportview import get_match_cond, get_filters_cond
class Exam(Document):
pass
@frappe.whitelist()
def get_chapter_list(course):
chapterlist = frappe.get_all('Chapter', fields=['name'], filters = {'course': course})
return chapterlist
@frappe.whitelist()
def get_course_list(program):
    courselist = frappe.get_all('Program Course', fields=['course', 'course_name'], filters = {'parent': program})
    return courselist
@frappe.whitelist()
def get_program_courses(doctype, txt, searchfield, start, page_len, filters):
if filters.get('program'):
return frappe.db.sql("""select course, course_name from `tabProgram Course`
where parent = %(program)s and course like %(txt)s {match_cond}
order by
if(locate(%(_txt)s, course), locate(%(_txt)s, course), 99999),
idx desc,
`tabProgram Course`.course asc
limit {start}, {page_len}""".format(
match_cond=get_match_cond(doctype),
start=start,
page_len=page_len), {
"txt": "%{0}%".format(txt),
"_txt": txt.replace('%', ''),
"program": filters['program']
})
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""-----------------------------------------------------------------------------
Script Name: NEXUS Matchup
Description: Match measurements between two or more datasets.
Created By: Alice Yepremyan
Date: 12/12/2019
-----------------------------------------------------------------------------"""
import arcpy
import json
import requests
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
arcpy.env.overwriteOutput = True
def show_plot(x_data, y_data, x_label, y_label):
"""
Display a simple line plot.
:param x_data: Numpy array containing data for the X axis
:param y_data: Numpy array containing data for the Y axis
:param x_label: Label applied to X axis
:param y_label: Label applied to Y axis
"""
np.random.seed(19680801)
plt.figure(figsize=(10, 5), dpi=100)
plt.scatter(x_data, y_data, alpha=0.5)
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
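# A minimal, self-contained way to exercise show_plot with synthetic data
# (illustrative only; the real inputs come from the NEXUS matchup call below):
#   show_plot(np.arange(10), np.arange(10) ** 2, 'in situ (c)', 'satellite (c)')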
# Get the input parameters
host_url = arcpy.GetParameterAsText(0)
primary = arcpy.GetParameterAsText(1)
secondary = arcpy.GetParameterAsText(2)
input_feature = arcpy.GetParameter(3)
start_time = pd.to_datetime(arcpy.GetParameterAsText(4)).strftime('%Y-%m-%dT%H:%M:%SZ')
end_time = pd.to_datetime(arcpy.GetParameterAsText(5)).strftime('%Y-%m-%dT%H:%M:%SZ')
parameter = arcpy.GetParameterAsText(6)
depth_min = arcpy.GetParameterAsText(7)
depth_max = arcpy.GetParameterAsText(8)
tt = arcpy.GetParameterAsText(9)
rt = arcpy.GetParameterAsText(10)
platforms = arcpy.GetParameterAsText(11)
# get coordinates by calculating geometric attributes
arcpy.MakeFeatureLayer_management(input_feature, "layer")
arcpy.AddGeometryAttributes_management("layer", "EXTENT")
rows = arcpy.SearchCursor("layer", fields="EXT_MIN_X;EXT_MIN_Y;EXT_MAX_X;EXT_MAX_Y")
row = next(rows)
min_lon = row.getValue("EXT_MIN_X")
max_lon = row.getValue("EXT_MAX_X")
min_lat = row.getValue("EXT_MIN_Y")
max_lat = row.getValue("EXT_MAX_Y")
# Build the HTTP request
url = f"https://{host_url}/match_spark?primary={primary}&matchup={secondary}&startTime={start_time}&endTime={end_time}&tt={tt}&rt={rt}&b={max_lat},{min_lon},{min_lat},{max_lon}&platforms={platforms}¶meter={parameter}&matchOne=true&depthMin={depth_min}&depthMax={depth_max}"
# url = "https://{}/match_spark?primary=AVHRR_OI_L4_GHRSST_NCEI&matchup=spurs&startTime=2013-10-01T00:00:00Z&endTime=2013-10-30T23:59:59Z&tt=86400&rt=10000.0&b=-30,15,-45,30&platforms=1,2,3,4,5,6,7,8,9¶meter=sst&matchOne=true&depthMin=0&depthMax=5"
# Report a success message
arcpy.AddMessage("Url received, getting json")
ts = json.loads(str(requests.get(url).text))
satellite = []
in_situ = []
for data in ts['data']:
    for matches in data['matches']:
satellite.append(data['sea_water_temperature'])
in_situ.append(matches['sea_water_temperature'])
# Plot matchup
show_plot(in_situ, satellite, secondary+' (c)', primary+' (c)')
|
"""
Refer from https://github.com/statsu1990/yoto_class_balanced_loss
"""
from multiprocessing import Pool
import random
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader, Subset
from sklearn.model_selection import train_test_split
"""
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
"""
transform = transforms.Compose(
[transforms.ToTensor(),])
class ImbalancedCIFAR10(Dataset):
def __init__(self, imbal_class_prop, root='../data', train=True, download=True, transform=transform):
self.dataset = torchvision.datasets.CIFAR10(
root=root, train=train, download=download, transform=transform)
self.train = train
self.imbal_class_prop = imbal_class_prop
self.idxs = self.resample()
def resample(self):
'''
Resample the indices to create an artificially imbalanced dataset.
'''
# Get class indices for resampling
targets, class_counts = np.array(self.dataset.targets), 10
classes, class_datasize = torch.tensor(self.dataset.targets).unique(return_counts=True)
class_indices = [np.where(targets == i)[0] for i in range(class_counts)]
# Reduce class count by proportion
self.imbal_class_counts = [
int(count * prop)
for count, prop in zip(class_datasize, self.imbal_class_prop)
]
# Get class indices for reduced class count
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
idxs = []
for c in range(class_counts):
imbal_class_count = self.imbal_class_counts[c]
idxs.append(class_indices[c][:imbal_class_count])
# print(f'Label {c}, {classes[c]} Data Size: {imbal_class_count}')
idxs = np.hstack(idxs)
self.labels = targets[idxs]
return idxs
def __getitem__(self, index):
img, target = self.dataset[self.idxs[index]]
return img, target
def __len__(self):
return len(self.idxs)
def separate_data(dataset, indices, minority=(2, 4, 9)):
class_dict = {i:[] for i in range(10)}
data_len = len(indices)
for idx in range(data_len):
_, label = dataset[idx]
class_dict[label].append(indices[idx])
# for k in class_dict.keys():
# print(f'Label {k}: {len(class_dict[k])}')
d1_indices = [class_dict[k] if k in minority else class_dict[k][:len(class_dict[k])//2] for k in class_dict.keys() ]
d2_indices = [class_dict[k] if k in minority else class_dict[k][len(class_dict[k])//2:] for k in class_dict.keys() ]
return sum(d1_indices, []), sum(d2_indices, [])
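# A tiny illustration of separate_data on made-up (image, label) pairs
# (not from the original experiments): minority classes keep all of their
# indices in both halves, while the other classes are split 50/50.
#   fake = [(None, lbl) for lbl in [0, 0, 2, 2, 1, 1]]
#   d1, d2 = separate_data(fake, list(range(len(fake))), minority=(2,))
#   # d1 == [0, 4, 2, 3]; d2 == [1, 5, 2, 3]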
if __name__ == '__main__':
    # Build an artificially imbalanced CIFAR10 training set and wrap it in a DataLoader.
    # train_imbalance_class_ratio = np.hstack(([0.1] * 5, [1.0] * 5))
    train_imbalance_class_ratio = np.array([1., 1., .5, 1., .5, 1., 1., 1., 1., .5])
    train_imbalanced_dataset = ImbalancedCIFAR10(train_imbalance_class_ratio)
    train_imbalanced_loader = torch.utils.data.DataLoader(
        train_imbalanced_dataset, batch_size=4, shuffle=True, num_workers=4)
import matplotlib.pyplot as plt
def imshow(img):
img = img / 2 + 0.5
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
dataiter = iter(train_imbalanced_loader)
    images, labels = next(dataiter)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
|
# Author:ambiguoustexture
# Date: 2020-03-04
import json
from pymongo import MongoClient
from bson.objectid import ObjectId
def support_ObjectId(obj):
"""Since ObjectId cannot be json-encoded, convert it to a string type
return: string converted from ObjectId
"""
if isinstance(obj, ObjectId):
return str(obj)
raise TypeError(repr(obj) + " is not JSON serializable")
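# Illustrative doctest-style example (the ObjectId value is made up):
#   >>> support_ObjectId(ObjectId('507f1f77bcf86cd799439011'))
#   '507f1f77bcf86cd799439011'
# Anything that is not an ObjectId still raises TypeError from within
# json.dumps's default hook.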
if __name__ == '__main__':
client = MongoClient()
db = client.db_MusicBrainz
collection = db.artists
for i, artist in enumerate(collection.find({'name': 'Queen'}), start = 1):
print('Record {}:\n{}'.format(i, json.dumps(\
artist,\
indent='\t', \
ensure_ascii=False, \
sort_keys=True,\
default=support_ObjectId\
)))
|
#--------------------------------- IMPORTS ------------------------------------
import shapely
#------------------------ FUNCTION DEFINITIONS ------------------------------
def generate_kitty_lines(kitty_img, n_pts=800, alpha_factor=0.3, show_plt = False):
"""
kitty_img is an image of a cat, with interior and border pixels either 1.0 or
3.0
returns 'hull_lines', which is a collection of lines, width=4
"""
    fig_h = kitty_img.shape[0]
    fig_w = kitty_img.shape[1]
# Hard-code the approximate area of the foreground, here 150 pixels
# x 150 pixels
approx_kitty_area = 150**2
alpha = alpha_factor*(n_pts/approx_kitty_area)**0.5
stop_loop = False
while not stop_loop:
pts = get_random_pts_in_trimap(kitty_img, n_pts)
sh_pts = [geometry.Point(p) for p in pts]
concave_hull, alpha_shape_points = alpha_shape(sh_pts,alpha=alpha)
stop_loop = type(concave_hull)==shapely.geometry.polygon.Polygon
l = list(concave_hull.exterior.coords)
l_list = []
for i in range(len(l)-1):
ln = [l[i], l[i+1]]
l_list.append(ln)
linewidths = 4
hull_lines = LineCollection(l_list, colors=[(0,0,0,1)],linewidths=linewidths)
if show_plt:
figsize=4
plt.figure(figsize=(figsize,figsize*fig_h/fig_w))
plt.gca().add_collection(hull_lines)
        plt.xlim(0, kitty_img.shape[1])
        plt.ylim(0, kitty_img.shape[0])
plt.show()
return hull_lines
# ------------------------------- MAIN BODY -----------------------------------
import os
import json
os.chdir('C:\\Users\\James\\Documents\\data science education\\GA\\DSI\\capstone\\stars\\code')
%run -i alpha_utils
%run -i train_prep_lib
os.chdir('C:\\Users\\James\\Documents\\data science education\\GA\\DSI\\capstone\\stars')
kitty_path = r'./data/hello kitty/hello-kitty-trimap.tif'
kitty_trimap = skimage.io.imread(kitty_path)
#---------------------------------------------------------
folder =r'./data/hello kitty/polygonized/july10'
os.mkdir(folder)
n_pts=500
alpha_factor=0.4
n_kitties = 20
rnd_seed = 0
outfile_name_lst = []
np.random.RandomState(seed=rnd_seed)
for k in range(n_kitties):
lines = generate_kitty_lines(kitty_trimap, n_pts=n_pts,
alpha_factor=alpha_factor, show_plt = False)
outfile_name = folder+ '/' + str(k) + '.png'
outfile_name_lst.append(outfile_name)
    # This plotting function displays things upside down relative to imshow()
plot_lines_and_output_to_file(lines, outfile_name, format='png')
kitty_dict=dict([('alpha_factor',alpha_factor),
('outfile_name_lst', outfile_name_lst),
('n_pts', n_pts),
('n_sets', n_kitties),
('rnd_seed',rnd_seed)])
dict_file = folder+'/kitty_dict.json'
the_json = json.dumps(kitty_dict)
f = open(dict_file,'w')
f.write(the_json)
f.close()
#---------------------------------------------------------
folder =r'./data/hello kitty/polygonized/july12'
os.mkdir(folder)
n_pts=500
alpha_factor=0.4
n_kitties = 20
rnd_seed = 1
outfile_name_lst = []
np.random.RandomState(seed=rnd_seed)
for k in range(n_kitties):
lines = generate_kitty_lines(kitty_trimap, n_pts=n_pts,
alpha_factor=alpha_factor, show_plt = False)
outfile_name = folder+ '/' + str(k) + '.png'
outfile_name_lst.append(outfile_name)
    # This plotting function displays things upside down relative to imshow()
plot_lines_and_output_to_file(lines, outfile_name, format='png')
kitty_dict=dict([('alpha_factor',alpha_factor),
('outfile_name_lst', outfile_name_lst),
('n_pts', n_pts),
('n_sets', n_kitties),
('rnd_seed',rnd_seed)])
dict_file = folder+'/kitty_dict.json'
the_json = json.dumps(kitty_dict)
f = open(dict_file,'w')
f.write(the_json)
f.close()
|
import xlrd
def add_space(val):
return ' ' * val
file_name = 'mon.xlsx'
dir_name = 'D://Python_projects//tests//'
out_file = 'out.sql'
lst = []
s = ' '
tmp_table = 'tmp.lo_monetka_not_working'
del_table = f'if OBJECT_ID(\'{tmp_table}\') is NOT NULL\ndrop table {tmp_table};\n\n'
alter_table = f"""ALTER TABLE {tmp_table}
ADD CONSTRAINT [lo_monetka_not_working_pk] PRIMARY KEY ([ID])
GO"""
rd = xlrd.open_workbook(file_name)
sheet = rd.sheet_by_index(0)
for rownum in range(sheet.nrows):
row = sheet.row_values(rownum)
lst.append(row)
with open(out_file, 'w') as f:
f.write(del_table)
f.write(f'WITH R ([ID],[SHOPID],[NAME],[NOT_WORKING],[ADDRESS_ID])\n AS (\n')
for item in enumerate(lst[1:], start=1):
f.write(f'{add_space(4)}select {item[0]},\'{item[1][0]}\',\'{item[1][1]}\',\'{item[1][2]}\', Null\n')
if item[1] != lst[-1]:
f.write(f'{add_space(6)}union all\n')
f.write(')\n')
f.write(f'{add_space(4)}Select * into {tmp_table} from R\n\n')
f.write(f'{add_space(4)}{alter_table}')
|
class Node():
def __init__(self, e, n):
self.element = e
self.next = n
def getElement(self):
return self.element
def getNext(self):
return self.next
def setElement(self, e):
self.element = e
def setNext(self, n):
self.next = n
class CircularList():
def __init__(self):
self.cursor = None
self.size = 0
    def getSize(self):
return self.size
def getCursor(self):
return self.cursor
def advance(self):
self.cursor = self.cursor.getNext();
def add(self, e):
if self.cursor == None:
e.setNext(e)
self.cursor = e
else:
e.setNext(self.cursor.getNext())
self.cursor.setNext(e)
self.size+= 1
def remove(self):
n = self.cursor.getNext()
if n == self.cursor:
self.cursor = None
else:
self.cursor.setNext(n.getNext())
n.setNext(None)
self.size -= 1
return n
def toString(self):
if self.cursor == None:
return "[]"
else:
s = "[.." + self.cursor.getElement()
n = self.cursor
self.advance()
while n != self.cursor:
s += ", " + self.cursor.getElement()
self.advance()
return s + "...]"
if __name__ == "__main__":
cl = CircularList()
cl.add(Node("Tanzim", None))
cl.add(Node("Bob", None))
cl.advance()
print cl.getCursor().getElement()
cl.add(Node("Jen", None))
print cl.toString()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 10:05:48 2019
@author: andrewbartels1
"""
# packages to import here
import numpy as np
import matplotlib.pyplot as plt
# functions here
def make_phone_data(nphones, sound_speed, spacing, noise_pwr, signal_pwr,
sampling_rate, samples, signal_hz, signal_dir):
'''function to make simulated phone data'''
# generate data matrix, samples x nphones
data = np.sqrt(noise_pwr)*np.random.randn(samples, nphones)
# make actual signal
time = np.linspace(0, samples/sampling_rate, samples, endpoint=False);
signal = np.sqrt(signal_pwr)*np.random.randn(samples)
# make replica vector to duplicate and delay our time signal
time_delays = spacing/sound_speed
fft_freqs = np.matrix(np.linspace( 0, sampling_rate, samples, endpoint=False))
time_delays = np.matrix(np.cos(signal_dir)*time_delays)
spacial_filt = np.exp(2j*np.pi*fft_freqs.transpose()*time_delays)
spacial_filt = np.array(spacial_filt).transpose()
replicas = np.fft.irfft(np.array(np.fft.fft(signal))*spacial_filt, samples, 1 )
# add to data and then return it
data = data + replicas.transpose()
return data, time
def cbf(nphones, sound_speed, spacing, look_dirs, samples, phone_data):
'''function to do conventional beamforming'''
# allocate space to put data
bf_data = np.zeros((samples, len(look_dirs)))
# find time lags between phones and the bf matrix
time_delays = np.matrix( (spacing/sound_speed))
fft_freqs = np.matrix(np.linspace( 0, sampling_rate, samples, endpoint=False)).transpose()
print(fft_freqs.shape)
print(time_delays.shape)
for ind, direction in enumerate(look_dirs):
spacial_filt = 1.0/nphones*np.exp(-2j*np.pi*fft_freqs*time_delays*np.cos(direction))
# fft the data, and let's beamform.
bf_data[:,ind] = np.sum(np.fft.irfft( np.fft.fft(phone_data,samples,0)*np.array(spacial_filt), samples, 0), 1)
return bf_data
def make_plots( nphones, spacing, phone_data, bf_data, time, sampling_rate,
time_length, look_dirs, ind ):
'''function to make our plots'''
plt.figure()
# plot data in each phone
plt.plot( time, phone_data )
plt.title( 'received signal by phone' )
plt.xlabel( 'time in seconds' )
plt.ylabel( 'amplitude' )
plt.savefig( '1_phone_data_' + str(ind) + '.png' )
plt.clf()
# plot power in each phone
plt.plot( sum(abs(np.fft.fft( phone_data, samples, 0))**2/samples**2, 0), \
'-*' )
plt.title( 'power in each phone' )
plt.xlabel( 'phone number label' )
plt.ylabel( 'power in Watts' )
plt.savefig( '2_phone_pwr_' + str(ind) + '.png' )
plt.clf()
# plot data in each beam
plt.plot( time, bf_data )
plt.title( 'received signal by beam' )
plt.xlabel( 'time in seconds' )
plt.ylabel( 'amplitude' )
plt.savefig( '3_beam_data_' + str(ind) + '.png' )
plt.clf()
# plot power in each beam
plt.plot( look_dirs*180/np.pi, sum(abs(np.fft.fft( bf_data, \
samples, 0))**2/samples**2, 0), '-*' )
plt.title( 'power in each beam' )
plt.xlabel( 'beam direction in degrees' )
plt.ylabel( 'power in Watts' )
plt.savefig( '4_beam_pwr_' + str(ind) + '.png' )
plt.clf()
return
# main stuff here
if __name__ == "__main__":
directions = np.array([180])*np.pi/180 #where the signal will come from
for ind, signal_dir in enumerate(directions):
        nphones = 32  # 32 phones
        sound_speed = 343  # meters per second sound speed
        spacing = np.linspace(0, 64, nphones)  # phones spread over 64 m, ~2 m apart
        noise_pwr = 0.01  # background noise power is 0.01
        signal_pwr = 1  # sinusoidal signal power is 1
        sampling_rate = 250  # 250 Hz sampling rate
        samples = 5000
        time_length = samples/sampling_rate
        signal_hz = 100  # 100 Hz tone generated somewhere
        look_dirs = np.arccos(np.linspace(-1, 1, 180))  # cosine spacing, 180 dirs
# make our phone data
phone_data, time = make_phone_data(nphones, sound_speed, spacing, noise_pwr,
signal_pwr, sampling_rate, samples,
signal_hz, signal_dir)
# do the beamforming
bf_data = cbf(nphones, sound_speed, spacing, look_dirs, samples,
phone_data)
# make our plots
make_plots( nphones, spacing, phone_data, bf_data, time, sampling_rate,
time_length, look_dirs, ind )
|
from __future__ import print_function, division
import os
import logging
from isochrones import StarModel
from math import log10, sqrt
from .data import dirname, STARMODELDIR
class GaiaDR1_StarModel(StarModel):
@property
def corrected_parallax(self):
if not hasattr(self, '_corrected_parallax'):
d = {}
for s, (val, unc) in self.obs.parallax.items():
if val < 1.:
d[s] = val, sqrt(unc**2 + 0.3**2)
else:
offset = -0.08 - 0.27*log10(val)
d[s] = val - offset, sqrt(unc**2 + 0.3**2)
self._corrected_parallax = d
return self._corrected_parallax
    def lnlike(self, p, **kwargs):
lnl = super(GaiaDR1_StarModel, self).lnlike(p, **kwargs)
# apply correction for DR1 parallax systematic uncertainty
pardict = self.obs.p2pardict(p)
# First, *undo* the parallax lnl, then redo it with systematic correction
for s,(val,err) in self.obs.parallax.items():
dist = pardict['{}_0'.format(s)][3]
mod = 1./dist * 1000.
# Undo base StarModel parallax lnl term
lnl += 0.5*(val-mod)**2/err**2
# Redo with corrected values
val, err = self.corrected_parallax[s]
lnl += -0.5*(val-mod)**2/err**2
return lnl
def get_starmodel(i, modelname='mist_starmodel_single', rootdir=STARMODELDIR):
d = dirname(i, rootdir=rootdir)
modfile = os.path.join(d,'{}.h5'.format(modelname))
logging.debug('loading model from {}'.format(modfile))
return GaiaDR1_StarModel.load_hdf(modfile)
|
## Santosh Khadka
# https://leetcode.com/problems/roman-to-integer/
'''
Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, 2 is written as II in Roman numeral, just two one's added together.
12 is written as XII, which is simply X + II. The number 27 is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right.
However, the numeral for four is not IIII. Instead, the number four is written as IV.
Because the one is before the five we subtract it making four.
The same principle applies to the number nine, which is written as IX.
There are six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer.
Example 1:
Input: s = "III"
Output: 3
Example 2:
Input: s = "IV"
Output: 4
Example 3:
Input: s = "IX"
Output: 9
Example 4:
Input: s = "LVIII"
Output: 58
Explanation: L = 50, V= 5, III = 3.
Example 5:
Input: s = "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
'''
#class Solution(object):
def intToRoman(num):
    """
    :type num: int
    :rtype: None (prints which Roman-numeral range num falls into)
    """
    if num > 0 and num < 5:          # I - 1
        print("0-4")
    elif num >= 5 and num < 10:      # V - 5
        print("5-9")
    elif num >= 10 and num < 50:     # X - 10
        print("10-49")
    elif num >= 50 and num < 100:    # L - 50
        print("50-99")
    elif num >= 100 and num < 500:   # C - 100
        print("100-499")
    elif num >= 500 and num < 1000:  # D - 500
        print("500-999")
    elif num >= 1000 and num <= 3999:  # M - 1000
        print("1000-3999")
    else:
        print("ERROR - Out of range: [1, 3999]")
def romanToInt_v1(s): # WORKS #################################
"""
:type s: str
:rtype: int
Constraints:
1 <= s.length <= 15
s contains only the characters ('I', 'V', 'X', 'L', 'C', 'D', 'M').
It is guaranteed that s is a valid roman numeral in the range [1, 3999].
"""
s = s+"A"
n = 0
value = 0
for x in s:
# print(s[n])
if s[n] == "A":
return value
if 'M' in s[n]: # 1000s
value += 1000
elif 'D' in s[n]: # 500s
value += 500
elif 'C' in s[n] and 'D' in s[n+1]: # 400
value += 400
n += 1
elif 'C' in s[n] and 'M' in s[n+1]: # 900
value += 900
n += 1
elif 'C' in s[n]: # 100s
value += 100
elif 'L' in s[n]: # 50s
value += 50
elif 'X' in s[n] and 'L' in s[n+1]: # 40
value += 40
n += 1
elif 'X' in s[n] and 'C' in s[n+1]: # 90
value += 90
n += 1
elif 'X' in s[n]: # 10s
value += 10
elif 'V' in s[n]: # 5s
value += 5
elif 'I' in s[n] and 'V' in s[n+1]: # 4
value += 4
n += 1
elif 'I' in s[n] and 'X' in s[n+1]: # 9
value += 9
n += 1
elif 'I' in s[n]: # 1
value += 1
else:
print("ERROR: INVALID INPUT")
n += 1
return value
print(romanToInt_v1("III")) # 3
print("=======================")
print(romanToInt_v1("IV")) # 4
print("=======================")
print(romanToInt_v1("IX")) # 9
print("=======================")
print(romanToInt_v1("LVIII")) # 58
print("=======================")
print(romanToInt_v1("MCMXCIV")) # 1994
def romanToInt_v2(s):
# dictionary
romans = {'I':1, 'IV':4, 'IX':9, 'V':5, 'X':10, 'XL':40, 'XC':90, 'L':50, 'C':100, 'CD':400, 'CM':900, 'D':500, 'M':1000}
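# A minimal sketch (not the author's finished solution) of how the dictionary
# above could complete the conversion: consume the two-character subtractive
# forms first, otherwise fall back to single symbols.
def romanToInt_v2_sketch(s):
    romans = {'I':1, 'IV':4, 'IX':9, 'V':5, 'X':10, 'XL':40, 'XC':90, 'L':50, 'C':100, 'CD':400, 'CM':900, 'D':500, 'M':1000}
    value, i = 0, 0
    while i < len(s):
        if i + 1 < len(s) and s[i:i+2] in romans:
            value += romans[s[i:i+2]]
            i += 2
        else:
            value += romans[s[i]]
            i += 1
    return value
# print(romanToInt_v2_sketch("MCMXCIV"))  # 1994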
def romanToInt_v3(s):
for x in s:
        if ('IV' in s) or ('IX' in s) or ('XL' in s) or ('XC' in s) or ('CM' in s):
pass
# romanToInt("III")
# romanToInt("IV")
# romanToInt("IX")
# romanToInt("LVIII")
# romanToInt("MCMXCIV")
|
import numpy as np
import scipy.sparse as sp
from ..base_transforms import SparseTransform
from ..transform import Transform
from ..decorators import multiple
__all__ = ['SparseAdjToEdge', 'sparse_adj_to_edge']
@Transform.register()
class SparseAdjToEdge(SparseTransform):
def __call__(self, adj_matrix: sp.csr_matrix):
return sparse_adj_to_edge(adj_matrix)
@multiple()
def sparse_adj_to_edge(adj_matrix: sp.csr_matrix):
"""Convert a Scipy sparse matrix to (edge_index, edge_weight) representation"""
adj_matrix = adj_matrix.tocoo(copy=False)
edge_index = np.asarray((adj_matrix.row, adj_matrix.col))
edge_weight = adj_matrix.data.copy()
return edge_index, edge_weight
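# For reference, the underlying SciPy conversion on a hypothetical 2x2
# adjacency matrix with one edge (0 -> 1, weight 2.0); the @multiple
# decorator's extra behaviour is defined elsewhere and not shown here.
#   A = sp.csr_matrix(np.array([[0., 2.], [0., 0.]])).tocoo()
#   np.asarray((A.row, A.col))  # [[0], [1]]  (row index, column index)
#   A.data.copy()               # [2.]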
|
#Classes & Objects
"Like java python uses OOP"
"To create a class use the class keyword"
class Myclass:
x=5
"To create an object we can use the class name assgining it to a new variable"
p1 = Myclass()
print(p1.x)
"The __init__ function is like a constructor function in java, this is executed when the class is being initiated, essentially it can be used to assign values"
class Person:
def __init__(self , name , age):
self.name =name
self.age = age
"Creating an object and passing in the name and age values"
p1 = Person("Tomi" , 21)
print(p1.name)
"You can also have methods in python classes they take in self to refer to the variables in the constructor"
class nicePerson:
def __init__(self , name , age):
self.name = name
self.age = age
def greeting(self):
print("Hello my name is", self.name)
np = nicePerson("Tomi",21)
np.greeting()
"The self parameter itself is a refernece to the current instance of the calss almost the same as this in java however it can be called anything but it must be the first parameter of any method in the class"
"To delete an object just use the del keyword"
"if you wnat and empty calss use the pass keyword"
#Inheritance
"Python allows for inheritance like other oop languages"
"1st you create the parent class"
class Parent:
def __init__(self , name , age):
self.name =name
self.age = age
def printname(self):
print(self.name)
"then you create the child class you send the paret calss as a parameter and use the pass keyword if you do not want to add any more properteos or methods"
class child(Parent):
pass
x= child("Tomi" , 21)
x.printname()
"__init__ can be added to a child calss as well when this is added the child wil no longer inherit the parents init funciton as it overides the parent one"
"To keep the inheritance you must call the parents init method as a property"
class Student(Parent):
def __init__(self,fname , lname ,age):
Parent.__init__(self,fname,age)
a = Student("Bob","Whitney",21)
print(a.age)
a.printname()
"Its better to use the super() method asthis just inherits all the methods and properties when using super() you do not need to call self"
"you can then add properties and methods"
class newStudent(Parent):
def __init__(self, fname , lname , age, year):
super().__init__(fname,age)
self.graduationyear = year
self.lname = lname
def welcome(self):
print("Welcome",self.name,self.age, "to the calss of", self.graduationyear)
def last(self):
print(self.lname)
b = newStudent("Tomi", "Ilori" , 21, 2020)
print(b.graduationyear)
b.printname()
b.welcome()
b.last()
|
from .forms import UserProfileLoginForm
class LoginFormMiddleware(object):
"""
The purpose of this middleware is to include the login form in every GET
request
"""
def process_request(self, request):
"""Process the request if the method is Method is get, provide a login
form"""
if request.method == 'GET':
login_form = UserProfileLoginForm()
request.login_form = login_form
|
import argparse
import resource
import time
from Puzzle import Puzzle
from Solver import Solver
def export(result: tuple, total_time: float):
file = open('output.txt', 'w')
file.write("path_to_goal: " + str(result[0]))
file.write("\ncost_of_path: " + str(len(result[0])))
file.write("\nnodes_expanded: " + str(result[3]))
file.write("\nfringe_size: " + str(len(result[2])))
file.write("\nmax_fringe_size: " + str(result[5]))
file.write("\nsearch_depth: " + str(result[1].depth))
file.write("\nmax_search_depth: " + str(result[4]))
file.write("\nrunning_time: " + format(total_time, '.8f'))
file.write("\nmax_ram_usage: " + format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000.0, '.8f'))
file.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('algorithm')
parser.add_argument('board')
args = parser.parse_args()
initial_board: list = [int(e) for e in args.board.split(",")]
goal_board: list = [1, 2, 3, 4, 5, 6, 7, 8, 0]
root_puzzle: Puzzle = Puzzle(initial_board, goal_board)
solver: Solver = Solver()
function_map: dict = {
'bfs': solver.breadth_first_search,
'dfs': solver.depth_first_search,
'idfs': solver.iterative_depth_first_search,
'ast': solver.a_star,
'gfs': solver.best_first_search
}
start_time: float = time.time()
result: tuple = function_map[args.algorithm](root_puzzle)
stop_time: float = time.time()
export(result, stop_time - start_time)
if __name__ == '__main__':
main()
|
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
def convolution(img, filter):
"""
Convolving an image by a filter
:param img: input image, uint8
:param filter: m*n filter with float values
:return: result of convolving image
"""
if filter.shape[0] % 2 == 0 or filter.shape[1] % 2 == 0:
        raise ValueError('Filter size should be odd')
# size of filter
m, n = filter.shape
# rotating filter 180 degree
filter_90_r = np.array(list(zip(*filter[::-1])))
filter_r = np.array(list(zip(*filter_90_r[::-1])))
img = np.float32(img)
# allocate an image for output
img_out = np.zeros(shape=img.shape, dtype=np.float32)
# pad image with zero
img_pad = np.pad(array=img, pad_width=[(m//2,m//2),(n//2,n//2)], mode='constant', constant_values=0)
#print(img_out.shape, img_pad.shape)
# convolving
p = 0
q = 0
for i in range(m//2, img_pad.shape[0]-(m//2)):
for j in range(n // 2, img_pad.shape[1] - (n // 2)):
#print(i,j, '---', p, q)
# put filter on position i,j
neighbour_hood = img_pad[i-(m//2):i+(m//2)+1, j-(n//2):j+(n//2)+1]
# point-wise multiplication
multi_neig = np.multiply(neighbour_hood, filter_r)
# sum of products
sum_neig = np.sum(np.sum(multi_neig))
img_out[p, q] = sum_neig
q = q + 1
q = 0
p = p + 1
#return np.uint8(img_out)
return img_out
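# Quick illustrative sanity check (not part of the original exercise): a 3x3
# all-ones filter over a 3x3 all-ones image returns neighbourhood sums, so the
# fully covered centre pixel equals 9 and the zero-padded corners equal 4.
_check = convolution(img=np.ones((3, 3), dtype=np.uint8), filter=np.ones((3, 3)))
assert _check[1, 1] == 9 and _check[0, 0] == 4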
# read image
img = cv.imread(filename='.\\Images\\Edge1.jpg', flags=cv.IMREAD_GRAYSCALE)
sobel_3 = np.array([[-1,-2,-1],[0,0,0],[1,2,1]])
sobel_5 = np.array([[-5,-4,0,4,5],[-8,-10,0,10,8],[-10,-20,0,20,10],[-8,-10,0,10,8],[-5,-4,0,4,5]])
sobel_7 = np.array([[-3/18,-2/13,-1/10,0,1/10,2/13,3/18],[-3/13,-2/8,-1/5,0,1/5,2/8,3/13],
[-3/10,-2/5,-1/2,0,1/2,2/5,3/10],[-3/9,-2/4,-1/1,0,1/1,2/4,3/9],
[-3/10,-2/5,-1/2,0,1/2,2/5,3/10],[-3/13,-2/8,-1/5,0,1/5,2/8,3/13],
[-3/18,-2/13,-1/10,0,1/10,2/13,3/18]])
sobel_9 = np.array([[-4/32,-3/25,-2/20,-1/17,0/16,1/17,2/20,3/25,4/32],[-4/25,-3/18,-2/13,-1/10,0/9,1/10,2/13,3/18,4/25],
[-4/20,-3/13,-2/8,-1/5,0/4,1/5,2/8,3/13,4/20],[-4/17,-3/10,-2/5,-1/2,0/1,1/2,2/5,3/10,4/17],
[-4/16,-3/9,-2/4,-1/1,0,1/1,2/4,3/9,4/16],[-4/17,-3/10,-2/5,-1/2,0/1,1/2,2/5,3/10,4/17],
[-4/20,-3/13,-2/8,-1/5,0/4,1/5,2/8,3/13,4/20],[-4/25,-3/18,-2/13,-1/10,0/9,1/10,2/13,3/18,4/25],
[-4/32,-3/25,-2/20,-1/17,0/16,1/17,2/20,3/25,4/32]])
sobels = [sobel_3, sobel_5, sobel_7, sobel_9]
sobels_size = [3,5,7,9]
image_mags = []
# apply filters on image
for id, sobel in enumerate(sobels):
# convolving sobel
sobel_img_x = convolution(img=img, filter=sobel)
sobel_img_y = convolution(img=img, filter=sobel.transpose())
fig = plt.figure(figsize=(8, 8))
plt.title('size {}'.format(sobels_size[id]))
ax = plt.subplot(2,2,1)
plt.imshow(img, cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
ax.set_title('Input Image')
ax = plt.subplot(2, 2, 2)
plt.imshow(sobel_img_x, cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
ax.set_title('Sobel_X {}*{}'.format(sobels_size[id], sobels_size[id]))
ax = plt.subplot(2, 2, 4)
plt.imshow(sobel_img_y, cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
ax.set_title('Sobel_Y {}*{}'.format(sobels_size[id], sobels_size[id]))
ax = plt.subplot(2, 2, 3)
# magnitude
img_mag = np.sqrt(np.float32(sobel_img_x)**2 + np.float32(sobel_img_y)**2)
image_mags.append(img_mag)
plt.imshow(img_mag, cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
ax.set_title('Sobel Magnitude {}*{}'.format(sobels_size[id], sobels_size[id]))
fig = plt.figure()
for i in range(1, 5):
ax = plt.subplot(2, 2, i)
plt.imshow(image_mags[i-1], cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
ax.set_title('Sobel Magnitude {}*{}'.format(sobels_size[i-1], sobels_size[i-1]))
plt.show()
# Explanation about sobel with different sized
# https://stackoverflow.com/questions/9567882/sobel-filter-kernel-of-large-size/41065243#41065243
|
import numpy as np
from Utils import ArtificialNeuralNetwork
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
def ANNTrain(filename):
# result is normally between 0.55 - 0.75 but may fluctuate
input = np.genfromtxt(filename, delimiter=",", skip_header=1)
xtrain = input[:730,:8]
ytrain = input[:730, 8:]
xtest = input[730:,:8]
ytest = input[730:, 8:]
# scaler = StandardScaler()
# scaler.fit(xtrain)
# X_train = scaler.transform(xtrain)
# X_test = scaler.transform(xtest)
    scaler = MinMaxScaler()
    X_train = scaler.fit_transform(xtrain)
    X_test = scaler.transform(xtest)
print("Early Stopping = False")
mlp = MLPRegressor(solver="lbfgs", activation='relu', learning_rate_init=0.01, hidden_layer_sizes=(7), early_stopping = False, max_iter = 120000,
learning_rate = 'invscaling', alpha=0.2)
max1 = 0
print("Looping through 30 tries... ")
for i in range(30):
mlp.fit(X_train,ytrain.ravel())
# print(i, " ", mlp.score(X_test, ytest))
if mlp.score(X_test, ytest) > max1:
max1 = mlp.score(X_test, ytest)
print("Max Score: ", max1)
print()
# Early Stopping prevents overfitting
print("Early Stopping = True")
mlp = MLPRegressor(solver="lbfgs", activation='relu', learning_rate_init=0.01, hidden_layer_sizes=(7), early_stopping = True, max_iter = 120000,
learning_rate = 'invscaling', alpha=0.2, validation_fraction=0.01)
max2 = 0
print("Looping through 30 tries... ")
for i in range(30):
mlp.fit(X_train,ytrain.ravel())
# print(i, " ", mlp.score(X_test, ytest))
if mlp.score(X_test, ytest) > max2:
max2 = mlp.score(X_test, ytest)
print("Max Score: ", max2)
print()
if max1 > max2:
print("Without Early stopping better!")
elif max2 > max1:
print("With Early Stopping Better!")
else:
print("Results About the Same!")
# To make the results better, I could have shuffled the data instead of choosing the first 730 datapoints to train right away.
ANNTrain("Concrete_Data.csv")
|
from django.db import models
class CheckIn(models.Model):
class Meta:
db_table = 'check_in'
unique_together = ('booking_ref_num', 'passenger_first_name', 'passenger_last_name', 'departure_date')
booking_ref_num = models.CharField('Booking reference number', max_length=10, null=False)
passenger_first_name = models.CharField('First name of the passenger to check in', max_length=50, null=False)
passenger_last_name = models.CharField('Last name of the passenger to check in', max_length=50, null=False)
    to_city = models.CharField('City the passenger is flying to', max_length=50, null=False)
    from_city = models.CharField('City the passenger is flying from', max_length=50, null=False)
check_in_time = models.DateTimeField('Date and time check in is allowed', null=False)
departure_flight_time = models.DateTimeField('Date and time the flight will take off', null=False)
    departure_date = models.DateField('Date the flight will take off', null=False)
status = models.CharField('Status of the check-in', max_length=10, null=False, default='READY')
create_ts = models.DateTimeField('Date and Time the check in was created', auto_now_add=True, null=False)
update_ts = models.DateTimeField('Date and Time the check in information was updated', null=False, auto_now=True)
def __str__(self):
return self.passenger_first_name + ' ' + self.passenger_last_name + ' - ' + self.booking_ref_num
|
#!/usr/bin/python
import math, os, sys, argparse
from random import randint as random
from time import time as timer
class Task:
def __init__(self, id, time):
self.id = id+1
self.time = time
def __repr__(self):
return "id=%d t=%d" % (self.id, self.time)
def rec(tasks, lines):
task = tasks[len(lines)-1]
before = lines[-1]
new_line = list(lines[-1])
current_time = 0
for i in before:
if i == "T" and task.time+current_time<len(before):
new_line[task.time+current_time] = "T"
current_time += 1
lines.append(new_line)
if len(lines) > len(tasks):
return lines
return rec(tasks, lines)
def back(output, tasks, pos, results):
for line_id in range(0, len(output)):
if output[line_id][pos] == "T":
results.append(tasks[line_id-1])
if pos-tasks[line_id-1].time <= 0:
return results
return back(output, tasks, pos-tasks[line_id-1].time, results)
def main(args):
tasks = []
print vars(args)
for time in open('input.txt'):
tasks.append(Task(len(tasks), int(time)))
    # sort tasks by time
tasks = sorted(tasks, key=lambda x: x.time, reverse=True)
    # error bound for the first tasks
e = float(args.e[0])
n = int((1-2*e)/e)
print "n=", n
    # which scheme type
fptas = args.fptas
if fptas:
S = 0
for task in tasks:
S += task.time
        e = int(raw_input("enter the maximum error for the FPTAS scheme "))
if(e > 0):
k = (e*S/(2*math.pi))
else:
k = 1
for task_id in range(0, len(tasks)):
tasks[int(task_id)].time = int(int(tasks[int(task_id)].time)/k)
    # dynamic programming for the first n tasks
task_time = 0
for task in tasks[:n]:
task_time += task.time
time_start = timer()
p2_time = math.floor(task_time/2)
table = [[" "] * int(p2_time+1),]
table[0][0] = "T"
output = rec(tasks[:n], table)
results = []
back(output, tasks[:n], int(p2_time), results)
p1 = sorted(results, key=lambda x: x.id, reverse=False)
p2 = sorted(list(set(tasks[:n])-set(p1)), key=lambda x: x.id, reverse=False)
print "podzial zadan przed FPTAS", p1, p2
p1_time = 0
for p in p1:
p1_time += p.time
p2_time = 0
for p in p2:
p2_time += p.time
print "czasy przed FPTAS"
print p1_time
print p2_time
    # assign the remaining tasks greedily in a loop
for task in list(set(tasks)-set(tasks[:n])):
if p1_time < p2_time:
p1.append(task)
p1_time += task.time
else:
p2.append(task)
p2_time += task.time
time_end = timer()
# generate HTML output
if not args.output_json:
output_html_data = [{'count':0, 'tasks':[]},{'count':0, 'tasks':[]}]
for task in p1:
output_html_data[0]['count'] += task.time
for task in p2:
output_html_data[1]['count'] += task.time
output_html_max = max(output_html_data[0]['count'], output_html_data[1]['count'])
for task in p1:
output_html_data[0]['tasks'].append({'id': task.id, 'time': task.time, 'width': round(task.time*100.0/output_html_max, 5)})
for task in p2:
output_html_data[1]['tasks'].append({'id': task.id, 'time': task.time, 'width': round(task.time*100.0/output_html_max, 5)})
output_html = "<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\" />"
output_html += "<center class=\"process_data\">Processor #1 - time "+str(output_html_data[0]['count'])+"</br>"
output_html += "Processor #2 - time "+str(output_html_data[1]['count'])+"</center></br>"
output_html += "<div class=\"processor\">"
for task in output_html_data[0]['tasks']:
r = random(0,255)
g = random(0,255)
b = random(0,255)
output_html += "<div class=\"task\" onmouseover=\"tooltip.show('Task: "+str(task["id"])+"</br>Czas zadania "+str(task['time'])+"');\" onmouseout=\"tooltip.hide();\" style=\"color:rgb("+str(255-r)+","+str(255-g)+","+str(255-b)+");background-color:rgb("+str(r)+","+str(g)+","+str(b)+");width:"+str(task['width']*8)+"px\">"+str(task['id'])+"</div>"
output_html += "</div><div class=\"processor\">"
for task in output_html_data[1]['tasks']:
r = random(0,255)
g = random(0,255)
b = random(0,255)
output_html += "<div class=\"task\" onmouseover=\"tooltip.show('Task: "+str(task["id"])+"</br>Czas zadania "+str(task['time'])+"');\" onmouseout=\"tooltip.hide();\" style=\"color:rgb("+str(255-r)+","+str(255-g)+","+str(255-b)+");background-color:rgb("+str(r)+","+str(g)+","+str(b)+");width:"+str(task['width']*8)+"px\">"+str(task['id'])+"</div>"
output_html += "</div>"
# tooltip
output_html += "<script type=\"text/javascript\" language=\"javascript\" src=\"script.js\"></script>"
# time
output_html += "<div class=\"time\">Czas wykonania alogrytmu: %f sekund</div>" % (time_end-time_start,)
        # DP transition table
output_html += "<div id=\"table\"><div id=\"table2\"><table>"
for i in range(0, len(tasks[:n])+1):
output_html += "<tr><td>"+str(i)+"</td></tr>"
output_html += "</table></div><div id=\"table1\"><table>"
for line in output:
output_html += "<tr>"
for elm in line:
output_html += "<td>%s<td>" % (elm,)
output_html += "</tr>"
output_html += "</table></div></div>"
f = open("index.html", "w")
f.write(output_html)
f.close()
os.system("open index.html")
else:
p1_count = 0
p2_count = 0
for task in p1:
p1_count += task.time
for task in p2:
p2_count += task.time
print "%d\t%d\t%f" % (p1_count, p2_count, time_end-time_start)
parser = argparse.ArgumentParser()
parser.add_argument('-e', metavar='e', type=float, nargs='+', help='error')
parser.add_argument('-output_json', action="store_true", default=False)
parser.add_argument('-fptas', action="store_true", default=False)
main(parser.parse_args(sys.argv[1:]))
|
from .. import Interpreter, adapter
from ..interface import Block
from typing import Optional
import random, math
class SubstringBlock(Block):
def will_accept(self, ctx : Interpreter.Context) -> bool:
dec = ctx.verb.declaration.lower()
return any([dec=="substr",dec=="substring"])
def process(self, ctx : Interpreter.Context) -> Optional[str]:
try:
if "-" in ctx.verb.parameter:
spl = ctx.verb.parameter.split("-")
start = int(float(spl[0]))
end = int(float(spl[1]))
return ctx.verb.payload[start:end]
else:
start = int(float(ctx.verb.parameter))
return ctx.verb.payload[start:]
except:
return None
|
import typing
import pathlib
import datetime
# types
# --------------------------------------------------------------------------------------
class Artifact:
"""Base class for all artifact types."""
class UnbuiltArtifact(Artifact, typing.NamedTuple):
"""The inputs needed to build an artifact.
Attributes
----------
workdir : pathlib.Path
Absolute path to the working directory used to build the artifact.
file : str
Path (relative to the workdir) of the file produced by the build.
recipe : Union[str, None]
Command used to build the artifact. If None, no command is necessary.
release_time: Union[datetime.datetime, None]
Time/date the artifact should be made public. If None, it is always available.
ready : bool
Whether or not the artifact is ready for publication. Default: True.
missing_ok : bool
If True and the file is missing after building, then no error is raised and the
result of the build is `None`.
"""
workdir: pathlib.Path
file: str
recipe: str = None
release_time: datetime.datetime = None
ready: bool = True
missing_ok: bool = False
class BuiltArtifact(Artifact, typing.NamedTuple):
"""The results of building an artifact.
Attributes
----------
workdir : pathlib.Path
Absolute path to the working directory used to build the artifact.
file : str
Path (relative to the workdir) of the file produced by the build.
returncode : int
The build process's return code. If None, there was no process.
stdout : str
The build process's stdout. If None, there was no process.
stderr : str
The build process's stderr. If None, there was no process.
"""
workdir: pathlib.Path
file: str
returncode: int = None
stdout: str = None
stderr: str = None
class PublishedArtifact(Artifact, typing.NamedTuple):
"""A published artifact.
Attributes
----------
path : str
The path to the artifact's file relative to the output directory.
"""
path: str
def _artifact_from_dict(dct):
"""Infers the artifact type from the dictionary and performs conversion."""
if "recipe" in dct:
type_ = UnbuiltArtifact
elif "returncode" in dct:
type_ = BuiltArtifact
else:
type_ = PublishedArtifact
return type_(**dct)
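# Illustrative example (hypothetical path) of the type inference above:
#   _artifact_from_dict({"path": "homeworks/01.pdf"})
#   # -> PublishedArtifact(path='homeworks/01.pdf')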
# The following are "Internal Nodes" of the collection -> publication ->
# artifact hierarchy. They all have _children attributes and _deep_asdict
# and _replace_children methods.
class Publication(typing.NamedTuple):
"""A publication.
Attributes
----------
artifacts : Dict[str, Artifact]
The artifacts contained in the publication.
metadata: Dict[str, Any]
The metadata dictionary.
ready: Optional[bool]
If False, this publication is not ready and will not be published.
release_time: Optional[datetime.datetime]
The time before which this publication will not be released.
"""
metadata: typing.Mapping[str, typing.Any]
artifacts: typing.Mapping[str, Artifact]
ready: bool = True
release_time: datetime.datetime = None
def _deep_asdict(self):
"""A dictionary representation of the publication and its children."""
return {
"metadata": self.metadata,
"artifacts": {k: a._asdict() for (k, a) in self.artifacts.items()},
}
@classmethod
def _deep_fromdict(cls, dct):
return cls(
metadata=dct["metadata"],
artifacts={
k: _artifact_from_dict(d) for (k, d) in dct["artifacts"].items()
},
)
@property
def _children(self):
return self.artifacts
def _replace_children(self, new_children):
return self._replace(artifacts=new_children)
class Collection(typing.NamedTuple):
"""A collection.
Attributes
----------
schema : Schema
The schema used to validate the publications within the collection.
publications : Mapping[str, Publication]
The publications contained in the collection.
"""
schema: "Schema"
publications: typing.Mapping[str, Publication]
def _deep_asdict(self):
"""A dictionary representation of the collection and its children."""
return {
"schema": self.schema._asdict(),
"publications": {
k: p._deep_asdict() for (k, p) in self.publications.items()
},
}
@classmethod
def _deep_fromdict(cls, dct):
return cls(
schema=Schema(**dct["schema"]),
publications={
k: Publication._deep_fromdict(d)
for (k, d) in dct["publications"].items()
},
)
@property
def _children(self):
return self.publications
def _replace_children(self, new_children):
return self._replace(publications=new_children)
class Universe(typing.NamedTuple):
"""Container of all collections.
Attributes
----------
collections : Dict[str, Collection]
The collections.
"""
collections: typing.Mapping[str, Collection]
@property
def _children(self):
return self.collections
def _replace_children(self, new_children):
return self._replace(collections=new_children)
def _deep_asdict(self):
"""A dictionary representation of the universe and its children."""
return {
"collections": {k: p._deep_asdict() for (k, p) in self.collections.items()},
}
@classmethod
def _deep_fromdict(cls, dct):
return cls(
collections={
k: Collection._deep_fromdict(d) for (k, d) in dct["collections"].items()
},
)
class Schema(typing.NamedTuple):
"""Rules governing publications.
Attributes
----------
required_artifacts : typing.Collection[str]
Names of artifacts that publications must contain.
optional_artifacts : typing.Collection[str], optional
Names of artifacts that publication are permitted to contain. Default: empty
list.
metadata_schema : Mapping[str, Any], optional
A dictionary describing a schema used to validate publication metadata. In the
style of cerberus. If None, no validation will be performed. Default: None.
allow_unspecified_artifacts : Optional[Boolean]
Is it permissible for a publication to have unknown artifacts? Default: False.
is_ordered : Optional[Boolean]
Should the publications be considered ordered by their keys? Default: False
"""
required_artifacts: typing.Collection[str]
optional_artifacts: typing.Collection[str] = None
metadata_schema: typing.Mapping[str, typing.Mapping] = None
allow_unspecified_artifacts: bool = False
is_ordered: bool = False
class DateContext(typing.NamedTuple):
"""A context used to resolve smart dates.
Attributes
----------
known : Optional[Mapping[str, datetime]]
A dictionary of known dates. If None, there are no known dates.
start_of_week_one : Optional[datetime.date]
What should be considered the start of "week 1". If None, smart dates referring
to weeks cannot be used.
"""
known: dict = None
start_of_week_one: typing.Optional[datetime.date] = None
|
import json
from decimal import Decimal
from lib.domain.model.humouword_factory import HumouWordFactory
from lib.domain.model.humouword import WordId
from app.infrastructure.humouword import HumouWordDataSource
from app.application.humouword import HumouWordRegisterService
from app.application.humouword import HumouWordGetService
from app.application.humouword import HumouWordDeleteService
from app.application.humouword import GetHumouService
def register_humou_word_handler(event, context):
params = json.loads(event['body'])
humou_word_datasource = HumouWordDataSource()
humou_word_register_service = HumouWordRegisterService(humou_word_datasource)
result = True
humou_word_list = []
for param in params:
humou_word = HumouWordFactory.create(param)
ret = humou_word_register_service.register(humou_word)
humou_word_list.append(humou_word.to_dict())
if not ret:
result = False
if result is True:
body = {
"message": "HumouWord Create Request successfully!",
"humou_word_list": humou_word_list
}
return create_response(200, body)
else:
body = {
"message": "HumouWord Create Request failure!",
}
return create_response(500, body)
def find_humou_word_handler(event, context):
humou_word_datasource = HumouWordDataSource()
humou_word_register_service = HumouWordGetService(humou_word_datasource)
results = humou_word_register_service.find_all()
humou_word_dict_list = []
for result in results:
humou_word_dict = result.to_dict()
humou_word_dict_list.append(humou_word_dict)
body = {
"message": "Get HumouWord Request successfully!",
"humou_word_list": humou_word_dict_list
}
return create_response(200, body)
def delete_humou_word_handler(event, context):
humou_word_datasource = HumouWordDataSource()
humou_word_get_service = HumouWordGetService(humou_word_datasource)
humou_word_delete_service = HumouWordDeleteService(humou_word_datasource)
word_id = WordId(int(event['pathParameters']['wordId']))
humou_word = humou_word_get_service.find_by_id(word_id)
result = humou_word_delete_service.delete(humou_word)
if result is True:
body = {
"message": "HumouWord Delete Request successfully!",
}
return create_response(200, body)
else:
body = {
"message": "HumouWord Delete Request failure!",
}
return create_response(500, body)
def get_humou_handler(event, context):
humou_word_datasource = HumouWordDataSource()
get_humou_service = GetHumouService(humou_word_datasource)
result = get_humou_service.get()
body = {
"message": "Get Humou Request successfully!",
"humou": result
}
return create_response(200, body)
def create_response(status_code, body):
return {
"statusCode": status_code,
"headers": {
"x-custom-header": "my custom header value"
},
"body": json.dumps(body, default=decimal_default_proc)
}
def decimal_default_proc(obj):
if isinstance(obj, Decimal):
return float(obj)
return obj
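# Local smoke-test sketch (hypothetical payload; the exact schema expected by
# HumouWordFactory.create is not shown here, and the backing data source must be
# reachable for this to actually run):
# if __name__ == '__main__':
#     fake_event = {'body': json.dumps([{'word': 'example'}])}
#     print(register_humou_word_handler(fake_event, None))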
|
# -*- coding: utf-8 -*-
from openprocurement.auction.esco.auctions import simple, multilot
from openprocurement.auction.esco.tests.unit.constants import AUCTIONS
def test_put_auction_data_without_dc(universal_auction, logger, mocker):
# TODO: find out what actually '_type' field serves for
if universal_auction.lot_id:
universal_auction._type = multilot
else:
universal_auction._type = simple
mock_upload_audit_file_without_document_service = mocker.patch.object(universal_auction,
'upload_audit_file_without_document_service',
autospec=True)
mock_upload_audit_file_without_document_service.return_value = 'doc_id'
mock_post_results_data = mocker.MagicMock(return_value=None)
mocker.patch('{}.post_results_data'.format(AUCTIONS['simple']), mock_post_results_data)
mocker.patch('{}.post_results_data'.format(AUCTIONS['multilot']), mock_post_results_data)
result = universal_auction.put_auction_data()
log_strings = logger.log_capture_string.getvalue().split('\n')
assert result is None
assert mock_upload_audit_file_without_document_service.call_count == 1
mock_post_results_data.assert_called_once_with(universal_auction)
assert log_strings[-2] == "Auctions results not approved"
mock_post_results_data.return_value = 'results from post_results_data'
mock_announce_results_data = mocker.MagicMock(return_value='bids_information')
mocker.patch('{}.announce_results_data'.format(AUCTIONS['simple']), mock_announce_results_data)
mock_approve_audit_info_on_announcement = mocker.patch.object(universal_auction,
'approve_audit_info_on_announcement',
autospec=True)
result = universal_auction.put_auction_data()
if universal_auction.lot_id:
assert result is None
else:
assert result is True
mock_announce_results_data.assert_called_once_with(universal_auction,
'results from post_results_data')
mock_approve_audit_info_on_announcement.assert_called_once_with(approved='bids_information')
assert mock_upload_audit_file_without_document_service.call_count == 3
assert mock_upload_audit_file_without_document_service.call_args[0] == ('doc_id',)
def test_put_auction_data_with_dc(universal_auction, logger, mocker):
universal_auction.worker_defaults['with_document_service'] = True
# TODO: find out what actually '_type' field serves for
if universal_auction.lot_id:
universal_auction._type = multilot
else:
universal_auction._type = simple
mock_upload_audit_file_with_document_service = mocker.patch.object(universal_auction,
'upload_audit_file_with_document_service',
autospec=True)
mock_upload_audit_file_with_document_service.return_value = 'doc_id'
mock_post_results_data = mocker.MagicMock(return_value=None)
mocker.patch('{}.post_results_data'.format(AUCTIONS['simple']), mock_post_results_data)
mocker.patch('{}.post_results_data'.format(AUCTIONS['multilot']), mock_post_results_data)
result = universal_auction.put_auction_data()
log_strings = logger.log_capture_string.getvalue().split('\n')
assert result is None
assert mock_upload_audit_file_with_document_service.call_count == 1
mock_post_results_data.assert_called_once_with(universal_auction)
assert log_strings[-2] == "Auctions results not approved"
mock_post_results_data.return_value = 'results from post_results_data'
mock_announce_results_data = mocker.MagicMock(return_value='bids_information')
mocker.patch('{}.announce_results_data'.format(AUCTIONS['simple']), mock_announce_results_data)
mock_approve_audit_info_on_announcement = mocker.patch.object(universal_auction,
'approve_audit_info_on_announcement',
autospec=True)
result = universal_auction.put_auction_data()
if universal_auction.lot_id:
assert result is None
else:
assert result is True
mock_announce_results_data.assert_called_once_with(universal_auction, 'results from post_results_data')
mock_approve_audit_info_on_announcement.assert_called_once_with(approved='bids_information')
assert mock_upload_audit_file_with_document_service.call_count == 3
assert mock_upload_audit_file_with_document_service.call_args[0] == ('doc_id',)
def test_post_announce(universal_auction, mocker):
mocker.spy(universal_auction, 'generate_request_id')
mock_get_auction_document = mocker.patch.object(universal_auction, 'get_auction_document', autospec=True)
mock_save_auction_document = mocker.patch.object(universal_auction, 'save_auction_document', autospec=True)
mock_announce_results_data = mocker.MagicMock()
base = 'openprocurement.auction.worker.auctions.{}.announce_results_data'
mocker.patch(base.format('simple'), mock_announce_results_data)
mocker.patch(base.format('multilot'), mock_announce_results_data)
universal_auction.post_announce()
assert universal_auction.generate_request_id.call_count == 1
assert mock_get_auction_document.call_count == 1
assert mock_save_auction_document.call_count == 1
mock_announce_results_data.assert_called_once_with(universal_auction, None)
|
# Generated by Django 2.1.1 on 2018-09-30 09:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0010_auto_20180927_1222'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='slide_photo',
),
migrations.AddField(
model_name='photo',
name='main_photo',
field=models.BooleanField(default=True, help_text='Головне фото, яке відображається в каталогах. Вимоги: співвідношення сторін 2:3, інакше зображення буде обрізано!', verbose_name='Слайди'),
),
]
|
import os
class Leaderboard:
def __init__(self):
self.__score = 49
self.__username = "Hamzah"
self.__filename = "leaderboard.txt"
self.__listScores = []
self.__maxList = 10
self.__readLeaderboard = False
def setScore(self, value):
self.__score = value
def getScore(self):
return self.__score
def setUsername(self, name):
self.__username = name
def getUsername(self):
return self.__username
def setReadLeaderboard(self, value):
self.__readLeaderboard = value
def getReadLeaderboard(self):
return self.__readLeaderboard
def getScoreList(self):
return self.__listScores
def getMaxList(self):
return self.__maxList
def updateLeaderboard(self):
self.readScore()
if (len(self.__listScores) > 0):
highScore = self.__listScores[-1]["score"]
else:
highScore = 0
return (self.__score > highScore or len(self.__listScores) < self.__maxList)
def readScore(self):
if not os.path.exists(self.__filename):
file = open(self.__filename, 'a+')
else:
file = open(self.__filename, 'r+')
lines = file.readlines()
file.close()
self.__listScores = []
for line in lines:
name, score = line.split(",")
score = int(score)
self.__listScores.append({"name": name, "score": score})
def insertScore(self):
os.remove(self.__filename)
file = open(self.__filename, 'a+')
enteredScore = False
for item in self.__listScores:
name = item["name"]
score = item["score"]
if (self.__score > score and not enteredScore):
file.write(self.__username+","+ str(self.__score) + "\n")
enteredScore = True
file.write(name+","+ str(score) + "\n")
if (not enteredScore):
file.write(self.__username+","+ str(self.__score) + "\n")
file.close()
self.updateLeaderboard()
# leaderboard = Leaderboard()
# leaderboard.updateLeaderboard()
|
# Defines a one-to-many dependency between objects, so that any change is automatically
# reported to all of its dependents (the Observer pattern).
from abc import ABCMeta, abstractmethod
class CanalDeNoticia:
def __init__(self):
self.__inscritos = []
self.__noticia = ''
def inscrever(self, inscrito):
self.__inscritos.append(inscrito)
def desinscrever(self):
self.__inscritos.pop()
def inscritos(self):
return [type(x).__name__ for x in self.__inscritos]
def notificar_inscritos(self):
for inscrito in self.__inscritos:
inscrito.update()
def adicionar_noticia(self, noticia):
self.__noticia = noticia
@property
def noticia(self):
return self.__noticia
class Assinar(metaclass=ABCMeta):
@abstractmethod
def update(self):
pass
class SMS(Assinar):
def __init__(self, assinatura):
self.assinatura = assinatura
self.assinatura.inscrever(self)
def update(self):
print(type(self).__name__, self.assinatura.noticia)
class Email(Assinar):
def __init__(self, assinatura):
self.assinatura = assinatura
self.assinatura.inscrever(self)
def update(self):
print(type(self).__name__, self.assinatura.noticia)
canal_de_noticia = CanalDeNoticia()
sms = SMS(canal_de_noticia)
email = Email(canal_de_noticia)
print('Inscritos', canal_de_noticia.inscritos())
print('#### Adicionando noticia ####')
canal_de_noticia.adicionar_noticia('Noticia 1')
print('#### Noticia adicionada ####')
print('#### Notificar inscritos ####')
canal_de_noticia.notificar_inscritos()
|
"""Write a class called Wordplay. It should have a field that holds a list of words. The user of the class should pass the list of words they want to use to the class. There should be the following methods:
• words_with_length(length) — returns a list of all the words of length length
• starts_with(s) — returns a list of all the words that start with s
• ends_with(s) — returns a list of all the words that end with s
• palindromes() — returns a list of all the palindromes in the list
• only(L) — returns a list of the words that contain only those letters in L
• avoids(L) — returns a list of the words that contain none of the letters in L"""
class Wordplay:
def __init__(self, word_list):
self.word_list = word_list
def words_with_length(self, length):
s = []
for word in self.word_list:
if len(word) == length:
s.append(word)
return s
def starts_with(self, s):
l = []
for word in self.word_list:
if word.startswith(s):
l.append(word)
return l
    def ends_with(self, s):
        return [word for word in self.word_list if word.endswith(s)]
    def palindromes(self):
        return [word for word in self.word_list if word.lower() == word.lower()[::-1]]
    def only(self, L):
        return [word for word in self.word_list if all(letter in L for letter in word)]
    def avoids(self, L):
        return [word for word in self.word_list if not any(letter in L for letter in word)]
words = ["Come", "game", "market", "grace", "receive", "thanks", "people", "language"]
wordplay = Wordplay(words)
print(wordplay.words_with_length(6))
print(wordplay.starts_with("g"))
|
from sqlalchemy import Column, Integer, String, Float, ForeignKey, DateTime, Table, Boolean
from sqlalchemy.orm import relationship
from settings.database import Base
import datetime
class BaseMixin(object):
id = Column(Integer, primary_key=True)
deleted = Column(Boolean, default=False, nullable=False)
class DateAware(BaseMixin, Base):
__abstract__ = True
    # Pass the callable (not its result) so the timestamp is evaluated per row insert.
    created = Column(DateTime, default=datetime.datetime.today)
    modified = Column(DateTime, default=datetime.datetime.today)
class Image(DateAware):
__abstract__ = True
path = Column(String(100), unique=True, nullable=False)
|
'''
Updates from events
'''
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
try:
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Define the module's virtual name
__virtualname__ = 'checks'
def __virtual__():
if HAS_LIBS:
return __virtualname__
def http(name, status):
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
    kwargs = {}
    kwargs['status'] = status
__salt__['mine.send']('checks.http', name, **kwargs)
ret['comment'] = 'http status updated'
return ret
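# Hypothetical direct-call sketch: in normal use Salt's state system invokes http()
# and injects the __salt__ dunder; the URL and status values below are illustrative.
# ret = http('https://example.com/health', status=200)
# assert ret['result'] is True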
|
import torch
from torch import nn
from torch import optim
import torchvision
from torchvision import datasets, transforms, models
# LOADING DATA FUNCTION
def load_data(data_dir):
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Define your transforms for the training, validation, and testing sets
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
    # Validation and testing use a deterministic resize + center crop (no random augmentation).
    valid_transforms = transforms.Compose([transforms.Resize(255),
                                           transforms.CenterCrop(224),
                                           transforms.ToTensor(),
                                           transforms.Normalize([0.485, 0.456, 0.406],
                                                                [0.229, 0.224, 0.225])])
    test_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
# Using the image datasets and the trainforms, define the dataloaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
return trainloader, validloader, testloader, train_data
#–––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
def model_train_and_test(save_dir,arch,learning_rate,hidden_units,epochs,gpu_act,trainloader,validloader,testloader,train_data):
model = getattr(torchvision.models, arch)(pretrained=True)
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cuda" if gpu_act == 'True' else "cpu")
for param in model.parameters():
param.requires_grad = False
model.classifier = nn.Sequential(nn.Linear(25088, int(hidden_units)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(int(hidden_units), 102),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=float(learning_rate))
model.to(device);
epochs = int(epochs)
steps = 0
running_loss = 0
print_every = 20
for epoch in range(epochs):
for inputs, labels in trainloader:
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
logps = model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
validation_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in validloader:
inputs, labels = inputs.to(device), labels.to(device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
validation_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Epoch: {epoch+1}/{epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Validation loss: {validation_loss/len(validloader):.3f}.. "
f"Validation accuracy: {accuracy/len(validloader):.3f}")
running_loss = 0
model.train()
model.eval()
count = 0
accuracy = 0
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
count += 1
logps = model.forward(images)
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Test accuracy: {accuracy/count:.3f}")
model.class_to_idx = train_data.class_to_idx
checkpoint = {'arch': str(arch),
'learning_rate': float(learning_rate),
'epochs': int(epochs),
'optimizer': optimizer.state_dict(),
'classifier' : model.classifier,
'state_dict': model.state_dict(),
'class_to_idx': model.class_to_idx}
torch.save(checkpoint, save_dir + '/checkpoint.pth')
#–––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
|
import unittest
from search.search import kmp_search, prepare_lps
class TestSearch(unittest.TestCase):
def test_prepare_lps(self):
self.assertEqual(
prepare_lps("ababd"),
[0, 0, 0, 1, 2, 0]
)
def test_search(self):
string = """Lorem ipsum dodolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dodolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut
aliquip ex ea commodo consequat. Duis aute irure dodolor in reprehenderit in
voluptate velit esse cillum dodolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
pattern = "dodolor"
self.assertEqual(
kmp_search(string, pattern),
[
(1, 13), (2, 47), (4, 50), (5, 29)
]
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from base import smartpool
def db_conn(db_name):
def deco(old_handler):
@wraps(old_handler)
def new_handler(*args, **kwargs):
kwargs[db_name] = smartpool.ConnectionProxy(db_name)
return old_handler(*args, **kwargs)
return new_handler
return deco
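# Hypothetical usage sketch: the decorator injects a smartpool connection proxy under
# the given keyword argument. The handler name, database alias, and proxy API below
# are illustrative only, not part of the real smartpool interface.
# @db_conn('main_db')
# def get_user(user_id, main_db=None):
#     return main_db.query('SELECT * FROM users WHERE id = %s', (user_id,))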
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
with open("test4.txt","r",encoding="utf-8") as f:
# s = f.read()
s= f.readlines()
for k in s:
if "查看详情" not in k:
# file = open('test5.txt', 'wb')
file = open('test5.txt', 'a', encoding="utf-8")
file.writelines(str(k)+"\n")
file.close()
# file.writelines(a + "\n")
# print(s)
# s1 = s.strip()
# s2 = s1.lstrip()
# s3 = s2.rstrip()
# s4=s3.replace(' ','')
# print(s4)
# file = open('test4.txt', 'wb')
# file.write(str(s4).encode("utf-8"))
# file.close()
|
import requests # http://docs.python-requests.org/en/master/api/
import unittest
import json
from requests.status_codes import codes
"""
This is a python script for integration testing our sample app.
Execution requirements:
1. Start Cytoscape from the command line with the -R option set to 1234 (normally './cytoscape.sh -R 1234')
2. Using the Cytoscape App Manager, ensure that CyREST is installed, and is up to date.
3. Install your sample app jar through the Cytoscape App Manager by using the 'Install from file...' button
4. Execute this script
You should get console output that looks something like the following:
Ran 2 tests in 0.042s
OK
Process finished with exit code 0
This indicates that your app has passed the 2 tests we have written below.
"""
class SampleTestCase(unittest.TestCase):
# Some information about our host and REST port (this should correspond with the -R option mentioned above).
_HOST = "localhost"
_PORT = "1234"
# Set up HTTP Request headers for our request.
_HEADERS = {'Content-type': 'application/json', 'Accept': 'application/json'}
# Any code that needs to be run before each test should go here.
def setUp(self):
pass
# Any code that needs to be run after each test should go here.
# In our case, the test_students() method adds a specific student, Jeff Winger, to the student list, and this should
# be removed to return the resource to its original state.
def tearDown(self):
result = requests.request("GET",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students",
data=None,
params=None,
headers=SampleTestCase._HEADERS)
assert result.status_code == codes.OK, "Status code was expected to be 200, but was {}".format(result.status_code)
students = result.json()
for student in students:
studentResult = requests.request("GET",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students/{}".format(
student),
data=None,
params=None,
headers=SampleTestCase._HEADERS)
studentJson = studentResult.json()
if (studentJson["firstName"] == "Jeff" and studentJson["lastName"] == "Winger"):
requests.request("DELETE",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students/{}".format(student),
data=None,
params=None,
headers=SampleTestCase._HEADERS)
"""
All tests in our test suite are named starting with the text 'test'. This is set up in the suite() definition.
This is a test of the GET /cyrestbestpractices/v1/classroom/teacher operation.
"""
@staticmethod
def test_get_teacher():
# Perform the request
result = requests.request("GET",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/teacher",
data=None,
params=None,
headers=SampleTestCase._HEADERS)
"""
        Assert statements are how we evaluate tests. If the expression 'result.status_code == codes.OK' doesn't resolve
        to true, this assertion will fail, and the console will report failures instead of OK. All assertions in your
        tests should resolve to true for your code to pass these tests.
"""
assert result.status_code == codes.OK , "Status code was expected to be 200, but was {}".format(result.status_code)
"""
Here, we extract fields from the JSON content of the result.
"""
firstName = result.json()["firstName"]
lastName = result.json()["lastName"]
age = result.json()["age"]
# Assert that the teacher is Ben Chang.
assert firstName == "Ben", "Expected firstName to be 'Ben' but was '" + firstName + "'"
assert lastName == "Chang", "Expected lastName to be 'Chang' but was '" + lastName + "'"
assert age == 32, "Expected age to be 32, but was {}".format(age)
"""
This is a test of the POST /cyrestbestpractices/v1/classroom/students operation.
We get a list of all pre-existing students that we can make a simple size comparison against, add a new student, and
then check that the size of the student list has incremented by one.
"""
@staticmethod
def test_students():
# Set up HTTP Request headers for our request.
# Make a get request to get a list of current students.
result = requests.request("GET",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students",
data=None,
params=None,
headers=SampleTestCase._HEADERS)
assert result.status_code == codes.OK, "Status code was expected to be 200, but was {}".format(
result.status_code)
students = result.json()
#Noting the original number of students
originalLength = len(students);
#Creating a JSON object for the new student.
jsonMessageBody = json.dumps({"firstName": "Jeff","lastName":"Winger", "age": 42})
#Making a post request to add the student.
result = requests.request("POST",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students",
data=jsonMessageBody,
params=None,
headers=SampleTestCase._HEADERS)
assert result.status_code == codes.OK, "Status code was expected to be 200, but was {}".format(
result.status_code)
# Make a get request to get the updated list of students.
result = requests.request("GET",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students",
data=None,
params=None,
headers=SampleTestCase._HEADERS)
assert result.status_code == codes.OK, "Status code was expected to be 200, but was {}".format(
result.status_code)
newStudents = result.json()
# Assert that adding a student increased the number of students by one.
        assert originalLength+1 == len(newStudents), "Expected student count to be {} but was {}".format((originalLength+1), len(newStudents))
"""
        The following lines of code check to make sure that we now have the exact student we added in the classroom by
        iterating over the student list, using a GET to retrieve each individual student, and setting hasJeffWinger to
        true if a student with the same fields as the one we added is found.
"""
hasJeffWinger = False
for student in newStudents:
studentResult = requests.request("GET",
"http://" + SampleTestCase._HOST + ":" + SampleTestCase._PORT + "/cyrestbestpractices/v1/classroom/students/{}".format(student),
data=None,
params=None,
headers=SampleTestCase._HEADERS)
studentJson = studentResult.json()
if studentJson["firstName"] == "Jeff" and studentJson["lastName"] == "Winger" and studentJson["age"] == 42:
hasJeffWinger = True
#This should fail if we didn't find a student named Jeff Winger.
assert hasJeffWinger, "POST student operation did not add Jeff Winger."
# This defines our test suite.
def suite():
version_suite = unittest.makeSuite(SampleTestCase, "test")
return unittest.TestSuite((version_suite))
# This defines our main method for execution.
if __name__ == "__main__":
unittest.TextTestRunner().run(suite())
|
from unittest import mock
import pytest
import keg_storage
class TestStorage:
def test_app_to_init_call_init(self):
app = mock.MagicMock()
app.config = {
"KEG_STORAGE_PROFILES": [(keg_storage.backends.StorageBackend, {"name": "test"})]
}
storage = keg_storage.Storage(app)
assert "test" in storage._interfaces
assert storage.interface == "test"
# Test plugin lookup.
assert isinstance(storage.get_interface(), keg_storage.backends.StorageBackend)
assert isinstance(storage.get_interface("test"), keg_storage.backends.StorageBackend)
# Test invalid plugin.
with pytest.raises(ValueError, match="invalid interface 'foo'"):
storage.get_interface("foo")
def test_migration_storage_profiles(self):
# Old name gets translated to current name.
app = mock.MagicMock()
app.config = {
"STORAGE_PROFILES": [(keg_storage.backends.StorageBackend, {"name": "found"})]
}
with pytest.warns(DeprecationWarning, match="STORAGE_PROFILES is deprecated"):
storage = keg_storage.Storage(app)
assert "found" in storage._interfaces
assert storage.interface == "found"
# If both are there, use the current name.
app = mock.MagicMock()
app.config = {
"STORAGE_PROFILES": [(keg_storage.backends.StorageBackend, {"name": "ignored"})],
"KEG_STORAGE_PROFILES": [(keg_storage.backends.StorageBackend, {"name": "found"})],
}
with pytest.warns(
DeprecationWarning,
match="Found both KEG_STORAGE_PROFILES and deprecated STORAGE_PROFILES",
):
storage = keg_storage.Storage(app)
assert "ignored" not in storage._interfaces
assert "found" in storage._interfaces
assert storage.interface == "found"
def test_no_storage_profiles(self):
app = mock.MagicMock()
app.config = {"KEG_STORAGE_PROFILES": []}
storage = keg_storage.Storage(app)
with pytest.raises(ValueError, match="no interface was specified"):
storage.get_interface()
with pytest.raises(ValueError, match="invalid interface 'foo'"):
storage.get_interface("foo")
|
from django.contrib import admin
from .models import Movie, Showing, Order
class MovieAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'description', 'active',)
list_filter = ('active',)
search_fields = ('name',)
ordering = ('id',)
admin.site.register(Movie, MovieAdmin)
class ShowingAdmin(admin.ModelAdmin):
list_display = ('name', 'price_per_ticket', 'movie', 'showing_room', 'remaining_seats',
'start', 'end', 'status')
list_filter = ('movie', 'showing_room', 'status')
search_fields = ('movie',)
ordering = ('id',)
admin.site.register(Showing, ShowingAdmin)
class OrderAdmin(admin.ModelAdmin):
list_display = ('id', 'email', 'showing', 'quantity', 'final_price',)
list_filter = ('showing',)
ordering = ('id',)
admin.site.register(Order, OrderAdmin)
|
from rest_framework import permissions, status
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from .models import Test
from .serializers import ResultPostSerializer, TestSerializer
class ListRetrieveViewSet(RetrieveModelMixin, ListModelMixin, GenericViewSet):
pass
class TestViewSet(ListRetrieveViewSet):
queryset = Test.objects.all()
serializer_class = TestSerializer
@action(
detail=True,
methods=["POST"],
url_path="answer",
url_name="test-answer",
serializer_class=ResultPostSerializer,
permission_classes=[permissions.AllowAny],
)
def test_answer(self, request, pk):
"""
Endpoint for answer selected test.
"questions' field must contains all answers for current test.
"""
serializer = self.serializer_class(
data=request.data,
context={"request": request, "test_pk": pk},
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
import os
import uuid
from dataclasses import dataclass
from enum import Enum
from typing import Any
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import PexLayout
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists_pep660 import (
EditableLocalDists,
EditableLocalDistsRequest,
)
from pants.backend.python.util_rules.pex import Pex, PexProcess, PexRequest, VenvPex, VenvPexProcess
from pants.backend.python.util_rules.pex_cli import PexPEX
from pants.backend.python.util_rules.pex_environment import PexEnvironment
from pants.backend.python.util_rules.pex_requirements import EntireLockfile, Lockfile
from pants.core.goals.export import (
Export,
ExportError,
ExportRequest,
ExportResult,
ExportResults,
ExportSubsystem,
PostProcessingCommand,
)
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.internals.native_engine import AddPrefix, Digest, MergeDigests, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import ProcessCacheScope, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.option.option_types import BoolOption, EnumOption, StrListOption
from pants.util.strutil import path_safe, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ExportVenvsRequest(ExportRequest):
pass
@dataclass(frozen=True)
class _ExportVenvForResolveRequest(EngineAwareParameter):
resolve: str
class PythonResolveExportFormat(Enum):
"""How to export Python resolves."""
mutable_virtualenv = "mutable_virtualenv"
symlinked_immutable_virtualenv = "symlinked_immutable_virtualenv"
class ExportPluginOptions:
py_resolve_format = EnumOption(
default=PythonResolveExportFormat.mutable_virtualenv,
help=softwrap(
"""\
Export Python resolves using this format. Options are:
- `mutable_virtualenv`: Export a standalone mutable virtualenv that you can
further modify.
- `symlinked_immutable_virtualenv`: Export a symlink into a cached Python virtualenv.
This virtualenv will have no pip binary, and will be immutable. Any attempt to
modify it will corrupt the cache! It may, however, take significantly less time
to export than a standalone, mutable virtualenv.
"""
),
)
symlink_python_virtualenv = BoolOption(
default=False,
help="Export a symlink into a cached Python virtualenv. This virtualenv will have no pip binary, "
"and will be immutable. Any attempt to modify it will corrupt the cache! It may, however, "
"take significantly less time to export than a standalone, mutable virtualenv will.",
removal_version="2.20.0.dev0",
removal_hint="Set the `[export].py_resolve_format` option to 'symlinked_immutable_virtualenv'",
)
py_editable_in_resolve = StrListOption(
# TODO: Is there a way to get [python].resolves in a memoized_property here?
# If so, then we can validate that all resolves here are defined there.
help=softwrap(
"""
When exporting a mutable virtualenv for a resolve, do PEP-660 editable installs
of all 'python_distribution' targets that own code in the exported resolve.
If a resolve name is not in this list, 'python_distribution' targets will not
be installed in the virtualenv. This defaults to an empty list for backwards
compatibility and to prevent unnecessary work to generate and install the
PEP-660 editable wheels.
This only applies when '[python].enable_resolves' is true and when exporting a
'mutable_virtualenv' ('symlinked_immutable_virtualenv' exports are not "full"
virtualenvs because they must not be edited, and do not include 'pip').
NOTE: If you are using legacy exports (not using the '--resolve' option), then
this option has no effect. Legacy exports will not include any editable installs.
"""
),
advanced=True,
)
async def _get_full_python_version(pex_or_venv_pex: Pex | VenvPex) -> str:
# Get the full python version (including patch #).
is_venv_pex = isinstance(pex_or_venv_pex, VenvPex)
kwargs: dict[str, Any] = dict(
description="Get interpreter version",
argv=[
"-c",
"import sys; print('.'.join(str(x) for x in sys.version_info[0:3]))",
],
extra_env={"PEX_INTERPRETER": "1"},
)
if is_venv_pex:
kwargs["venv_pex"] = pex_or_venv_pex
res = await Get(ProcessResult, VenvPexProcess(**kwargs))
else:
kwargs["pex"] = pex_or_venv_pex
res = await Get(ProcessResult, PexProcess(**kwargs))
return res.stdout.strip().decode()
@dataclass(frozen=True)
class VenvExportRequest:
pex_request: PexRequest
dest_prefix: str
resolve_name: str
qualify_path_with_python_version: bool
editable_local_dists_digest: Digest | None = None
@rule
async def do_export(
req: VenvExportRequest,
pex_pex: PexPEX,
pex_env: PexEnvironment,
export_subsys: ExportSubsystem,
) -> ExportResult:
if not req.pex_request.internal_only:
raise ExportError(f"The PEX to be exported for {req.resolve_name} must be internal_only.")
dest_prefix = (
os.path.join(req.dest_prefix, path_safe(req.resolve_name))
if req.resolve_name
else req.dest_prefix
)
# digest_root is the absolute path to build_root/dest_prefix/py_version
# (py_version may be left off in some cases)
output_path = "{digest_root}"
complete_pex_env = pex_env.in_workspace()
if export_subsys.options.symlink_python_virtualenv:
export_format = PythonResolveExportFormat.symlinked_immutable_virtualenv
else:
export_format = export_subsys.options.py_resolve_format
if export_format == PythonResolveExportFormat.symlinked_immutable_virtualenv:
# NB: The symlink performance hack leaks an internal named cache location as output (via
# the symlink target). If the user partially or fully deletes the named cache, the symlink
# target might point to a malformed venv, or it might not exist at all.
# To prevent returning a symlink to a busted or nonexistent venv from a cached process
# (or a memoized rule) we force the process to rerun per-session.
# This does mean re-running the process superfluously when the named cache is intact, but
# that is generally fast, since all wheels are already cached, and it's best to be safe.
requirements_venv_pex = await Get(
VenvPex,
PexRequest,
dataclasses.replace(req.pex_request, cache_scope=ProcessCacheScope.PER_SESSION),
)
py_version = await _get_full_python_version(requirements_venv_pex)
# Note that for symlinking we ignore qualify_path_with_python_version and always qualify,
# since we need some name for the symlink anyway.
dest = f"{dest_prefix}/{py_version}"
description = (
f"symlink to immutable virtualenv for {req.resolve_name or 'requirements'} "
f"(using Python {py_version})"
)
venv_abspath = os.path.join(complete_pex_env.pex_root, requirements_venv_pex.venv_rel_dir)
return ExportResult(
description,
dest,
post_processing_cmds=[
# export creates an empty directory for us when the digest gets written.
# We have to remove that before creating the symlink in its place.
PostProcessingCommand(["rmdir", output_path]),
PostProcessingCommand(["ln", "-s", venv_abspath, output_path]),
],
resolve=req.resolve_name or None,
)
elif export_format == PythonResolveExportFormat.mutable_virtualenv:
# Note that an internal-only pex will always have the `python` field set.
# See the build_pex() rule and _determine_pex_python_and_platforms() helper in pex.py.
requirements_pex = await Get(Pex, PexRequest, req.pex_request)
assert requirements_pex.python is not None
py_version = await _get_full_python_version(requirements_pex)
if req.qualify_path_with_python_version:
dest = f"{dest_prefix}/{py_version}"
else:
dest = dest_prefix
description = (
f"mutable virtualenv for {req.resolve_name or 'requirements'} "
f"(using Python {py_version})"
)
merged_digest = await Get(Digest, MergeDigests([pex_pex.digest, requirements_pex.digest]))
tmpdir_prefix = f".{uuid.uuid4().hex}.tmp"
tmpdir_under_digest_root = os.path.join("{digest_root}", tmpdir_prefix)
merged_digest_under_tmpdir = await Get(Digest, AddPrefix(merged_digest, tmpdir_prefix))
post_processing_cmds = [
PostProcessingCommand(
complete_pex_env.create_argv(
os.path.join(tmpdir_under_digest_root, pex_pex.exe),
*(
os.path.join(tmpdir_under_digest_root, requirements_pex.name),
"venv",
"--pip",
"--collisions-ok",
output_path,
),
),
{
**complete_pex_env.environment_dict(python=requirements_pex.python),
"PEX_MODULE": "pex.tools",
},
),
# Remove the requirements and pex pexes, to avoid confusion.
PostProcessingCommand(["rm", "-rf", tmpdir_under_digest_root]),
]
# Insert editable wheel post processing commands if needed.
if req.editable_local_dists_digest is not None:
# We need the snapshot to get the wheel file names which are something like:
# - pkg_name-1.2.3-0.editable-py3-none-any.whl
wheels_snapshot = await Get(Snapshot, Digest, req.editable_local_dists_digest)
# We need the paths to the installed .dist-info directories to finish installation.
py_major_minor_version = ".".join(py_version.split(".", 2)[:2])
lib_dir = os.path.join(
output_path, "lib", f"python{py_major_minor_version}", "site-packages"
)
dist_info_dirs = [
# This builds: dist/.../resolve/3.8.9/lib/python3.8/site-packages/pkg_name-1.2.3.dist-info
os.path.join(lib_dir, "-".join(f.split("-")[:2]) + ".dist-info")
for f in wheels_snapshot.files
]
# We use slice assignment to insert multiple elements at index 1.
post_processing_cmds[1:1] = [
PostProcessingCommand(
[
# The wheels are "sources" in the pex and get dumped in lib_dir
# so we move them to tmpdir where they will be removed at the end.
"mv",
*(os.path.join(lib_dir, f) for f in wheels_snapshot.files),
tmpdir_under_digest_root,
]
),
PostProcessingCommand(
[
# Now install the editable wheels.
os.path.join(output_path, "bin", "pip"),
"install",
"--no-deps", # The deps were already installed via requirements.pex.
"--no-build-isolation", # Avoid VCS dep downloads (as they are installed).
*(os.path.join(tmpdir_under_digest_root, f) for f in wheels_snapshot.files),
]
),
PostProcessingCommand(
[
# Replace pip's direct_url.json (which points to the temp editable wheel)
# with ours (which points to build_dir sources and is marked "editable").
# Also update INSTALLER file to indicate that pants installed it.
"sh",
"-c",
" ".join(
[
f"mv -f {src} {dst}; echo pants > {installer};"
for src, dst, installer in zip(
[
os.path.join(d, "direct_url__pants__.json")
for d in dist_info_dirs
],
[os.path.join(d, "direct_url.json") for d in dist_info_dirs],
[os.path.join(d, "INSTALLER") for d in dist_info_dirs],
)
]
),
]
),
]
return ExportResult(
description,
dest,
digest=merged_digest_under_tmpdir,
post_processing_cmds=post_processing_cmds,
resolve=req.resolve_name or None,
)
else:
raise ExportError("Unsupported value for [export].py_resolve_format")
@dataclass(frozen=True)
class MaybeExportResult:
result: ExportResult | None
@rule
async def export_virtualenv_for_resolve(
request: _ExportVenvForResolveRequest,
python_setup: PythonSetup,
export_subsys: ExportSubsystem,
) -> MaybeExportResult:
resolve = request.resolve
lockfile_path = python_setup.resolves.get(resolve)
if not lockfile_path:
raise ExportError(
f"No resolve named {resolve} found in [{python_setup.options_scope}].resolves."
)
lockfile = Lockfile(
url=lockfile_path,
url_description_of_origin=f"the resolve `{resolve}`",
resolve_name=resolve,
)
interpreter_constraints = InterpreterConstraints(
python_setup.resolves_to_interpreter_constraints.get(
request.resolve, python_setup.interpreter_constraints
)
)
if resolve in export_subsys.options.py_editable_in_resolve:
editable_local_dists = await Get(
EditableLocalDists, EditableLocalDistsRequest(resolve=resolve)
)
editable_local_dists_digest = editable_local_dists.optional_digest
else:
editable_local_dists_digest = None
pex_request = PexRequest(
description=f"Build pex for resolve `{resolve}`",
output_filename=f"{path_safe(resolve)}.pex",
internal_only=True,
requirements=EntireLockfile(lockfile),
sources=editable_local_dists_digest,
interpreter_constraints=interpreter_constraints,
# Packed layout should lead to the best performance in this use case.
layout=PexLayout.PACKED,
)
dest_prefix = os.path.join("python", "virtualenvs")
export_result = await Get(
ExportResult,
VenvExportRequest(
pex_request,
dest_prefix,
resolve,
qualify_path_with_python_version=True,
editable_local_dists_digest=editable_local_dists_digest,
),
)
return MaybeExportResult(export_result)
@rule
async def export_virtualenvs(
request: ExportVenvsRequest,
export_subsys: ExportSubsystem,
) -> ExportResults:
if not export_subsys.options.resolve:
raise ExportError("Must specify at least one --resolve to export")
if request.targets:
raise ExportError("The `export` goal does not take target specs.")
maybe_venvs = await MultiGet(
Get(MaybeExportResult, _ExportVenvForResolveRequest(resolve))
for resolve in export_subsys.options.resolve
)
return ExportResults(mv.result for mv in maybe_venvs if mv.result is not None)
def rules():
return [
*collect_rules(),
Export.subsystem_cls.register_plugin_options(ExportPluginOptions),
UnionRule(ExportRequest, ExportVenvsRequest),
]
|
# A hard problem that I still couldn't solve at first glance.
# A* search algorithm, seeing it for the first time.
from typing import List
from heapq import heappop, heappush
class Solution:
def cutOffTree(self, forest: List[List[int]]) -> int:
def f(i, j, x, y):
return abs(i - x) + abs(j - y)
def bfs(i, j, x, y):
q = [(f(i, j, x, y), i, j)]
dist = {i * n + j: 0}
while q:
_, i, j = heappop(q)
step = dist[i * n + j]
if (i, j) == (x, y):
return step
for a, b in [[0, -1], [0, 1], [-1, 0], [1, 0]]:
c, d = i + a, j + b
if 0 <= c < m and 0 <= d < n and forest[c][d] > 0:
if c * n + d not in dist or dist[c * n + d] > step + 1:
dist[c * n + d] = step + 1
heappush(q, (dist[c * n + d] + f(c, d, x, y), c, d))
return -1
m, n = len(forest), len(forest[0])
trees = [(forest[i][j], i, j) for i in range(m) for j in range(n) if forest[i][j] > 1]
trees.sort()
i = j = 0
ans = 0
for _, x, y in trees:
t = bfs(i, j, x, y)
if t == -1:
return -1
ans += t
i, j = x, y
return ans
|
import sys
import time
import os
import configparser
from subprocess import call
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
from watchdog.events import FileSystemEventHandler
import matplotlib.pyplot as plt
#import cv2
from skimage import io
import matplotlib.image as mpimg
import matplotlib.animation as animation
import matplotlib.gridspec as gridspec
#'width_ratios':[5, 1, 5, 1, 3, 1],
#fig = plt.figure(figsize=(8,8)) # Notice the equal aspect ratio
#axes = [fig.add_subplot(3,6,i+1) for i in range(18)]
fig, axes = plt.subplots(3, 6, figsize=(20,13))
plt.subplots_adjust(wspace=0.01, hspace=0.05)
configplot = configparser.ConfigParser()
configplot.read('plotconfig.ini')
direct0=configplot['DIRECTS']['NODE_0_IMG']
rti00=configplot['IMAGES']['RTI_NODE0_CH0']
dop00=configplot['IMAGES']['DOP_NODE0_CH0']
def animate(i):
try:
#config = configparser.ConfigParser()
#config.read('/home/si/Dropbox/PyProjects/Poll_look/NeXtRAD.ini')
#configplot.read('/home/si/Dropbox/PyProjects/Poll_look/plotconfig.ini')
#NODE 0 IMAGES
direct0=configplot['DIRECTS']['NODE_0_IMG']
rti00=configplot['IMAGES']['RTI_NODE0_CH0']
dop00=configplot['IMAGES']['DOP_NODE0_CH0']
rti10=configplot['IMAGES']['RTI_NODE0_CH1']
dop10=configplot['IMAGES']['DOP_NODE0_CH1']
rti20=configplot['IMAGES']['RTI_NODE0_CH2']
dop20=configplot['IMAGES']['DOP_NODE0_CH2']
try:
img1 = io.imread(direct0+rti00)
img2 = io.imread(direct0+dop00)
#img2 = cv2.resize(img2, (img2.shape[1],img1.shape[0]),interpolation = cv2.INTER_AREA)
img3 = io.imread(direct0+rti10)
img4 = io.imread(direct0+dop10)
img5 = io.imread(direct0+rti20)
img6 = io.imread(direct0+dop20)
except:
print('Missing Images From Directory')
try:
axes[0, 0].clear()
axes[0, 0].imshow(img1)
axes[0, 0].set_title('Channel 0')
pad = 0.01
axes[0, 0].annotate('Node 0', xy=(0, 0.5), xytext=(-axes[0, 0].yaxis.labelpad - pad, 0),
xycoords=axes[0, 0].yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
axes[0, 0].axis('off')
#axes[0, 0].set_aspect('equal')
axes[0, 1].clear()
axes[0, 1].imshow(img2)
#axes[0, 1].set_title('Range-Doppler')
axes[0, 1].axis('off')
#axes[0, 1].set_aspect('equal')
axes[0, 2].clear()
axes[0, 2].imshow(img3)
axes[0, 2].set_title('Channel 1')
#axes[0, 2].set_title('Range-Doppler')
axes[0, 2].axis('off')
#axes[0, 2].set_aspect('equal')
axes[0, 3].clear()
axes[0, 3].imshow(img4)
#axes[0, 3].set_title('Range-Doppler')
axes[0, 3].axis('off')
#axes[0, 3].set_aspect('equal')
axes[0, 4].clear()
axes[0, 4].imshow(img5)
axes[0, 4].set_title('Channel 2')
#axes[0, 4].set_title('Range-Doppler')
axes[0, 4].axis('off')
#axes[0, 4].set_aspect('equal')
axes[0, 5].clear()
axes[0, 5].imshow(img6)
#axes[0, 5].set_title('Range-Doppler')
axes[0, 5].axis('off')
#axes[0, 5].set_aspect('equal')
except:
print('Could Not Plot Node 0 Images')
#NODE 1 IMAGES
direct1=configplot['DIRECTS']['NODE_1_IMG']
rti01=configplot['IMAGES']['RTI_NODE1_CH0']
dop01=configplot['IMAGES']['DOP_NODE1_CH0']
rti11=configplot['IMAGES']['RTI_NODE1_CH1']
dop11=configplot['IMAGES']['DOP_NODE1_CH1']
rti21=configplot['IMAGES']['RTI_NODE1_CH2']
dop21=configplot['IMAGES']['DOP_NODE1_CH2']
try:
img7 = io.imread(direct1+rti01)[:, :, :]
img8 = io.imread(direct1+dop01)[:, :, :]
img9 = io.imread(direct1+rti11)[:, :, :]
img10 = io.imread(direct1+dop11)[:, :, :]
img11 = io.imread(direct1+rti21)[:, :, :]
img12 = io.imread(direct1+dop21)[:, :, :]
except:
print('Missing Images From Directory')
try:
axes[1, 0].clear()
axes[1, 0].imshow(img7)
axes[1, 0].axis('off')
axes[1, 0].annotate('Node 1', xy=(0, 0.5), xytext=(-axes[1, 0].yaxis.labelpad - pad, 0),
xycoords=axes[1, 0].yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
axes[1, 1].clear()
axes[1, 1].imshow(img8)
axes[1, 1].axis('off')
axes[1, 2].clear()
axes[1, 2].imshow(img9)
axes[1, 2].axis('off')
axes[1, 3].clear()
axes[1, 3].imshow(img10)
axes[1, 3].axis('off')
axes[1, 4].clear()
axes[1, 4].imshow(img11)
axes[1, 4].axis('off')
axes[1, 5].clear()
axes[1, 5].imshow(img12)
axes[1, 5].axis('off')
except:
print('Could Not Plot Node 1 Images')
        #NODE 2 IMAGES
direct2=configplot['DIRECTS']['NODE_2_IMG']
rti02=configplot['IMAGES']['RTI_NODE2_CH0']
dop02=configplot['IMAGES']['DOP_NODE2_CH0']
rti12=configplot['IMAGES']['RTI_NODE2_CH1']
dop12=configplot['IMAGES']['DOP_NODE2_CH1']
rti22=configplot['IMAGES']['RTI_NODE2_CH2']
dop22=configplot['IMAGES']['DOP_NODE2_CH2']
try:
img13 = io.imread(direct2+rti02)[:, :, :]
img14 = io.imread(direct2+dop02)[:, :, :]
img15 = io.imread(direct2+rti12)[:, :, :]
img16 = io.imread(direct2+dop12)[:, :, :]
img17 = io.imread(direct2+rti22)[:, :, :]
img18 = io.imread(direct2+dop22)[:, :, :]
except:
print('Missing Images From Directory')
try:
axes[2, 0].clear()
axes[2, 0].imshow(img13)
axes[2, 0].axis('off')
axes[2, 0].annotate('Node 2', xy=(0, 0.5), xytext=(-axes[2, 0].yaxis.labelpad - pad, 0),
xycoords=axes[2, 0].yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
axes[2, 1].clear()
axes[2, 1].imshow(img14)
axes[2, 1].axis('off')
axes[2, 2].clear()
axes[2, 2].imshow(img15)
axes[2, 2].axis('off')
axes[2, 3].clear()
axes[2, 3].imshow(img16)
axes[2, 3].axis('off')
axes[2, 4].clear()
axes[2, 4].imshow(img17)
axes[2, 4].axis('off')
axes[2, 5].clear()
axes[2, 5].imshow(img18)
axes[2, 5].axis('off')
except:
print('Could Not Plot Node 2 Images')
except:
print('Image Not Found')
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
|
import os
from flask import Flask
from flask import render_template, flash
from flask import request
from flask_bootstrap import Bootstrap
from flask_wtf import Form
from wtforms import StringField, validators, SubmitField
from flask_wtf.file import FileField, FileRequired
from werkzeug.utils import secure_filename  # used below when saving the uploaded file
from youtoplay import youtoplay
class LinkForm(Form):
    url = StringField('URL:', [validators.URL()])
submit = SubmitField('Add Song')
class SongForm(Form):
song = FileField(validators=[FileRequired('No file uploaded')])
submit = SubmitField('Add File')
app = Flask(__name__)
Bootstrap(app)
app.secret_key = 'mukul123'
@app.route('/', methods=['GET', 'POST'])
def index():
linkform = LinkForm(request.form)
songform = SongForm(request.form)
if request.method == 'POST' and (songform.validate() or linkform.validate()):
##Download here
if linkform.validate():
link = linkform.url.data
if len(link)>0:
youtoplay.youtube_download(link)
if songform.validate():
print("test")
f = songform.song.data
            filename = secure_filename(f.filename)
            f.save(os.path.join(app.instance_path, filename))
youtoplay.upload_last()
flash('success')
return render_template('template.html', linkform=linkform, songform=songform)
|
# -*- coding: utf-8 -*-
from plsqldecoder.api.decoder.business import decode
def test_db_connection():
assert decode() == "Senhafacil#16"
|
s = input()
try:
print(int(s))
except Exception as e:
print(e)
print("rest of the code")
|
from django.contrib import admin
from miapp.models import Carro
# Register your models here.
admin.site.register(Carro)
|
import argparse
import bz2
import ftplib
try:
import manhole
except ModuleNotFoundError:
pass
import multiprocessing as mp
import numpy as np
import os
import re
import RSEntry as rse
import SlimRSCollection as srsc
from string import ascii_lowercase
import sys
import time
import traceback
import urllib.error
import urllib.request
# List of all chromosomes
CHRS = [str(i) for i in range(1,23)] + ['X', 'Y', 'MT']
def check_consdb_sorted(fn):
"""
Helper function to check if a ConsDB file is sorted by position.
Parameters:
fn: File to check
"""
chrom = 0
cur = 0
for line in rse.RSCollection.open(fn):
try:
line = line.decode()
except AttributeError:
pass
c, p = re.split('[:,]', line)[1:3]
c = rse.RSCollection.chrom_to_int(c)
p = int(p)
if c > chrom:
chrom = c
cur = 0
if p < cur or c < chrom:
return(False)
cur = p
return(True)
def check_downloads(db, chrs, fp='.', force=False):
"""
Function to download all necessary database files. Valid databases are:
* dbSNP
* 1000gp
* gnomAD
Database names are not case sensitive.
Parameters:
db: Database to use
chrs: List of chromosomes to use
fp: File path to store files
force: Whether or not to re-download files that are already downloaded
"""
db = db.lower()
if db == 'dbsnp':
file_base = ['refsnp-chr{}.json.bz2']
url_base = 'ftp.ncbi.nlm.nih.gov/snp/latest_release/JSON/{}'
ext = re.escape('.json.bz2')
elif db == '1000gp':
# Also need to download index file and pop file
# File with sexes:
# ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/working/20130606_sample_info/20130606_g1k.ped
fns = ['1000genomes.sequence.index', '20131219.populations.tsv']
urls = [('ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/'
'1000_genomes_project/{}'),
'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/{}']
for i in range(len(fns)):
fn = fns[i]
fn_full = f'{fp}/{fn}'
print(f'Checking for {fn}...', flush=True)
if os.path.isfile(fn_full) and not force:
print(f'{fn} already present.', flush=True)
else:
print(f'Downloading {fn}...', flush=True)
urllib.request.urlretrieve(urls[i].format(fn), fn_full)
file_base = [('ALL.chr{}.shapeit2_integrated_snvindels_v2a_27022019.'
'GRCh38.phased.vcf.gz')]
url_base = ('ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/'
'data_collections/1000_genomes_project/release/'
'20190312_biallelic_SNV_and_INDEL/{}')
ext = re.escape('.vcf.gz')
elif db == 'gnomad':
file_base = ['gnomad.genomes.v3.1.sites.chr{}.vcf.bgz']
url_base = ('https://storage.googleapis.com/gcp-public-data--gnomad/'
'release/3.1/vcf/genomes/{}')
ext = re.escape('.vcf.bgz')
# Attempt to download using FTP, otherwise use HTTPS
try:
if url_base[:3] == 'ftp':
serv, *directory = url_base[6:].split('/')
else:
raise ftplib.socket.gaierror
ftp = ftplib.FTP(serv)
ftp.login()
ftp.cwd('/'.join(directory[:-1]))
all_fns = ftp.nlst()
use_ftp = True
except ftplib.socket.gaierror:
use_ftp = False
for c in chrs:
if use_ftp:
print('Using FTP', flush=True)
            fn_match = rf'\.(?:chr)?{c}\..*{ext}$'
print(f'Looking for remote files matching {fn_match}...',
flush=True)
chr_fns = [fn for fn in all_fns if re.search(fn_match, fn)]
if len(chr_fns) == 0:
print(f'No file found for chr {c}.', flush=True)
continue
for fn in chr_fns:
fn_full = f'{fp}/{fn}'
print(f'Checking for {fn}...', flush=True)
if os.path.isfile(fn_full) and not force:
print(f'{fn} already present.', flush=True)
continue
with open(fn_full, 'wb') as ftp_fp:
print(f'Downloading {fn}...', flush=True)
ftp.retrbinary(f'RETR {fn}', ftp_fp.write)
else:
print('Using HTTPS', flush=True)
for i in range(len(file_base)):
fn = file_base[i].format(c)
fn_full = f'{fp}/{fn}'
print(f'Checking for {fn}...', flush=True)
if os.path.isfile(fn_full) and not force:
print(f'{fn} already present.', flush=True)
continue
try:
print(f'Downloading {fn}...', flush=True)
urllib.request.urlretrieve(url_base.format(fn), fn_full)
except (urllib.error.HTTPError, ftplib.error_perm,
urllib.error.URLError) as e:
print(f'Error downloading {fn}.', flush=True)
continue
if use_ftp:
ftp.quit()
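# Example invocation sketch (destination directory and chromosome list are illustrative):
# download the 1000 Genomes Project files for chromosomes 21 and 22 into ./db_files.
# check_downloads('1000gp', ['21', '22'], fp='./db_files')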
def col_from_db(db, chrs, fp='.', chr_path=None, chr_maj=False, chr_all=False,
quiet=False):
"""
Create a RSCollection object for all variants on the given chromosome in the
given database.
Valid databases are:
* dbSNP
* 1000gp
* gnomAD
Database names are not case sensitive.
Parameters:
db: Name of database to use
chrs: List of chromosomes to use
fp: File path where the database files are stored
chr_path: File path to save individual chromosome ConsDB files
chr_maj: Indicates whether to save major allele variants
chr_all: Indicates whether to save all variants
quiet: Do not print log/progress messages
"""
db = db.lower()
if db == 'dbsnp':
file_base = ['{}/refsnp-chr{}.json.bz2']
db_parse = rse.RSCollection.from_dbsnp
elif db == '1000gp':
file_base = [('{}/ALL.chr{}.shapeit2_integrated_snvindels_v2a_27022019.'
'GRCh38.phased.vcf.gz')]
db_parse = lambda fn, quiet: rse.RSCollection.from_1000gp(fn,
f'{fp}/1000genomes.sequence.index',
f'{fp}/20131219.populations.tsv', quiet)
elif db == 'gnomad':
file_base = ['{}/gnomad.genomes.r2.1.1.sites.{}.vcf.bgz',
'{}/gnomad.exomes.r2.1.1.sites.{}.vcf.bgz']
db_parse = rse.RSCollection.from_gnomad
rsc = rse.RSCollection()
for c in chrs:
for fn in file_base:
fn = fn.format(fp, c)
if not os.path.isfile(fn): continue
print(f'Loading {fn}...', flush=True)
s = time.time()
rsc += db_parse(fn, quiet=quiet)
e = time.time()
print(f'Finished {fn}. Took {e-s} s.', flush=True)
if chr_path:
if chr_all:
fn = f'{chr_path}/chr{c}_rscol.gz'
print(f'Saving chromosome {c}...', flush=True)
rsc.dump(fn, None, c)
print(f'Chromosome {c} saved.', flush=True)
if chr_maj:
fn = f'{chr_path}/chr{c}_maj.gz'
print(f'Saving chromosome {c} major alleles...', flush=True)
rsc.get_major().dump(fn, None, c)
print(f'Chromosome {c} major alleles saved.', flush=True)
return(rsc)
def consdb_to_fasta(consdb_fn, in_fa, out_fa):
if check_consdb_sorted(consdb_fn):
lines = rse.RSCollection.open(consdb_fn)
else:
print((f'WARNING: File {consdb_fn} is not sorted, this operation may '
'take a lot of time/RAM. Consider pre-sorting.'))
lines = sorted([line.decode() if type(line) == bytes else line \
for line in rse.RSCollection.open(consdb_fn)],
key=rse.RSCollection.sort_rsidx_line)
## Load original genome
orig_gen, chr_header = load_genome(in_fa)
## Go through ConsDB file and create new genome
new_gen = {}
gen_ctr = {c: 0 for c in orig_gen.keys()}
for line in lines:
try:
line = line.decode()
except AttributeError:
pass
if line[0] == '#':
continue
line = re.split('[:,]', line.strip())
chrom = line[1]
pos = int(line[2]) - 1
rec_ref = line[3]
rec_alt = line[4]
# Make sure that we have a reference genome for this chromosome
try:
g = orig_gen[chrom]
except KeyError:
print(f'No reference found for chr{chrom}, skipping variants.')
orig_gen[chrom] = None
continue
if g is None:
continue
if pos >= len(g):
continue
if gen_ctr[chrom] > pos:
print((f'Skipping variant '
f'{line[1]}\t{line[2]}\t{line[3]}\t{line[4]}, overlapped by '
'prior variant.'))
continue
try:
n = new_gen[chrom]
except KeyError:
n = new_gen[chrom] = []
if g[pos:pos+len(rec_ref)] != rec_ref:
            raise AssertionError(('Reference genome and VCF file disagree at '
                f'{line[1]} {line[2]} (ref: {g[pos:pos+len(rec_ref)]}, '
                f'vcf: {rec_ref}).'))
n.append(g[gen_ctr[chrom]:pos])
n.append(rec_alt)
gen_ctr[chrom] = pos + len(rec_ref)
## Print new genome
with open(out_fa, 'w') as fp:
for c in CHRS:
try:
g = new_gen[c]
if gen_ctr[c] < len(orig_gen[c]):
g.append(orig_gen[c][gen_ctr[c]:])
except KeyError:
if c in orig_gen and orig_gen[c]:
g = orig_gen[c]
else:
continue
fp.write(chr_header[c])
g = ''.join(g)
for i in range(0, len(g), 70):
fp.write(f'{g[i:i+70]}\n')
try:
lines.close()
except AttributeError:
pass
def filter_vcf_pers(fn_in, fn_out, pers_id, het='rand'):
"""
Filter a VCF file for a given individual.
Possible choices for het are:
* 'rand': Randomly pick a haplotype at each position
* [0,1]: Choose either left or right haplotype at each position
Parameters:
fn_in: VCF file to filter
fn_out: Output filename
pers_id: ID of the individual to filter
het: How to handle heterozygous variants
"""
try:
het = int(het)
except ValueError:
het = het.lower()
except TypeError:
het = 'rand'
if het not in {'rand', 0, 1}:
raise ValueError(f'Bad option for het: {het}')
samp_idx = None
pos_set = set()
with open(fn_out, 'w') as fp:
for line in rse.RSCollection.open(fn_in):
try:
line = line.decode()
except AttributeError:
pass
if line[:2] == '##':
fp.write(line)
continue
line = np.asarray(line.strip().split('\t'))
if line[0] == '#CHROM' and samp_idx is None:
samp_idx = np.asarray(line) == pers_id
samp_idx[:9] = True
line = '\t'.join(line[samp_idx])
fp.write(f'{line}\n')
continue
if line[0][0] == '#':
continue
rec_alts = line[4].split(',')
rec_info = {}
for inf in line[7].split(';'):
inf = inf.split('=')
k = inf[0]
try:
v = inf[1].split(',')
if len(v) == 1:
v = v[0]
except IndexError:
v = 0
rec_info[k] = v
line = line[samp_idx]
# Only want one first variant at any given position, just take the
# first one seen
if tuple(line[:2]) in pos_set:
continue
gt = re.split(r'[|/]', line[-1])
if (len(np.unique(gt)) == 1):
if gt[0] == '0':
continue
pos_set.add(tuple(line[:2]))
# Need to adjust alt index (0 is ref in VCF files)
line[4] = rec_alts[int(gt[0])-1]
line[7] = ';'.join([f'{k}={v[int(gt[0])-1]}' if type(v) is list \
else (f'{k}={v}' if v != 0 else k) \
for k,v in rec_info.items()])
line = '\t'.join(line)
fp.write(f'{line}\n')
elif het == 'rand':
gt = np.random.choice([0,1])
if gt == 0:
continue
pos_set.add(tuple(line[:2]))
# Need to adjust alt index (0 is ref in VCF files)
line[4] = rec_alts[gt-1]
line[7] = ';'.join([f'{k}={v[gt-1]}' if type(v) is list \
else (f'{k}={v}' if v != 0 else k) \
for k,v in rec_info.items()])
line = '\t'.join(line)
fp.write(f'{line}\n')
elif type(het) == int:
try:
gt = int(gt[het])
except IndexError:
raise ValueError(f'Bad argument for het: {het}.')
if gt == 0:
continue
pos_set.add(tuple(line[:2]))
# Need to adjust alt index (0 is ref in VCF files)
line[4] = rec_alts[gt-1]
line[7] = ';'.join([f'{k}={v[gt-1]}' if type(v) is list \
else (f'{k}={v}' if v != 0 else k) \
for k,v in rec_info.items()])
line = '\t'.join(line)
fp.write(f'{line}\n')
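# Hypothetical usage sketch (added for illustration; the file names and sample ID
# below are placeholders, not from the original source): keep only sample
# HG00096's variants, always taking the left haplotype at heterozygous sites.
#   filter_vcf_pers('cohort.vcf', 'HG00096_only.vcf', 'HG00096', het=0)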
def find_rsid_frags(c, fp_in, fns, vcf=True):
"""
Build dictionary mapping from (chr, pos) -> list of files containing that
position. This is used in the load_and_save method so that chunks can be
saved safely without worrying about having two entries at the same position
in the final ConsDB file.
Parameters:
c: Chromosome
fp_in: Input file path
fns: List of file fragment names to look for variants in
vcf: Indicates if the passed file fragments came from a VCF file
"""
frag_dict = {}
n_frags = len(fns)
if vcf:
        rs_match = (r'^(?:chr)?([0-9XYMT]{1,2})\s+([0-9]+)\s+([.;a-z0-9]+)\s+'
            r'([A-Z]+)\s+([A-Z,]+)')
else:
rs_match = '"refsnp_id":"([0-9]+)"'
count = 1
for fn in fns:
if count % 15 == 0:
print((f'chr {c}: Finding rsid fragments '
f'(fragment {count}/{n_frags})'), flush=True)
count += 1
with open(f'{fp_in}/{fn}', 'r') as fp:
for line in fp:
m = re.search(rs_match, line)
try:
if vcf:
# Use (chr, pos) to avoid entries that may need to be
# grouped together but one is missing rsid
# Need to account for cases with multiple alts
rsid = []
for alt in m.group(5).split(','):
rsid.append((m.group(1), m.group(2)))
else:
rsid = [m.group(1)]
except AttributeError as e:
if line[0] != '#' and '<' not in line.split('\t')[4]:
print(f'chr {c}: rsid not found in {line}', flush=True)
raise e
continue
for rs in rsid:
try:
frag_dict[rs].add(fn)
except KeyError:
frag_dict[rs] = {fn}
return(frag_dict)
def fragment_files(c, db, fp_in='.', fp_out='./frags/', nlines=10000):
"""
Function to create file fragments of nlines for the given chromosome.
Designed to approximate the split GNU program.
Valid databases are:
* dbSNP
* 1000gp
* gnomAD
Database names are not case sensitive.
Parameters:
c: Chromosome to use
db: Database being used
fp_in: Input file path
fp_out: Output file path to store file fragments
nlines: Number of lines each fragment should contain
"""
c_fp = f'{fp_out}/chr{c}_frag_'
db = db.lower()
if db == 'dbsnp':
ext = re.escape('.json.bz2')
elif db == 'gnomad':
ext = re.escape('vcf.bgz')
elif db == '1000gp':
ext = re.escape('.vcf.gz')
else:
print(f'db: {db}')
raise AssertionError
fns = os.listdir(fp_in)
    fn_match = rf'\.(?:chr)?{c}\..*{ext}$'
chr_fns = [fn for fn in fns if re.search(fn_match, fn)]
frag_fns = []
for fn in chr_fns:
fn = f'{fp_in}/{fn}'
fn_lines = 0
for line in rse.RSCollection.open(fn):
try:
line = line.decode()
except AttributeError:
pass
if line[0] == '#':
continue
else:
fn_lines += 1
# Calculate number of fragment files needed and max number of letters
# needed
n_files = np.ceil(fn_lines / nlines)
n_frag_letter = int(np.ceil(np.log(n_files)/np.log(26)))
header = []
frag = np.zeros(n_frag_letter, dtype=int)
counter = 0
fp_frag = open(f'{c_fp}{idx_to_frag(frag)}', 'w')
frag_fns.append(f'{c_fp}{idx_to_frag(frag)}')
for line in rse.RSCollection.open(fn):
try:
line = line.decode()
except AttributeError:
pass
if line[0] == '#':
header.append(line)
continue
# If we've written nlines to the current file, open up the next file
if counter == nlines:
counter = 0
frag[-1] += 1
i = -1
# Increment any indices that need to be incremented
while frag[i] == 26:
frag[i] = 0
frag[i-1] += 1
i -= 1
fp_frag.close()
fp_frag = open(f'{c_fp}{idx_to_frag(frag)}', 'w')
frag_fns.append(f'{c_fp}{idx_to_frag(frag)}')
# Write the VCF header at the beginning of any new file
if counter == 0:
fp_frag.write(''.join(header))
fp_frag.write(line)
counter += 1
return(frag_fns)
def idx_to_frag(idx):
"""
Convert a list of integers in the range [0,25] to an alphabetic string of
the same length. Helper method for fragment_files().
0 -> a
1 -> b
...
Parameters:
        idx: List of integer indices in the range [0, 25]
"""
return(''.join([ascii_lowercase[i] for i in idx]))
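# Illustrative examples (added; they mirror GNU split's alphabetic suffixes):
#   >>> idx_to_frag([0, 0])
#   'aa'
#   >>> idx_to_frag([1, 25])
#   'bz'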
def join_fns(fns, fn_out):
"""
Join multiple files together by simple concatenation. Intended to be used to
combine multiple ConsDB files together after parallelised parsing.
Parameters:
fns: List of files
fn_out: File to output to
"""
if os.path.isfile(fn_out):
os.remove(fn_out)
with open(fn_out, 'wb') as fp_out:
for fn in fns:
for line in open(fn, 'rb'):
fp_out.write(line)
def join_vcfs(fns, fn_out):
"""
Join multiple VCF files together. Files will be added in order, so they
should already be sorted by chromosome.
Parameters:
fns: List of files
fn_out: File to output to
"""
# Get meta information
header_lines = []
filedate = False
for fn in fns:
for line in open(fn, 'r'):
line = line.strip()
if line[:2] != '##':
break
if line not in header_lines:
if 'fileDate' in line:
if filedate:
continue
filedate = True
header_lines.append(line)
# Get header line
for line in open(fns[0], 'r'):
line = line.strip()
if line.split('\t')[0] == '#CHROM':
header_lines.append(line)
break
with open(fn_out, 'w') as fp_out:
fp_out.write('\n'.join(header_lines) + '\n')
for fn in fns:
for line in open(fn, 'r'):
if line[0] == '#':
continue
fp_out.write(line)
def load_genome(gen_fn):
"""
Load a genome from a FASTA file. Returns a dictionary of chrom -> DNA seq
and a dictionary of chromosome headers.
Parameters:
gen_fn: Genome file
"""
orig_gen = {}
chr_header = {}
chrom = ''
for line in open(gen_fn, 'r'):
if line[0] == '>':
if chrom != '':
orig_gen[chrom] = ''.join(orig_gen[chrom])
            # strip the leading '>' and optional 'chr' prefix (str.strip('>chr')
            # removes any of those characters and can mangle some contig names)
            chrom = re.sub('^>(?:chr)?', '', line.split()[0])
chr_header[chrom] = line
orig_gen[chrom] = []
continue
orig_gen[chrom].append(line.strip())
orig_gen[chrom] = ''.join(orig_gen[chrom])
return(orig_gen, chr_header)
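# Illustrative return structure (added; 'ref.fa' is a hypothetical file name):
#   orig_gen, chr_header = load_genome('ref.fa')
#   orig_gen   -> {'1': 'ACGT...', ...}        # chrom -> concatenated sequence
#   chr_header -> {'1': '>chr1 ...\n', ...}    # chrom -> original FASTA header line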
def make_cons_vcf(fn_in, fn_out, pop=None, quiet=True):
"""
Make a consensus VCF file using the BitRSCollection class.
Parameters:
fn_in: ConsDB file to load
fn_out: VCF file to save to
pop: Which population to use
quiet: Load quietly
"""
if pop:
col = srsc.BitRSCollection.load_from_file_pop(fn_in, pop, quiet)
else:
col = srsc.BitRSCollection.load_from_file_full(fn_in, quiet)
col.dump_vcf(fn_out, cons=True, is_maj=('maj' in fn_in))
def merge_files(c, fps, fp_out, fmt='chr{}_{}.gz', merge_all=False,
merge_maj=False):
"""
Function to merge multiple ConsDB files. Used as a wrapper for the
corresponding function in the RSEntry module.
Parameters:
        c: Chromosome to use
fps: File paths to look for input files
fp_out: Output file path
fmt: Format to use to look for files
merge_all: Indicates whether to merge files with all variants
merge_maj: Indicates whether to merge files with major allele variants
"""
fns = []
# Check how many placeholders in format to avoid loading/saving the same
# file multiple times
    if len(re.findall(r'\{\}', fmt)) == 1:
fns += [('.', [f'{fp}/{fmt.format(c)}' for fp in fps \
if os.path.isfile(f'{fp}/{fmt.format(c)}')])]
else:
if merge_all:
fns += [('rscol', [f'{fp}/{fmt.format(c, "rscol")}' for fp in fps \
if os.path.isfile(f'{fp}/{fmt.format(c, "rscol")}')])]
if merge_maj:
fns += [('maj', [f'{fp}/{fmt.format(c, "maj")}' for fp in fps \
if os.path.isfile(f'{fp}/{fmt.format(c, "maj")}')])]
if all([len(i[1]) == 0 for i in fns]):
print(f'No files found for chr {c}.', flush=True)
return
for (ext, fn_list) in fns:
# Know each file is only present once, and extra arguments to format are
# ignored, so don't need to check for multiple fmt placeholders
fn_out = f'{fp_out}/{fmt.format(c, ext)}'
rse.RSCollection.merge_files(fn_list, fn_out, c)
def mp_load_and_save(c, db, fp_in='.', fp_out=None, store_all=False,
store_maj=False, quiet=False):
"""
Function to load an entire database file for the given chromosome and save
it as a ConsDB file.
Valid databases are:
* dbSNP
* 1000gp
* gnomAD
Database names are not case sensitive.
Parameters:
c: Chromosome to use
db: Database being used
fp_in: Input file path of the database files
fp_out: Output file path for ConsDB files
store_all: Indicates whether to store ConsDB files with all variants
store_maj: Indicates whether to store ConsDB files with major allele
variants
quiet: Disable log/progress messages
"""
db = db.lower()
if db == 'dbsnp':
ext = re.escape('.json.bz2')
db_parse = lambda fn: rse.RSCollection.from_dbsnp(fn, quiet)
elif db == '1000gp':
ext = re.escape('.vcf.gz')
db_parse = lambda fn: rse.RSCollection.from_1000gp(
fn, f'{fp_in}/1000genomes.sequence.index',
f'{fp_in}/20131219.populations.tsv', quiet)
elif db == 'gnomad':
ext = re.escape('.vcf.bgz')
db_parse = lambda fn: rse.RSCollection.from_gnomad(fn, quiet)
fns = os.listdir(fp_in)
    fn_match = rf'\.(?:chr)?{c}\..*{ext}$'
chr_fns = [fn for fn in fns if re.search(fn_match, fn)]
rsc = rse.RSCollection()
for fn in chr_fns:
# Copying the code from col_from_db here to avoid having to return
# large variables
fn = f'{fp_in}/{fn}'
if not os.path.isfile(fn):
print(f'{fn} not found.', flush=True)
continue
print(f'Loading {fn}...', flush=True)
s = time.time()
rsc += db_parse(fn)
e = time.time()
print(f'Finished {fn}. Took {e-s} s.', flush=True)
if fp_out:
if store_all:
print(f'Saving chromosome {c}...', flush=True)
rsc.dump(f'{fp_out}/chr{c}_rscol.gz', None, c)
print(f'Chromosome {c} saved.', flush=True)
if store_maj:
print(f'Saving chromosome {c} major alleles...', flush=True)
rsc.get_major().dump(f'{fp_out}/chr{c}_maj.gz', None, c)
print(f'Chromosome {c} major alleles saved.', flush=True)
def mp_load_and_save_chr(c, db, fp_db, fp_in, fp_out, store_all=False,
store_maj=False, quiet=False):
"""
Function to load database fragment files for the given chromosome and save
as a ConsDB file.
Valid databases are:
* dbSNP
* 1000gp
* gnomAD
Database names are not case sensitive.
Parameters:
c: Chromosome to use
db: Database being used
fp_db: File path containing the original database files
fp_in: File path containing the database file fragments
fp_out: Output file path for ConsDB files
store_all: Indicates whether to store ConsDB files with all variants
store_maj: Indicates whether to store ConsDB files with major allele
variants
quiet: Whether to disable log messages
"""
db = db.lower()
if db == 'dbsnp':
db_parse = lambda fn: rse.RSCollection.from_dbsnp(fn, quiet)
vcf = False
elif db == '1000gp':
db_parse = lambda fn, quiet: rse.RSCollection.from_1000gp(fn,
f'{fp_db}/1000genomes.sequence.index',
f'{fp_db}/20131219.populations.tsv', quiet)
vcf = True
elif db == 'gnomad':
db_parse = lambda fn, quiet: rse.RSCollection.from_gnomad(fn, quiet)
vcf = True
else:
print(f'db: {db}', flush=True)
raise AssertionError
match = [f'chr{c}_frag_[a-z]+']
fns = [fn for fn in os.listdir(fp_in) for m in match if re.search(m, fn)]
n_frags = len(fns)
# Create dictionary of the last fragment a (chrom, pos) was found in to know
# when it's ok to save an entry
rsid_frag_dict = find_rsid_frags(c, fp_in, fns, vcf)
if not quiet:
print(f'chr {c}:', len(rsid_frag_dict), 'rsids total', flush=True)
# Remove target files if they exist so we can open in append mode
save_fn = f'{fp_out}/chr{c}_rscol.gz'
idx_file = f'{save_fn}.idx'
maj_fn = f'{fp_out}/chr{c}_maj.gz'
maj_idx = f'{maj_fn}.idx'
if os.path.exists(save_fn):
os.remove(save_fn)
if os.path.exists(idx_file):
os.remove(idx_file)
if os.path.exists(maj_fn):
os.remove(maj_fn)
if os.path.exists(maj_idx):
os.remove(maj_idx)
if not quiet:
print (f'Loading chromosome {c}...', flush=True)
s = time.time()
all_size = 0
maj_size = 0
count = 1
rsc = rse.RSCollection()
seen_frags = set()
write_rsids = set()
for fn in fns:
if count % 15 == 0 and not quiet:
print(f'chr {c}: Loading fragment {count}/{n_frags}', flush=True)
rsc += db_parse(f'{fp_in}/{fn}', quiet=quiet)
count += 1
seen_frags.add(fn)
if db == 'dbsnp':
write_rsids.update([k for k in rsc.entries.keys() \
if rsid_frag_dict[k[0]].issubset(seen_frags)])
else:
write_rsids.update([k for k in rsc.entries.keys() \
if rsid_frag_dict[(str(k[1]), str(k[2]))].issubset(seen_frags)])
if not quiet:
print(fn)
print(len(write_rsids), 'rsids in write_rsids', flush=True)
print(len(rsc), 'rsids in rsc', flush=True)
if (len(write_rsids) >= 50000 or fn == fns[-1]) and fp_out:
if store_all:
if not quiet:
print(f'Writing {len(write_rsids)} entries to chromosome '
f'{c}...', flush=True)
all_size = rsc.dump(save_fn, idx_file, c, rsids=write_rsids,
old_size=all_size, append=True)
if not quiet:
print(f'Wrote to chromosome {c}.', flush=True)
if store_maj:
maj_size = rsc.get_major().dump(maj_fn, maj_idx, c,
rsids=write_rsids, old_size=maj_size, append=True)
rsc = rse.RSCollection()
write_rsids = set()
e = time.time()
if not quiet:
print(f'Finished chromosome {c}. Took {e-s} s.', flush=True)
def parse_chr_file(fn):
"""
Function to extract chromosomes to use from the given file.
Parameters:
fn: File name to use
"""
chrs = np.loadtxt(fn, dtype=object)
chr_idx = np.asarray([c in CHRS for c in chrs])
for c in chrs[~chr_idx]:
print('Unknown chromosome {}.'.format(c))
chrs = chrs[chr_idx]
print('Chromosomes to use: {}'.format(', '.join(chrs)))
return(chrs)
def print_traceback(c, err, tb):
"""
Function to print a traceback from given error and traceback objects.
Parameters:
c: Chromosome to use
err: Exception object
tb: Traceback object
"""
tb_str = '##########################\n'
    tb_str += (f'Error "{c.__name__}: {err}" occurred on line {tb.tb_lineno} '
f'in {tb.tb_frame.f_code.co_filename}.\n')
tb_str += 'Traceback:\n'
while tb.tb_next:
tb_str += (f'line {tb.tb_lineno} in {tb.tb_frame.f_code.co_filename} '
'->\n')
tb = tb.tb_next
tb_str += f'line {tb.tb_lineno} in {tb.tb_frame.f_code.co_filename}\n'
tb_str += '##########################'
print(tb_str, flush=True)
def queue_wrap(q, func, *args, **kwargs):
"""
Wrapper function around whatever function is being run in parallel. This
allows for catching errors thrown from a process in a multiprocessing Pool.
Normally these errors aren't thrown until all processes finish, which can
result in a lot of wasted time.
q: Queue object used for sending error messages to the parent process
func: Function to be called
args: Positional arguments to be sent to func
kwargs: Keyword arguments to be sent to func
"""
try:
res = func(*args, **kwargs)
except Exception as e:
exc = sys.exc_info()
print(f'Sending message to queue.', flush=True)
print_traceback(*exc)
q.put((None,e))
else:
q.put(('END',None))
return(res)
def vcf_to_fasta(vcf_fn, in_fa, out_fa):
## Load original genome
orig_gen, chr_header = load_genome(in_fa)
## Go through VCF file and create new genome
new_gen = {}
gen_ctr = {c: 0 for c in orig_gen.keys()}
for line in open(vcf_fn, 'r'):
if line[0] == '#':
continue
line = line.strip().split('\t')
        # strip an optional leading 'chr' prefix without mangling contig names
        chrom = re.sub('^chr', '', line[0])
pos = int(line[1]) - 1
rec_ref = line[3]
# If multiple alts, just take the first one
rec_alt = line[4].split(',')[0]
# Make sure that we have a reference genome for this chromosome
try:
g = orig_gen[chrom]
except KeyError:
print(f'No reference found for chr{chrom}, skipping variants.')
orig_gen[chrom] = None
continue
if g is None:
continue
if pos >= len(g):
continue
if gen_ctr[chrom] > pos:
print((f'Skipping variant '
f'{line[0]}\t{line[1]}\t{line[3]}\t{line[4]}, overlapped by '
'prior variant.'))
continue
try:
n = new_gen[chrom]
except KeyError:
n = new_gen[chrom] = []
if g[pos:pos+len(rec_ref)] != rec_ref:
raise AssertionError(('Reference genome and VCF file disagree at '
                f'{line[0]} {line[1]} (ref: {g[pos:pos+len(rec_ref)]}, '
f'vcf: {rec_ref}).'))
n.append(g[gen_ctr[chrom]:pos])
n.append(rec_alt)
gen_ctr[chrom] = pos + len(rec_ref)
## Print new genome
with open(out_fa, 'w') as fp:
for c in CHRS:
try:
g = new_gen[c]
if gen_ctr[c] < len(orig_gen[c]):
g.append(orig_gen[c][gen_ctr[c]:])
except KeyError:
if c in orig_gen and orig_gen[c]:
g = orig_gen[c]
else:
continue
fp.write(chr_header[c])
g = ''.join(g)
for i in range(0, len(g), 70):
fp.write(f'{g[i:i+70]}\n')
################################################################################
def get_args():
"""
Parse command line arguments.
Valid run modes are:
Parse
Filter
Merge
Cons
"""
run_mode = sys.argv[1].lower()
if run_mode == 'parse':
parser = argparse.ArgumentParser(prog='ConsDB Parse',
description='Parse a variant database and output ConsDB files.')
## General arguments
parser.add_argument('-o', help='File to store output.')
parser.add_argument('-maj', help='File to store major alleles.')
parser.add_argument('-chr', default=['all'], nargs='+',
help=('Which chromosome(s) to use. Can either be a list of '
'numbers/letters or a file containing each chromosome to use '
'on a new line. Use "all" for all chromosomes.'))
parser.add_argument('-quiet', action='store_true',
help='Parse quietly.')
## Database arguments
parser.add_argument('-db', default='1000GP', help= ('Database to use '
'[dbSNP 1000GP gnomAD]. Uses 1000GP if no argument is supplied.'))
parser.add_argument('-db_path', default='.', help='Where to store '
'database downloads. Defaults to run path.')
parser.add_argument('-db_force', action='store_true', help='Whether or '
'not to force database downloading (overwrite existing files).')
parser.add_argument('-db_mp', action='store_true',
help=('Whether or not to use multiprocessing for downloading.'))
parser.add_argument('-db_proc', type=int, default=12,
help='Number of downloads to run concurrently.')
## Multiprocessing arguments
parser.add_argument('-mp', action='store_true',
help=('Whether or not to process files using multiple cores.'))
parser.add_argument('-mp_path', default='.',
help=('Where to store output files. Defaults to run path.'))
parser.add_argument('-mp_o', action='store_true',
            help='Whether or not to store files containing all alleles.')
        parser.add_argument('-mp_maj', action='store_true',
            help='Whether or not to store files containing only major alleles.')
parser.add_argument('-mp_proc', type=int, default=12,
help='Number of processes to run concurrently.')
parser.add_argument('-preprocess', action='store_true',
help='Whether or not to fragment database files before processing.')
parser.add_argument('-pp_path', default='./frags/',
help='Directory to store fragment files. Defaults to run path.')
parser.add_argument('-pp_nlines', type=int, default=100000,
help='Number of lines per fragment file.')
parser.add_argument('-pp_clean', action='store_true',
help='Whether or not to remove fragment files.')
parser.add_argument('-pp_proc', type=int, default=12,
help='Number of processes to run concurrently.')
parser.add_argument('-no_frag', action='store_true',
help='Skip fragmenting database files.')
args = parser.parse_args(sys.argv[2:])
if not args.o and not args.maj and not \
(args.mp and (args.mp_o or args.mp_maj)):
raise RuntimeError(
'Must supply an output argument (-o, -maj, -mp_o, -mp_maj).')
if args.mp and not args.mp_o and not args.mp_maj:
raise RuntimeError(('Must select a multiprocessing output '
'(-mp_o and/or -mp_maj).'))
if args.o and args.mp and not args.mp_o:
raise RuntimeError(('Must use -mp_o when specifying -o in '
'multiprocessing mode.'))
if args.maj and args.mp and not args.mp_maj:
raise RuntimeError(('Must use -mp_maj when specifying -maj in '
'multiprocessing mode.'))
return(args)
## Filter VCF file
if run_mode == 'filter':
parser = argparse.ArgumentParser(prog='ConsDB Filter',
description='Filter a VCF file based on ConsDB files.')
parser.add_argument('-i', required=True,
help='Input VCF for filtering.')
parser.add_argument('-o', required=True, help='VCF filtering output.')
parser.add_argument('-consdb_path',
help='Directory containing ConsDB files.')
parser.add_argument('-pop', help='Population to use for filtering.')
parser.add_argument('-samp', help='Sample ID to use for filtering.')
parser.add_argument('-het', default='rand',
help='Option for handling heterozygous variants.')
parser.add_argument('-log', help='Log file to use for filtering.')
parser.add_argument('-cons', action='store_true', help=('Making a '
            'consensus, so only keep the first major allele at each position.'))
parser.add_argument('-keep_samps', action='store_true',
help='Keep sample information in the filtered VCF.')
parser.add_argument('-quiet', action='store_true',
help='Filter quietly.')
args = parser.parse_args(sys.argv[2:])
if args.pop and args.samp:
raise RuntimeError('Can only specify one of (-pop, -samp).')
if (args.pop or args.samp is None) and args.consdb_path is None:
raise RuntimeError(('Must specify -consdb_path when not filtering '
'by sample.'))
return(args)
## Merge files
if run_mode == 'merge':
parser = argparse.ArgumentParser(prog='ConsDB Merge',
description='Merge multiple ConsDB files.')
parser.add_argument('-i', required=True, nargs='+',
help='Input files to be merged.')
parser.add_argument('-o', required=True, help='Merged output file.')
parser.add_argument('-chr', default=['all'], nargs='+',
help=('Which chromosome(s) to use. Can either be a list of '
'numbers/letters or a file containing each chromosome to use '
'on a new line. Use "all" for all chromosomes.'))
parser.add_argument('-fmt', default='chr{}_{}.gz',
help=('Format used to find files to merge. Use {} as a wildcard, '
'with one for chromosome, and the second for rscol/maj '
'(if desired).'))
## Multiprocessing arguments
parser.add_argument('-mp', action='store_true',
help='Use multiprocessing.')
parser.add_argument('-mp_proc', type=int, default=12,
help='Number of processes to run concurrently.')
parser.add_argument('-merge_all', action='store_true',
help='Merge ConsDB files with all alleles.')
parser.add_argument('-merge_maj', action='store_true',
help='Merge ConsDB files with major alleles.')
args = parser.parse_args(sys.argv[2:])
if all([os.path.isfile(fn) for fn in args.i]):
args.inp_type = 'file'
elif all([os.path.isdir(fn) for fn in args.i]):
args.inp_type = 'dir'
else:
raise RuntimeError('Input arguments must be all files or all '
'directories.')
if args.inp_type == 'file' and \
(os.path.exists(args.o) != os.path.isfile(args.o)):
raise RuntimeError(('Input arguments and output argument must be '
'the same type (all files or all directories).'))
if args.inp_type == 'dir':
if os.path.exists(args.o) != os.path.isdir(args.o):
raise RuntimeError(('Input arguments and output argument must '
'be the same type (all files or all directories).'))
if not args.merge_all and not args.merge_maj:
raise RuntimeError(('Must select -merge_all and/or -merge_maj '
'when merging from directories.'))
if args.inp_type == 'file' and args.mp:
            raise RuntimeError(('Multiprocessing not currently supported when '
                'using a list of files as the input.'))
return(args)
## Make consensus VCF file
if run_mode == 'cons':
parser = argparse.ArgumentParser(prog='ConsDB Cons',
description='Create consensus VCF file.')
parser.add_argument('-i', required=True, nargs='+',
help='Input ConsDB files or directory path to use.')
parser.add_argument('-o', required=True, help='Output directory.')
parser.add_argument('-fmt', default='chr{}_{}.gz',
help=('Format used to find files. Use {} as a wildcard, with one '
'for chromosome, and the second for rscol/maj (if desired).'))
parser.add_argument('-pop', help='Population to make consensus for.')
parser.add_argument('-chr', default=['all'], nargs='+',
help=('Which chromosome(s) to use. Can either be a list of '
'numbers/letters or a file containing each chromosome to use '
'on a new line. Use "all" for all chromosomes.'))
parser.add_argument('-join', help=('If present, file to concatenate '
'individual chromosome VCF files to.'))
parser.add_argument('-clean', action='store_true',
help='Delete individual chromosome VCF files when done.')
parser.add_argument('-v', action='store_true', help='Load verbosely.')
## Multiprocessing arguments
parser.add_argument('-mp', action='store_true',
help='Use multiprocessing.')
parser.add_argument('-mp_proc', type=int, default=12,
help='Number of processes to run concurrently.')
args = parser.parse_args(sys.argv[2:])
if all([os.path.isfile(fn) for fn in args.i]):
args.inp_type = 'file'
elif all([os.path.isdir(fn) for fn in args.i]):
args.inp_type = 'dir'
else:
raise RuntimeError('Input arguments must be all files or all '
'directories.')
if args.inp_type == 'dir' and len(args.i) > 1:
raise RuntimeError('Only one input directory is supported.')
return(args)
## Make a consensus FASTA file, either from a VCF file or from a ConsDB file
if run_mode == 'fa':
parser = argparse.ArgumentParser(prog='ConsDB FA',
description='Create consensus FASTA file.')
parser.add_argument('-ref', required=True,
help='Reference genome FASTA file.')
parser.add_argument('-t', required=True, help='File to transform with.')
parser.add_argument('-o', required=True, help='Output file.')
args = parser.parse_args(sys.argv[2:])
# Check for VCF file suffix, otherwise assume it's a ConsDB file
        if re.search(r'\.vcf(?:\.gz)?$', args.t):
args.ft = 'vcf'
else:
args.ft = 'consdb'
return(args)
def main():
try:
run_mode = sys.argv[1].lower()
except IndexError:
raise RuntimeError('No run mode given.')
if run_mode not in {'parse', 'filter', 'merge', 'cons', 'fa'}:
raise RuntimeError(f'Unknown run mode {sys.argv[1]}.')
args = get_args()
if run_mode not in {'filter', 'fa'}:
# Convert passed chromosomes argument to a list of chroms to use
if args.chr == ['all']:
chrs = CHRS
elif os.path.isfile(args.chr[0]):
chrs = parse_chr_file(args.chr[0])
else:
chrs = args.chr
if run_mode == 'parse':
# Open a manhole for process monitoring
# (https://github.com/ionelmc/python-manhole)
# try:
# manhole.install()
# except NameError:
# pass
# Ensure all needed database files are downloaded (using multiprocessing
# if the appropriate command-line argument was passed)
if args.db_mp:
nproc = min(mp.cpu_count(), args.db_proc, len(chrs))
with mp.Pool(processes=nproc) as pool:
pool.starmap(check_downloads, [(args.db, [c], args.db_path,
args.db_force) for c in chrs])
else:
check_downloads(args.db, chrs, args.db_path, args.db_force)
## If multiprocessing is enabled
if args.mp:
m = mp.Manager()
msg_q = m.Queue()
nproc = min(mp.cpu_count(), args.mp_proc, len(chrs))
# If using preprocessing (file fragmentation), fragment files as
# needed
pp_proc = min(mp.cpu_count(), args.pp_proc, len(chrs))
if args.preprocess and not args.no_frag:
frag_fns = {}
with mp.Pool(processes=pp_proc) as pool:
if not os.path.exists(args.pp_path):
os.mkdir(args.pp_path)
fn_lists = pool.starmap(fragment_files, [(c, args.db,
args.db_path, args.pp_path, args.pp_nlines) for c in chrs])
# Keep track of fragment files in order to delete them later
# if desired
frag_fns = set(np.concatenate(fn_lists))
with mp.Pool(processes=nproc) as pool:
if args.preprocess:
mp_args = [(msg_q, mp_load_and_save_chr, c, args.db,
args.db_path, args.pp_path, args.mp_path,
args.mp_o, args.mp_maj, args.quiet) for c in chrs]
# Use async so we can check for error messages
rsc_list = pool.starmap_async(queue_wrap, mp_args)
# Check for messages from all processes until all chrs have
# finished. If we get an error message, raise the error.
num_finished = 0
while num_finished < len(chrs):
print('Waiting for message from queue.', flush=True)
# Main process waits here until something is received
# from the queue (either an error message or a finished
# message)
m, err = msg_q.get()
if issubclass(err.__class__, Exception):
print('Error message received from queue.',
flush=True)
raise err
if m == 'END':
num_finished += 1
print((f'Finished {num_finished}/{len(chrs)} '
'chromosomes.'), flush=True)
continue
print('Queue received message:', m, err, flush=True)
# Remove fragment directory if requested
if args.pp_clean:
for fn in frag_fns:
os.remove(fn)
else:
rsc_list = pool.starmap(mp_load_and_save,
[(c, args.db, args.db_path, args.mp_path,
args.mp_o, args.mp_maj, args.quiet) for c in chrs])
# Join the separate chromosome ConsDB files into one large
# file for all alleles and one for major alleles (as requested)
if args.o:
fns = [f'{args.mp_path}/chr{c}_rscol.gz' for c in chrs]
join_fns(fns, args.o)
if args.maj:
fns = [f'{args.mp_path}/chr{c}_maj.gz' for c in chrs]
join_fns(fns, args.maj)
else:
rsc = col_from_db(args.db, chrs, args.db_path, args.mp_path,
args.mp_maj, args.mp_o, args.quiet)
if args.o:
rsc.dump_full(args.o)
if args.maj:
rsc.get_major().dump_full(args.maj)
## Merge files
if run_mode == 'merge':
if args.inp_type == 'file':
rse.RSCollection.merge_files(args.i, args.o)
else:
args.i = args.i[0]
if args.mp:
nproc = min(mp.cpu_count(), args.mp_proc, len(chrs))
with mp.Pool(nproc) as pool:
fun_args = [(c, args.i, args.o, args.fmt,
args.merge_all, args.merge_maj) for c in chrs]
pool.starmap(merge_files, fun_args)
else:
for c in chrs:
merge_files(c, args.i, args.o, args.fmt, args.merge_all,
args.merge_maj)
## Filter VCF file
if run_mode == 'filter':
if args.samp:
filter_vcf_pers(args.i, args.o, args.samp, args.het)
else:
srsc.BitRSCollection.filter_vcf(args.consdb_path, args.i, args.o,
args.pop, args.log, args.cons, args.keep_samps, args.quiet)
## Make consensus VCF file
if run_mode == 'cons':
if args.inp_type == 'file':
fns = args.i
else:
args.i = args.i[0]
os.listdir(args.i)
if args.pop is None:
fns = []
out_fns = []
for c in chrs:
if os.path.isfile(f'{args.i}/{args.fmt.format(c, "maj")}'):
fns.append(f'{args.i}/{args.fmt.format(c, "maj")}')
elif os.path.isfile(f'{args.i}/{args.fmt.format(c, "rscol")}'):
fns.append(f'{args.i}/{args.fmt.format(c, "rscol")}')
else:
print(f'WARNING: No file found for chr {c}')
continue
out_fns.append((f'{args.o}/chr{c}_'
f'{args.pop if args.pop else "pan"}_cons.vcf'))
else:
fns = []
out_fns = []
for c in chrs:
if os.path.isfile(f'{args.i}/{args.fmt.format(c, "rscol")}'):
fns.append(f'{args.i}/{args.fmt.format(c, "rscol")}')
else:
print(f'WARNING: No file found for chr {c}')
continue
out_fns.append((f'{args.o}/chr{c}_'
f'{args.pop if args.pop else "pan"}_cons.vcf'))
cmd_args = ((fns[i], out_fns[i], args.pop, not args.v) \
for i in range(len(fns)))
if args.mp:
nproc = min(mp.cpu_count(), args.mp_proc, len(fns))
with mp.Pool(nproc) as pool:
pool.starmap(make_cons_vcf, cmd_args)
else:
for args_list in cmd_args:
make_cons_vcf(*args_list)
if args.join:
join_vcfs(out_fns, args.join)
if args.clean:
for fn in out_fns:
os.remove(fn)
## Make consensus FASTA file
if run_mode == 'fa':
if args.ft == 'vcf':
vcf_to_fasta(args.t, args.ref, args.o)
else:
consdb_to_fasta(args.t, args.ref, args.o)
if __name__ == '__main__':
main()
|
from extensions.extensions import db
class RevokedToken(db.Model):
__tablename__ = 'revoke_jwt'
id = db.Column(db.Integer, primary_key=True)
access_jti = db.Column(db.String, unique=True, nullable=False)
refresh_jti = db.Column(db.String, unique=True, nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
|
import os
import secrets
from flask import render_template, flash, url_for, redirect, request, session
from flask_login import login_user, current_user, login_required
from PIL import Image
from .. import create_app
from .. import db
from functools import wraps
config_name = os.getenv('FLASK_CONFIG')
app = create_app(config_name)
def codefacture():
    #Check the invoice identifier
facture_id=Facture.query.filter_by(boutique_id=current_user.boutique_id, facture_user=current_user).order_by(Facture.id.desc()).first()
id_facture_utilisateur=None
if facture_id is None:
id_facture_utilisateur=1
else:
id_facture_utilisateur=facture_id.id+1
codefactureuser="#{}-{}{}".format(id_facture_utilisateur, current_user.boutique_id,current_user.id) # Code de la facture
session["codefactureuser"]=codefactureuser
return codefactureuser
def verification_facture():
if 'codefactureuser' in session:
return session["codefactureuser"]
else:
return False
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/produit', picture_fn)
output_sz = (370,350)
i= Image.open(form_picture)
i.thumbnail(output_sz)
i.save(picture_path)
return picture_fn
def codeproduit():
    #Check the product identifier
produi_id=Produit.query.order_by(Produit.id.desc()).first()
id_prod=None
if produi_id is None:
id_prod=1
else:
id_prod=produi_id.id+1
codeproduit="#{}".format(id_prod) #Code partielle du produit
return codeproduit
def verification_de_role(role, droit_b, droit_d):
ver="Faux"
ver_b="Ok"
if role=="Gérant" or role=="Associé":
if droit_b !="Aucun" or droit_d !="Aucun":
flash("Le Gérant ou l'Associé, ne peut être associé à un dépôt ou boutique","danger")
return ver
else:
return ver_b
if role == "Vendeur":
if droit_b=="Aucun" and droit_d!="Aucun":
flash("Le vendeur est associé à une boutiqe","danger")
return ver
else:
return ver_b
if role == "Magasinier":
if droit_b!="Aucun" and droit_d=="Aucun":
flash("Magasinier est associé à un dépôt","danger")
return ver
else:
return ver_b
#Client check
def client_defautl():
    #Add client classification
type_client=Typeclient.query.filter_by(nom_type='Normale').first()
id_type_client=type_client
if type_client is None:
type_client=Typeclient(nom_type='Normale', statut=True)
db.session.add(type_client)
db.session.commit()
id_type_client=type_client
    #Check whether the client already exists in the database
clienr='Tous'
client_code_add=None
client_ver=Client.query.filter_by(nom_client=clienr.title(), boutique_id=current_user.boutique_id).first()
if client_ver is None:
client=Client(nom_client=clienr.title(), client_typeclient=id_type_client, boutique_id=current_user.boutique_id)
db.session.add(client)
db.session.commit()
client_code_add=client
else:
client_code_add=client_ver
return client_code_add
def client_entree(nom):
    #Client management
client_code_add=None
if nom =='':
return client_code_add
    #Look up the client classification
type_client=Typeclient.query.filter_by(nom_type='Normale').first()
    #Check whether the client already exists in the database
client_ver=Client.query.filter_by(nom_client=nom.title(), boutique_id=current_user.boutique_id).first()
if client_ver is None:
client=Client(nom_client=nom.title(), typeclient_id=type_client.id, boutique_id=current_user.boutique_id)
db.session.add(client)
db.session.commit()
client_code_add=client
else:
client_code_add=client_ver
return client_code_add
#Identifiers of the client's invoice
def id_facture_client():
if 'idfacture' in session:
return session["idfacture"]
else:
return False
|
# misc board functions
import analogio
import digitalio
import time
# set pins
def setDigitalIn(pin, _pull):
e = digitalio.DigitalInOut(pin)
e.switch_to_input(pull=_pull)
return e
def setDigitalOut(pin):
e = digitalio.DigitalInOut(pin)
e.direction = digitalio.Direction.OUTPUT
return e
def setAnalogIn(pin):
return analogio.AnalogIn(pin)
# read pins
def buttonPressed(digitalIn):
return digitalIn.value is True
def buttonPressedDebounce(digitalIn):
if digitalIn.value is True:
time.sleep(0.2)
return True
return False
def buttonPressedForLong(digitalIn, seconds, samples):
if digitalIn.value is False:
return False
for _ in range(samples):
if digitalIn.value is False:
return False
time.sleep(seconds/samples)
return True
class buttonMonitor:
def __init__(self, _digitalIn):
self.digitalIn = _digitalIn
self.lastState = self.digitalIn.value
def read(self):
state = self.digitalIn.value
changed = state != self.lastState
self.lastState = state
return state, changed
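# Illustrative usage (added; assumes a CircuitPython `board` module and a pin name
# such as board.D5, both placeholders for whatever the target board provides):
#   import board
#   monitor = buttonMonitor(setDigitalIn(board.D5, digitalio.Pull.DOWN))
#   pressed, changed = monitor.read()  # `changed` is True only on a state transition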
# write pins
def digitalSquare(digitalOut, delay):
digitalOut.value = True
time.sleep(delay)
digitalOut.value = False
time.sleep(delay)
def digitalSquares(digitalOut, repeat, delay):
for _ in range(repeat):
digitalSquare(digitalOut, delay)
|
from selenium import webdriver
import unittest
import time
#from pages.pageindex import *
#from pages.pageitemlist import *
#from pages.pageitem import *
#class that holds the test cases
#inherits from unittest
class Items(unittest.TestCase):
def test_view_item_page(self):
driver = webdriver.Chrome('chromedriver.exe')
        driver.get('http://localhost:9380/dependencias/crear')#opens the browser at this address
        #find each field by "id" and enter the data, for every field
driver.find_element_by_id('nombre').send_keys('nombre')
driver.find_element_by_id('tipo').send_keys('tipo')
driver.find_element_by_id('localizacion').send_keys('localizacion')
driver.find_element_by_id('fecha').send_keys('fecha')
        #Click the save button
        driver.find_element_by_name('guardar').click()
        #wait 4 seconds; sometimes the page does not load quickly and an error is raised
        time.sleep(4)
        #does this have to be repeated every time?
'''
page_index = Page_index(self.driver)
page_item_list = Page_item_list(self.driver)
page_item = Page_item(self.driver)
page_index.search_items('dress')
page_item_list.click_first_item()
page_item.verify_text('Printed Summer Dress')
'''
def test_search_with_no_items(self):
'''
page_index = Page_index(self.driver)
page_index.search_items('computer')
'''
'''
def tearDown(self):
self.driver.quit()
'''
#Run the test class
if __name__ == '__main__':
unittest.main()
|
import cv2 as cv
# "unpinned mode" from the gear (settings) menu
#img = cv.imread("irene.jpg", cv.IMREAD_UNCHANGED) # read the file as-is; loaded as a 3-D array
img = cv.imread("irene.jpg", cv.IMREAD_GRAYSCALE) # loaded as a 2-D array, since there is only one colour value per pixel
print(img)
print(img.shape, img.dtype)
# shape : height (number of rows), width (number of columns), colour (3 channels)
# pressing Ctrl + F5 re-runs the previous execution
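# Illustrative shapes (added; assumes "irene.jpg" is a 3-channel colour image):
#   cv.imread("irene.jpg", cv.IMREAD_UNCHANGED).shape -> (rows, cols, 3)
#   cv.imread("irene.jpg", cv.IMREAD_GRAYSCALE).shape -> (rows, cols)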
|
class Solution:
def longestCommonSubstr(self, S1, S2, n, m):
dp=[[0 for i in range(m+1)]for i in range(n+1)]
result=0
for i in range(1,n+1):
for j in range(1,m+1):
if S1[i-1]==S2[j-1]:
dp[i][j]=1+dp[i-1][j-1]
result=max(result,dp[i][j])
else:
dp[i][j]=0
return result
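# Illustrative example (added; the result follows from the DP above):
#   >>> Solution().longestCommonSubstr("ABCDGH", "ACDGHR", 6, 6)
#   4   # the longest common substring is "CDGH"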
|
#!/usr/bin/env python
import pickle
import io
from lab_defs import teaching_length
from lab_mc import experiments, tutorials
experiments["LVT"] = tutorials["LVT"]
from reportlab.lib.units import mm
import csv
from lab_mc import cohort
from reportlab.graphics.barcode import code39
def process(filename):
barcodes = {True: {}, False: {}}
with open(filename, 'r') as f:
barcode_reader = csv.reader(f, delimiter='\t')
for record in barcode_reader:
if len(record) == 2:
barcodes[False][record[0]] = code39.Standard39(
record[1], barWidth=0.3 * mm, barHeight=14 * mm
)
barcodes[True][record[0]] = code39.Standard39(
record[1], barWidth=0.3 * mm, barHeight=7 * mm
)
return barcodes
barcodes1 = process("barcodes1.csv")
barcodes2 = process("barcodes2.csv")
barcodesAB = process("barcodesAB.csv")
barcodesCD = process("barcodesCD.csv")
barcodes_all = {5: barcodesAB,
10: barcodes1,
16: barcodesCD,
21: barcodes2}
def get_barcode(student_number, week, narrow):
return barcodes_all[week][narrow][student_number]
|
import numpy as np
class simplenet:
def __init__(self):
self.W = np.random.randn(2, 3)
def predict(self, x):
return np.dot(x, self.W)
def loss(self, x, t):
z = self.predict(x)
y = softmax(z)
loss = cross_entropy_error(y, t)
return loss
def numerical_graddient_2d(f, x):
if x.ndim == 1:
return _numerical_graddient_1d(f, x)
else:
grad = np.zeros_like(x)
for idx, x in enumerate(x):
grad[idx] = _numerical_graddient_1d(f, x)
return grad
def _numerical_graddient_1d(f, x):
h = 1e-4
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
# fxh1 = f(x)
fxh1 = f()
print('fxh1:', fxh1)
x[idx] = float(tmp_val) - h
# fxh2 = f(x)
fxh2 = f()
print('fxh2:', fxh2)
grad[idx] = (fxh1 - fxh2) / (2 * h)
x[idx] = tmp_val
return grad
def softmax(x):
if x.ndim == 2:
x = x.T
x = x - np.max(x, axis=0)
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
x = x - np.max(x)
return np.exp(x) / np.sum(np.exp(x))
def cross_entropy_error(y, t):
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
if t.size == y.size:
t = t.argmax(axis=1)
batch_size = y.shape[0]
return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7))
if __name__ == '__main__':
net = simplenet()
print(net.W)
x = np.array([0.6, 0.9])
t = np.array([0, 0, 1])
y = np.dot(x, net.W)
print('y:', y)
y_sm = softmax(y)
print(y_sm)
loss = cross_entropy_error(y_sm, t)
print(loss)
def f():
print("x:", x)
print("t:", t)
return net.loss(x, t)
# f = lambda _: net.loss(x, t)
dw = numerical_graddient_2d(f, net.W)
print(dw)
|
#!/usr/bin/env python
# Filename: stdin2clip.py
# Author: Saphalon (aka Chaim)
# Description:
# Command-line tool to copy standard input (stdin) to clipboard
# Required modules:
# pygtk, gtk
import sys
import pygtk
pygtk.require('2.0')
import gtk
try:
buffer = ''.join(sys.stdin.readlines())
c = gtk.Clipboard()
c.set_text(buffer)
c.store()
print 'Successfully copied the following to the clipboard:'
print '---------------------------------------------------'
print buffer
except:
sys.stderr.write('\nAn error occurred! Failed to copy to clipboard!\n')
|
#!/usr/bin/python
# -*-coding: utf-8 -*-
####################################################
## Control flow
####################################################
# Let's just make one variable
una_variable = 5
# Here is an 'if' statement. Indentation is important in Python!
# prints "una_variable es mas chica que 10."
if una_variable > 10:
    print "una_variable es completamente mas grande que 10."
elif una_variable < 10: # This 'elif' clause is optional.
    print "una_variable es mas chica que 10."
else: # This is also optional.
print "una_variable es de hecho 10."
"""
For itera sobre listas
imprime:
perro es un mamifero
gato es un mamifero
raton es un mamifero
"""
for animal in ["perro", "gato", "raton"]:
# Puedes usar % para interpolar strings formateados
print "%s es un mamifero" % animal
"""
`range(número)` retorna una lista de números
desde cero hasta el número dado
imprime:
0
1
2
3
"""
for i in range(4):
print i
"""
While itera hasta que una condición no se cumple.
imprime:
0
1
2
3
"""
x = 0
while x < 4:
print x
    x += 1 # shorthand for x = x + 1
# Handle exceptions with a try/except block
# Works from Python 2.6 onwards:
try:
    # Use raise to raise an error
    raise IndexError("Este es un error de indice")
except IndexError as e:
    pass # pass does nothing. You would usually do some recovery here.
|
# Generated by Django 2.2.7 on 2019-12-09 20:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('info', '0012_whitelistemail'),
]
operations = [
migrations.CreateModel(
name='ValidatedStudent',
fields=[
('email', models.EmailField(max_length=254, primary_key=True, serialize=False)),
('name', models.CharField(max_length=64)),
('surname', models.CharField(max_length=64)),
('middlename', models.CharField(max_length=64)),
('group', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('payment_id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('money', models.IntegerField()),
('date', models.DateField()),
('compensation_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='info.Compensation')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='info.ValidatedStudent')),
],
),
]
|
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Framework for specifying encoders from ffmpeg.
This uses ffmpeg for encoding and decoding.
The default FFMPEG encoder uses mpeg4, so that we can see if it's roughly
compatible with the vpxenc-produced qualities.
"""
import encoder
import file_codec
class FfmpegCodec(file_codec.FileCodec):
def __init__(self,
name='ffmpeg-mpeg4',
formatter=None):
self.name = name
self.codecname = 'mpeg4'
self.extension = 'avi'
super(FfmpegCodec, self).__init__(
name,
formatter=(formatter or encoder.OptionFormatter(prefix='-', infix=' ')))
def StartEncoder(self, context):
return encoder.Encoder(context, encoder.OptionValueSet(self.option_set, ''))
def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
commandline = (
'%s -loglevel warning -s %dx%d -i %s -codec:v %s %s -b:v %dk -y %s' % (
encoder.Tool('ffmpeg'),
videofile.width, videofile.height,
videofile.filename, self.codecname,
parameters.ToString(),
bitrate, encodedfile))
return commandline
def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
commandline = "%s -loglevel warning -codec:v %s -i %s %s" % (
encoder.Tool('ffmpeg'),
self.codecname,
encodedfile, yuvfile)
return commandline
def ResultData(self, encodedfile):
return {'frame': file_codec.FfmpegFrameInfo(encodedfile)}
|
'''competitive_nets.py
Simulates various competitive networks
CS443: Computational Neuroscience
Alice Cole Ethan
Project 3: Competitive Networks
'''
import numpy as np
from scipy import ndimage
from scipy import signal
def leaky_integrator(I, A, B, t_max, dt):
'''A layer of leaky integrator neurons with shunting excitation.
Uses Euler's Method for numerical integration.
Parameters:
-----------
I: ndarray. shape=(N,).
Input vector (assumed to not vary with time here). Components map 1-to-1 to units.
For example, neuron 0 gets I[0], etc.
A: float.
Passive decay rate >= 0.
B: float.
Excitatory upper bound of each cell > 0.
t_max: float.
Maximum time ("real continuous time", not time steps) to simulate the network > 0.
dt: float.
Integration time step > 0.
Returns:
-----------
ndarray. shape=(n_steps, N).
Each unit in the network's activation at all the integration time steps.
'''
ret = np.empty((1, I.shape[0]))
x = np.zeros((1, I.shape[0]))
#time
t = 0
#while time is less than max time do the following
while t < t_max:
#time increase in iteration
t += dt
#iterate over all Inputs
for i in range(I.shape[0]):
#notebook equation to calculate change
change = (-A * x[:, i]) + ((B - x[:, i]) * I[i])
#add change every time
x[:, i] = x[:, i] + change * dt
#add the new neurons back to the return every time
ret = np.vstack((ret, x))
return ret
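# Illustrative call (added; the inputs are arbitrary): two units driven by constant
# inputs 0.5 and 1.0, integrated for 1 unit of simulated time at dt = 0.01.
#   traj = leaky_integrator(np.array([0.5, 1.0]), A=1, B=1, t_max=1, dt=0.01)
#   traj.shape  # -> roughly (t_max/dt + 1, 2); one row per integration step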
def sum_not_I(I):
'''Sums all the other elements in `I` across all dimensions except for the one in each position
Parameters:
-----------
I: ndarray. shape=(anything).
Input vector in any number of dimensions
Returns:
-----------
ndarray. shape=shape(I).
'''
return np.sum(I) - I
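# Illustrative check (added): each output element is the sum of all the *other* inputs.
#   >>> sum_not_I(np.array([1., 2., 3.]))
#   array([5., 4., 3.])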
def lateral_inhibition(I, A, B, t_max, dt):
'''Shunting network with lateral inhibition
Parameters:
-----------
I: ndarray. shape=(N,).
Input vector (assumed to not vary with time here). Components map 1-to-1 to units.
For example, neuron 0 gets I[0], etc.
A: float.
Passive decay rate >= 0.
B: float.
Excitatory upper bound of each cell > 0.
t_max: float.
Maximum time ("real continuous time", not time steps) to simulate the network > 0.
dt: float.
Integration time step > 0.
Returns:
-----------
ndarray. shape=(n_steps, N).
Each unit in the network's activation at all the integration time steps.
'''
ret = np.empty((1, I.shape[0]))
x = np.zeros((1, I.shape[0]))
#time
t = 0
#while time is less than max time do the following
while t < t_max:
#time increase in iteration
t += dt
#iterate over all Inputs
for i in range(I.shape[0]):
#notebook equation to calculate change
not_i = sum_not_I(I)[i]
change = (-A * x[:, i]) + ((B - x[:, i]) * I[i]) - (x[:, i] * not_i)
#add change every time
x[:, i] = x[:, i] + change * dt
#add the new neurons back to the return every time
ret = np.vstack((ret, x))
return ret
def dist_dep_net(I, A=1, B=1, C=0, exc_sigma=0.1, inh_sigma=3.0, kerSz=3, t_max=3, dt=0.001):
'''Distant-dependent (convolutional) 1D shunting network
Parameters:
-----------
I: ndarray. shape=(N,).
Input vector (assumed to not vary with time here). Component i is CENTERED on cell i,
but due to convolution there is no longer a 1-to-1 mapping input-to-unit.
A: float.
Passive decay rate >= 0.
B: float.
Excitatory upper bound of each cell > 0.
C: float.
Inhibitory lower bound constant of each cell > 0.
    exc_sigma: float.
        Standard deviation of the excitatory Gaussian convolution kernel
    inh_sigma: float.
        Standard deviation of the inhibitory Gaussian convolution kernel
kerSz: int.
Length of the 1D convolution kernels
t_max: float.
Maximum time ("real continuous time", not time steps) to simulate the network > 0.
dt: float.
Integration time step > 0.
Returns:
-----------
ndarray. shape=(n_steps, N).
Each unit in the network's activation at all the integration time steps.
TODO:
- Create two small 1D 3x1 Gaussian kernels with different sigma values (see parameters above).
Select `kerSz` equally spaced sample points between -(`kerSz`-1)/2 and (`kerSz`-1)/2 when making
your kernel.
- Do separate 1D convolutions on the raw input to get the excitatory and inhibitory network
inputs (`same` boundary conditions; you do not need to implement this from scratch).
The rest should be the same as in previous simulations.
- Remember to add the inhibitory lower bound C to the network. For now set C=0
(to focus on other properties of the network).
NOTE: You may either write your own convolution code (e.g. based on last semester) or use
the built-in one in scipy.
'''
#excitatory kernel
exc = np.empty((kerSz, 1))
for k in range(kerSz):
exc[k, :] = np.power(np.e, (-1/exc_sigma**2) * (k - (kerSz // 2)) ** 2)
#inhibitory kernel
inh = np.empty((kerSz, 1))
for k in range(kerSz):
inh[k, :] = np.power(np.e, (-1/inh_sigma**2) * (k - (kerSz // 2)) ** 2)
#padding for convolution
pad = int(np.ceil((kerSz - 1) / 2))
I = np.expand_dims(np.pad(np.squeeze(I), pad), 1)
#initializing return value and time
ret = np.empty((1, I.shape[0]))
x = np.zeros((1, I.shape[0]))
t = 0
while t < t_max:
t += dt
#iterate over all Inputs
for i in range(pad, I.shape[0] - pad):
#convolution
Esum = 0
Ssum = 0
for j in range(kerSz):
Esum += I[i+j-pad, :] * exc[j]
Ssum += I[i+j-pad, :] * inh[j]
#equation from notebook for dxi/dt
change = (-A * x[:, i]) + (B - x[:, i]) * Esum - (C + x[:, i]) * Ssum
#add change every time
x[:, i] = x[:, i] + change * dt
#add the new activations back to the return every time
ret = np.vstack((ret, x))
return ret[:, pad:-pad]
def dist_dep_net_image(I, A, inh_sigma, kerSz, t_max, dt):
'''Distant-dependent (convolutional) 2D shunting network
NOTE: If the network simulation is too slow on your machine (e.g. you are using very large images),
you can solve for and replace the ODE with the steady state solution.
Parameters:
-----------
I: ndarray. shape=(N, img_height, img_width).
Input vector (assumed to not vary with time here).
A: float.
Passive decay rate >= 0.
    inh_sigma: float.
        Standard deviation of the inhibitory Gaussian convolution kernel
kerSz: int.
Length of the 2D convolution kernels
t_max: float.
Maximum time ("real continuous time", not time steps) to simulate the network > 0.
dt: float.
Integration time step > 0.
Returns:
-----------
ndarray. shape=(n_steps, img_height, img_width).
Each unit in the network's activation at all the integration time steps.
NOTE: If you have issues holding all the time steps in memory, you can just return the return
at the final time step.
TODO:
- Adapt your previous distance dependent network code to 2D and the modified equation.
- Be sure to replace the excitatory convolution with I_ij. The logic is that we don't want to
blur individual pixel values in the image.
- To generate a 2D Gaussian kernel, generate a 1D one like before then use the matrix
multiplication "trick" (outer product) from the end of Project 0 to make a symmetric 2D Gaussian
kernel (a 1x25 1D kernel should make a 25x25 2D kernel).
I suggest plotting it to make sure this worked!
NOTE: You may either write your own convolution code (e.g. based on last semester) or use
the built-in one in scipy.
'''
inh = np.empty((kerSz, 1))
for k in range(kerSz):
inh[k, :] = np.power(np.e, (-1/inh_sigma**2) * (k - (kerSz // 2)) ** 2)
inh = inh @ inh.T
# print(inh.shape)
N = I.shape[0]
conv = np.zeros(I.shape)
print(I[0, :, :].shape)
for i in range(N):
# print(signal.convolve2d(I[i, :, :], inh, 'same').shape)
conv[i, :, :] = signal.convolve2d(I[i, :, :], inh, 'same')
ret = np.empty((1, I.shape[1], I.shape[2]))
x = np.zeros((1, I.shape[1], I.shape[2]))
t = 0
while t < t_max:
t += dt
change = -A * x + I - x*conv
x = x + change * dt
ret = np.vstack((ret, x))
return ret
def rcf(I, A, B, fun_str, t_max, dt, F=0):
'''Recurrent competitive field network
Parameters:
-----------
I: ndarray. shape=(N,).
Input vector (assumed to not vary with time here). Components map 1-to-1 to units.
For example, neuron 0 gets I[0], etc.
A: float.
Passive decay rate >= 0.
B: float.
Excitatory upper bound of each cell > 0.
fun_str: str.
Name of recurrent feedback function to use in the network. Options are:
'linear', 'faster_than_linear', 'slower_than_linear', 'sigmoid'
t_max: float.
Maximum time ("real continuous time", not time steps) to simulate the network > 0.
dt: float.
Integration time step > 0.
F: float.
Parameter in slower-than-linear and sigmoid functions that controls inflection point > 0.
Returns:
-----------
ndarray. shape=(n_steps, N).
Each unit in the network's activation at all the integration time steps.
'''
I = np.asarray(I)
ret = np.empty((1, I.shape[0]))
x = I
#time
t = 0
#while time is less than max time do the following
while t < t_max:
#time increase in iteration
t += dt
f = f_function(fun_str, x, F)
#iterate over all Inputs
for i in range(I.shape[0]):
#notebook equation to calculate change
change = (-A * x[i]) + ((B - x[i]) * f[i] - x[i] * sum_not_I(f)[i])
#add change every time
x[i] = x[i] + change * dt
#add the new neurons back to the return every time
ret = np.vstack((ret, x))
return ret
def f_function(fun_str, x, F=0):
    '''Recurrent feedback signal function used by rcf().
    fun_str selects 'linear', 'faster_than_linear', 'slower_than_linear', or
    'sigmoid'; x is the activity vector and F is the inflection/offset constant.
    '''
if fun_str == 'linear':
f = x
elif fun_str == 'faster_than_linear':
f = np.square(x)
elif fun_str == 'slower_than_linear':
f = np.true_divide(x, x+F)
else: #sigmoid
f = np.true_divide(np.square(x), F+np.square(x))
return f
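# Illustrative values of the feedback functions (added for clarity):
#   >>> f_function('linear', np.array([1., 2.]))
#   array([1., 2.])
#   >>> f_function('faster_than_linear', np.array([1., 2.]))
#   array([1., 4.])
#   >>> f_function('slower_than_linear', np.array([1., 2.]), F=1)
#   array([0.5       , 0.66666667])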
|
from rubicon_ml.domain.utils.training_metadata import TrainingMetadata
__all__ = ["TrainingMetadata"]
|
from typing import Union
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input, State
from dash.exceptions import PreventUpdate
from .dash_app import DashApp
from .._vis_base import display_name
REFRESH_INTERVAL = Input('playing-refresh-interval', 'n_intervals')
def build_status_bar(app: DashApp):
layout = html.Div([
html.Div(id='status-bar', children=['Loading status...'], style={'backgroundColor': '#E0E0FF'}),
dcc.Interval(id='status-interval', interval=500),
])
@app.dash.callback(Output('status-bar', 'children'), [Input('status-interval', 'n_intervals'), STEP_COMPLETE, PLAYING])
def update_status_bar(*_):
return [app.status_message]
return layout
PLAY_BUTTON = Input('play-button', 'n_clicks')
PLAYING = Input(PLAY_BUTTON.component_id, 'style')
PAUSE_BUTTON = Input('pause-button', 'n_clicks')
STEP_BUTTON = Input('step-button', 'n_clicks')
STEP_COMPLETE = Input('step-complete', 'children')
STEP_COUNT = State('step-count', 'value')
def all_actions(app: DashApp):
return tuple(Input(f'action_{action.name}', 'n_clicks') for action in app.model.actions)
def build_player_controls(app: DashApp):
layout = html.Div(style={'height': '30px'}, children=[
html.Button('Play', id=PLAY_BUTTON.component_id),
html.Button('Pause', id=PAUSE_BUTTON.component_id),
html.Button('Step', id=STEP_BUTTON.component_id),
dcc.Textarea(placeholder='#steps', id=STEP_COUNT.component_id, value='', rows=1, style={'width': 70}),
*[html.Button(display_name(action.name), id=f'action_{action.name}') for action in app.model.actions],
html.Div(style={'display': 'none'}, id=STEP_COMPLETE.component_id),
])
@app.dash.callback(Output(PLAY_BUTTON.component_id, 'style'), inputs=[PLAY_BUTTON], state=[STEP_COUNT])
def play(n_clicks, step_count):
if n_clicks and not app.play_status:
step_count = parse_step_count(step_count, app, default=None)
app.play(max_steps=step_count)
else:
raise PreventUpdate()
@app.dash.callback(Output(PAUSE_BUTTON.component_id, 'style'), [PAUSE_BUTTON])
def pause_simulation(n_clicks):
if n_clicks:
app.pause()
raise PreventUpdate()
@app.dash.callback(Output(STEP_BUTTON.component_id, 'style'), [STEP_BUTTON])
def simulation_step(n_clicks):
if n_clicks and not app.play_status:
app.model.progress()
raise PreventUpdate()
@app.dash.callback(Output(STEP_COMPLETE.component_id, 'children'), [STEP_BUTTON, PAUSE_BUTTON])
    def notify_step_complete(step, pause):
return ['%s / %s' % (step, pause)]
for action in app.model.actions:
@app.dash.callback(Output(f'action_{action.name}', 'disabled'), [Input(f'action_{action.name}', 'n_clicks')])
def perform_action(n_clicks, action=action):
if n_clicks is not None:
app.model.run_action(action.name)
raise PreventUpdate()
return layout
def parse_step_count(step_count, app, default: Union[int, None] = 1):
if step_count is None:
return default
try:
step_count = step_count.strip()
if step_count.startswith('*'):
step_count = app.model.sequence_stride * int(step_count[1:].strip())
else:
step_count = int(step_count)
return step_count
except ValueError:
return default
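# A small, hypothetical sanity check for parse_step_count (not part of the
# original module): a plain integer string is used as-is, a leading '*'
# multiplies the model's sequence_stride, and unparseable input falls back to
# the default.
if __name__ == '__main__':
    class _FakeModel:
        sequence_stride = 4
    class _FakeApp:
        model = _FakeModel()
    assert parse_step_count('10', _FakeApp()) == 10
    assert parse_step_count('*3', _FakeApp()) == 12
    assert parse_step_count('oops', _FakeApp(), default=1) == 1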
|
#!usr/bin/env python3
"""
Functional programming helpers
"""
# TODO:
# Add docs
import inspect, sys
from functools import reduce
def curry(f):
"""A function decorator that automaticly curries a function.
Examples:
>>> @curry
>>> def add3(a,b,c):
... return a + b + c
>>> add3(1)(2)(3)
6
>>> add3(1,2)(3)
6
>>> add3(1)(b=2)(c=3)
6
>>> add3()(1,2,3)
6
>>> add3(1,2,3)
6
"""
def inner1(*args, **kwargs):
f_args, f_kwargs = list(args), dict(kwargs)
def tryret():
try:
return f(*f_args, **f_kwargs)
except TypeError as e:
if "missing" in e.args[0]:
return inner2
else:
raise
def inner2(*args, **kwargs):
f_args.extend(args)
f_kwargs.update(kwargs)
return tryret()
return tryret()
return inner1
class Pipeline:
"""A Pipeline object allows you to send data through a chain of functions
without having to nest a bunch of parentheses or using temporary variables.
Examples:
>>> import math
>>> p = Pipeline(math.pi)
>>> p.then(lambda n: n*2).then(math.cos).run().result
1.0
    This object has three (3) public data members:
    self.data = The original data passed to the constructor.
    self.result = The result of running all the functions on self.data,
    or None if the run method hasn't been called.
    self.hasrun = True once run() has executed the current chain of functions.
"""
def __init__(self, data):
"Creates a Pipeline object out of data"
self.data = data
self.result = None
self.hasrun = False
self._funcs = []
def then(self, func):
"Arranges for func to be called next in the pipeline"
if not callable(func):
raise TypeError("func must be callable")
self._funcs.append(func)
self.hasrun = False
return self
def run(self, force=False):
"""Runs all of the functions on the data.
This function is lazy. If no more functions have been added
and this method has been run before, it will not do anything.
You can change this by passing True for the force argument"""
if force or not self.hasrun:
self.result = reduce( (lambda data, func: func(data)),
self._funcs,
self.data)
self.hasrun = True
return self
class dispatch:
"""A function decorator for Haskell-style multidispactch
on parameter values, types, and on predicates"""
def __init__(self, func):
self._base = func
self._overloads = []
self.__doc__ = self._base.__doc__
def match(self, func):
"Adds func as a possible dispatch function"
self._overloads.append(func)
return self
def __call__(self, *args, **kwargs):
"Finds the dispatch function that matches the given args and kwargs, and calls it"
for overload in self._overloads:
sig = inspect.signature(overload)
try:
bound = sig.bind(*args, **kwargs)
except TypeError:
pass
else:
for name, val in bound.arguments.items():
t = sig.parameters[name].annotation
if not (t != inspect._empty and t == val or
(not inspect.isclass(t) and callable(t) and t(val)) or
(isinstance(val, t) if (inspect.isclass(t) or type(t) == tuple) else False)):
break
else:
return overload(*args, **kwargs)
return self._base(*args, **kwargs)
def compose(*funcs):
"""Takes one or more functions and returns a new function that calls each function on the data
compose(f, g)(x) is the same as f(g(x))"""
if not funcs:
raise TypeError("compose() takes one or more arguments (0 given)")
if not all(callable(f) for f in funcs):
raise TypeError("compose takes one or more callables")
def call(val):
return reduce((lambda data, func: func(data)), reversed(funcs), val)
return call
def cascade(func, times):
"Returns a function that calls f(f(... f(x))) times times."
if not callable(func):
raise TypeError("func is not a callable")
return compose(*([func] * times))
def foreach(iterable, func):
"Calls func on each item of iterable"
if not callable(func):
raise TypeError(repr(func) + " is not callable")
for thing in iterable:
func(thing)
def id_(thing):
"Returns a function that always returns thing"
return lambda: thing
def tail_call(g):
"""
This function decorates a function with tail call optimization.
It does this by throwing an exception
    if it is its own grandparent, and catching such
exceptions to fake the tail call optimization.
This function fails if the decorated
function recurses in a non-tail context.
This function is adapted from http://code.activestate.com/recipes/474088/
"""
class TailRecurseException(Exception):
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
def func(*args, **kwargs):
f = sys._getframe()
if f.f_back and f.f_back.f_back \
and f.f_back.f_back.f_code == f.f_code:
raise TailRecurseException(args, kwargs)
else:
while True:
try:
return g(*args, **kwargs)
except TailRecurseException as e:
args = e.args
kwargs = e.kwargs
func.__doc__ = g.__doc__
return func
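# A small, self-contained demo sketch (not part of the original module): running
# this file directly exercises curry, compose, and dispatch so the behaviour
# documented above can be checked quickly.
if __name__ == "__main__":
    @curry
    def add3(a, b, c):
        return a + b + c
    assert add3(1)(2)(3) == 6
    assert add3(1, 2)(3) == 6
    double_then_negate = compose(lambda x: -x, lambda x: x * 2)
    assert double_then_negate(5) == -10
    @dispatch
    def describe(x):
        return "something else"
    @describe.match
    def _(x: int):
        return "an int"
    assert describe(3) == "an int"
    assert describe(2.5) == "something else"
    print("functional helpers demo passed")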
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 14:18:38 2020
"""
from google.cloud import storage
import os
import io
import time
from datetime import datetime as dt
class StoreModel:
def __init__(self):
self.model_link = None
return
def store_model(self, local_file):
#set google credentials and init google storage client
os.environ['GOOGLE_APPLICATION_CREDENTIALS']="[USER KEY]"
client = storage.Client()
sourceFile = local_file
destFile = 'model' + dt.now().strftime('%H%M%S') + '.h5'
fileLink = 'https://storage.googleapis.com/[BUCKET NAME]/' + destFile
#upload model file to google storage
if self.upload_blob('[MODELS BUCKET NAME]', sourceFile, destFile):
self.model_link = fileLink
def upload_blob(self, bucket_name, source_file_name, destination_blob_name):
"""Uploads a file to the bucket."""
# bucket_name = "bucket-name"
# source_file_name = "local/path/to/file"
# destination_blob_name = "storage-object-name"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print(
"File {} uploaded to {}.".format(
source_file_name, destination_blob_name
)
)
return True
def get_model_link(self):
return self.model_link
def main():
storeModel = StoreModel()
storeModel.store_model(local_file='./saved_models/model.txt')
print(storeModel.get_model_link())
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# graphDist.py
# by: Mike Pozulp
# graph a plot of variance dependent
# on network distance for all observations
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import pylab
import csv
import sys
import numpy
import math
def printUsage():
    print ('Usage: graphDist.py {-t|-m|-a|-x} [data.csv]\n'
           '\t -t use total distance \n'
           '\t -m use median distance \n'
           '\t -a use average distance (default) \n'
           '\t -x use maximum distance \n')
def dist(benchdata, flag):
    # records should come in as
    # jobid, total, median, average (and max, selected by -x)
if flag == '-t':
measure = 1
elif flag == '-m':
measure = 2
elif flag == '-a':
measure = 3
elif flag == '-x':
measure = 4
with open(benchdata, 'r') as f:
recordlist = csv.reader(f)
next(recordlist) #skip first line
# map jobid to distance measure
distDict = {}
for row in recordlist:
jobid = row[0]
dist = row[measure]
distDict[jobid] = float(dist)
return distDict
def get_zlist(benchdata):
with open(benchdata, 'r') as f:
recordlist = csv.reader(f)
next(recordlist) #skip first line
# sort by nproc into dictionary
pdict = {}
# maintain separate jobid dict list
jdict = {}
for row in recordlist:
jobid = row[0]
nprocs = row[1]
speed = float(row[2])
if nprocs in pdict:
pdict[nprocs].append(speed)
jdict[nprocs].append(jobid)
else:
pdict[nprocs] = [speed]
jdict[nprocs] = [jobid]
# list for collecting (jobid, z) tuples
# with jobid later being converted to dist
zjob = []
for xkey in pdict:
# get mean and std for set of observations
std = numpy.std(pdict[xkey])
mean = numpy.mean(pdict[xkey])
# calculate z-scores for each observation
for obsv, jid in zip(pdict[xkey], jdict[xkey]):
if std != 0:
z = (obsv - mean) / std
else:
z = 0
zjob.append( (jid, abs(z)) )
#return zip(*zjob)[0], zip(*zjob)[1]
return zjob
def main():
NETCSV = 'net.csv'
if len(sys.argv) < 2:
printUsage(); sys.exit()
# default use average
flag = '-a'
start = 1
if sys.argv[1] == '-t':
flag = '-t'
start = 2
elif sys.argv[1] == '-m':
flag = '-m'
start = 2
elif sys.argv[1] == '-a':
flag = '-a'
start = 2
elif sys.argv[1] == '-x':
flag = '-x'
start = 2
zDict = {}
for benchdata in sys.argv[start:]:
if benchdata.split('/')[-1] == NETCSV:
distDict = dist(benchdata, flag)
else:
zjobs = get_zlist(benchdata)
for jobid, zscore in zjobs:
if jobid in zDict:
zDict[jobid].append(zscore)
else:
zDict[jobid] = [zscore]
# now we need to assemble (x,y) pairs from the
# zDict mapping jobids to lists of zscores and the
# distDict mapping jobids to distances
x = []
y = []
    for jobid, zlist in zDict.items():
for z in zlist:
x.append( distDict[jobid] )
y.append( z )
plt.scatter(x,y)
plt.title('Dependence of NPB Performance Variance on '
'Logical Network Distance')
plt.ylabel('Flops Z-Score')
if flag == '-t':
measure = 'Total'
elif flag == '-m':
measure = 'Median'
elif flag == '-a':
measure = 'Mean'
elif flag == '-x':
measure = 'Max'
plt.xlabel( measure + ' Logical Network Distance')
# tighten the viewing window by finding
# the maximum x- and y- values, then
# adding a p*100% cushion for all 4 boundaries
xmax = 0
ymax = 0
for xval in x:
if xval > xmax:
xmax = xval
for yval in y:
if yval > ymax:
ymax = yval
p = 0.05
x_min = 0 - p * xmax
y_min = 0 - p * ymax
x_max = xmax + xmax * p
y_max = ymax + ymax * p
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
fname = ('npbGraphs/dist/' +
measure.lower() + 'Scatter.png')
    print('saving ' + fname)
plt.savefig(fname)
if __name__ == '__main__':
main()
|
"""
Date: March 20 2019
@author: Dhynasah Cakir
"""
import pandas as pd
import numpy as np
from collections import defaultdict
from pprint import pprint
def addToDict(document,dict_index,word_dicts):
'''
Parameters:
1. a single document
2. dict_index - implies to which category this document belongs to
What the function does:
-----------------------
It splits the document on the basis of space as a tokenizer and adds every tokenized word to
its corresponding dictionary
Returns:
---------
Nothing
'''
if isinstance(document,np.ndarray): document=document[0]
for token_word in document: #for every word in document
word_dicts[dict_index][token_word]+=1 #increment in its count
def train(dataset,labels,classes):
'''
Parameters:
1. dataset
2. labels
3. unique classes
What the function does:
-----------------------
This is the training function which will train the Naive Bayes Model i.e compute a dictionary for each
category/class.
Returns:
---------
category information i.e prior probability and denominator value for each class
'''
documents=dataset
labels=labels
word_dicts=np.array([defaultdict(lambda:0) for index in range(classes.shape[0])])
if not isinstance(documents,np.ndarray): documents=np.array(documents)
if not isinstance(labels,np.ndarray): labels=np.array(labels)
#constructing dictionary for each category
for cat_index,cat in enumerate(classes):
all_cat_docs=documents[labels==cat] #filter all documents of category == cat
all_cat_docs=pd.DataFrame(data=all_cat_docs)
        #now construct dictionary of this particular category
np.apply_along_axis(addToDict,1,all_cat_docs,cat_index,word_dicts)
prob_classes=np.empty(classes.shape[0])
all_words=[]
cat_word_counts=np.empty(classes.shape[0])
for cat_index,cat in enumerate(classes):
#Calculating prior probability p(c) for each class
prob_classes[cat_index]=np.sum(labels==cat)/float(labels.shape[0])
cat_word_counts[cat_index]=np.sum(np.array(list(word_dicts[cat_index].values())))+1
#get all words of this category
all_words+=word_dicts[cat_index].keys()
#combine all words of every category & make them unique to get vocabulary -V- of entire training set
vocab=np.unique(np.array(all_words))
vocab_length=vocab.shape[0]
#computing denominator value
denoms=np.array([cat_word_counts[cat_index]+vocab_length+1 for cat_index,cat in enumerate(classes)])
cats_info=[(word_dicts[cat_index],prob_classes[cat_index],denoms[cat_index]) for cat_index,cat in enumerate(classes)]
cats_info=np.array(cats_info)
return cats_info
def docProb(test_doc,classes,cats_info):
'''
Parameters:
-----------
1. a single test document
2. list of unique classes
    3. category information containing prior probability and denominator for each category
what the function does:
-----------------------
Function that estimates posterior probability of the given test document
Returns:
---------
probability of test document in all classes
'''
likelihood_prob=np.zeros(classes.shape[0]) #to store probability w.r.t each class
#finding probability w.r.t each class of the given test document
for cat_index,cat in enumerate(classes):
for test_token in test_doc.split(): #split the test document and get p of each test word
####################################################################################
#This loop computes : for each word w [ count(w|c)+1 ] / [ count(c) + |V| + 1 ]
####################################################################################
#get total count of this test token from it's respective training dict to get numerator value
test_token_counts=cats_info[cat_index][0].get(test_token,0)+1
#now get likelihood of this test_token word
test_token_prob=test_token_counts/float(cats_info[cat_index][2])
#To prevent underflow
likelihood_prob[cat_index]+=np.log(test_token_prob)
    # we have the likelihood estimate of the given document against every class, but we need the posterior probability
post_prob=np.empty(classes.shape[0])
for cat_index,cat in enumerate(classes):
post_prob[cat_index]=likelihood_prob[cat_index]+np.log(cats_info[cat_index][1])
return post_prob
def test(test_set,classes,cats_info):
'''
Parameters:
-----------
1. A complete test set of shape (m,)
2. list of unique classes
3. category information: prior probability and denominator information
What the function does?
-----------------------
Determines probability of each test document against all classes and predicts the label
against which the class probability is maximum
Returns:
---------
Predictions of test documents - A single prediction against every test document
'''
predictions=[] #to store prediction of each test document
for doc in test_set:
#get the posterior probability of every document
        post_prob=docProb(doc,classes,cats_info) #get prob of this document for all classes
#pick the max value and map against all classes
predictions.append(classes[np.argmax(post_prob)])
return np.array(predictions)
def main():
train_data='forumTraining.txt' #getting all training documents
train_file = open(train_data)
train_docs=[]
df= pd.DataFrame(columns=['class','document_text'])
for line in train_file:
train_docs.append(line)
train_file.close()
for line in train_docs:
line = line.rstrip('\n')
words_list= line.split(" ")
df= df.append({'class': words_list[0], 'document_text':words_list[1:]}, ignore_index=True)
print ("Total Number of Training documents: ",len(train_data))
print ("------------------- train set Categories -------------- ")
classes= pd.unique(df['class'])
pprint(classes)
    print ("---------------- Training In Progress --------------------")
    cats_info= train(df['document_text'],df['class'],classes)
    print ('----------------- Training Completed ---------------------')
test_file='forumTest.txt'
test_docs=[]
test_data= open(test_file)
for line in test_data:
test_docs.append(line)
test_data.close()
test_labels=[]
for line in test_docs:
line = line.rstrip('\n')
words_list= line.split(" ")
test_labels.append(words_list[0])
print("------------------- test set Categories -------------- ")
print(np.unique(test_labels))
test_docs = np.array(test_docs)
pclasses= test(test_docs,classes,cats_info)
test_acc= np.sum(pclasses==test_labels)/float(test_docs.shape[0])
print ("Test Set Documents: ",test_docs.shape[0])
print ("Test Set Accuracy: ",test_acc*100,"%")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from .coffee_ordered import CoffeeOrdered
from .coffee_served import CoffeeServed
from .coffee_finished import CoffeeFinished
__all__ = [
'CoffeeOrdered',
'CoffeeServed',
'CoffeeFinished',
]
|
from unittest import TestCase
from app import app
from mock import patch
class TestApp(TestCase):
@patch("app.app.render_home")
def test_home(self, render_home):
render_home.return_value = 'success'
self.assertEqual(app.home(), 'success')
@patch("app.app.render_category")
def test_category(self, render_category):
render_category.return_value = 'success'
file_name = 'dummy'
self.assertEqual(app.category(file_name), 'success')
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import argparse
import json
import subprocess
import sys
from pathlib import Path
from pants.backend.awslambda.python.target_types import PythonAwsLambdaRuntime
from pants.backend.python.util_rules.faas import PythonFaaSRuntimeField
from pants.base.build_environment import get_buildroot
COMMAND = "pip install pex 1>&2 && pex3 interpreter inspect --markers --tags"
RUNTIME_FIELDS = [
PythonAwsLambdaRuntime,
# TODO: what docker images to use for GCF?
]
def extract_complete_platform(repo: str, tag: str) -> object:
image = f"{repo}:{tag}"
print(f"Extracting complete platform for {image}", file=sys.stderr)
result = subprocess.run(
["docker", "run", "--entrypoint", "sh", image, "-c", COMMAND],
check=True,
stdout=subprocess.PIPE,
)
return json.loads(result.stdout)
def run(runtime_field: type[PythonFaaSRuntimeField], python_base: Path) -> None:
cp_dir = python_base / runtime_field.known_runtimes_complete_platforms_module().replace(
".", "/"
)
print(f"Generating for {runtime_field.__name__}, writing to {cp_dir}", file=sys.stderr)
for rt in runtime_field.known_runtimes:
cp = extract_complete_platform(runtime_field.known_runtimes_docker_repo, rt.tag)
fname = cp_dir / rt.file_name()
with fname.open("w") as f:
json.dump(cp, f, indent=2)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Generates the complete platform JSON files for AWS Lambda and GCF"
)
return parser
def main() -> None:
create_parser().parse_args()
build_root = Path(get_buildroot()) / "src/python"
for runtime_field in RUNTIME_FIELDS:
run(runtime_field, build_root)
if __name__ == "__main__":
main()
|
import numpy as np
import cv2
img = cv2.imread('images/car.jpg', cv2.IMREAD_COLOR)
img[55,55] = [255,255,255]
px = img[55,55]
img[100:150, 100:150] = [255,255,255]
watch_face = img[37:111, 107:194]
img[0:74, 0:87] = watch_face
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import os
# Create folder if not created
def create_project_dir(directory):
if not os.path.exists(directory):
print('Directory Created')
os.makedirs(directory)
# To crawl a site we need to provide a starting link. The crawler looks at all the links on the
# page and crawls them. Two text files are created: one stores the links still to be crawled
# (the queue) and the other stores the links that have already been crawled.
# Create queue and crawled files if not created
def create_data_files(project_name, base_url):
queue = project_name + '/queue.txt'
crawled = project_name + '/crawled.txt'
if not os.path.exists(queue):
write_file(queue, base_url) # sending base url because in the start it is not crawled
if not os.path.exists(crawled):
write_file(crawled, '') # Empty String as nothing is crawled in the first stage
def write_file(path, data):
    with open(path, 'w') as f:
        f.write(data)
# Add data to existing file
def append_to_file(path, data):
with open(path, 'a', encoding='utf-8') as file:
file.write(data + '\n')
# Delete the contents of a file
def delete_file_contents(path):
with open(path, 'w'):
pass
# Read a file and add each line into a set
def file_to_set(file_name):
results = set()
with open(file_name, 'rt', encoding='utf-8') as f:
for line in f:
results.add(line.replace('\n', ''))
return results
# Iterate through the set and add each item to the file
def set_to_file(links, file_name):
delete_file_contents(file_name)
for link in sorted(links):
append_to_file(file_name, link)
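# Usage sketch with a hypothetical project name and seed URL (not part of the
# original module): sets up the on-disk queue/crawled bookkeeping and reads the
# queue back into a set.
if __name__ == '__main__':
    create_project_dir('example_project')
    create_data_files('example_project', 'https://example.com')
    queued = file_to_set('example_project/queue.txt')
    print(len(queued), 'link(s) in the queue')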
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
# contains count, median price for each quarter as cols, LGA in 2nd entry of
# each row.
def preprocess_house(fname):
# reorganises the house price csv files into a more standard format
# columns: Quarter, LGA, Count, Median
h = pd.read_csv(fname)
    # creates a sorted list of LGAs
LGAs = h["Unnamed: 1"].to_list()
LGAs = sorted(LGAs[2:-6])
LGAs = LGAs[0:28] + LGAs[35:] # removing "group total" rows
# creates a sorted list of quarters
quarters = h.values.tolist()[0]
quarters = quarters[2::2]
# duplicates list values to store 1 row per quarter per LGA
quarters_new = [""] * (len(quarters)*len(LGAs))
for i in range(len(quarters)):
for j in range(len(LGAs)):
quarters_new[i * len(LGAs) + j] = quarters[i]
LGAs_new = [""] * (len(quarters)*len(LGAs))
for i in range(len(quarters)):
for j in range(len(LGAs)):
LGAs_new[i * len(LGAs) + j] = LGAs[j]
dict = {} # stores counts and median house prices as list of tuples for each LGA
row_count = len(h.index)
for r in [i + 2 for i in range(row_count - 7)]:
row = h.values.tolist()[r]
if row[1] != "Group Total":
dict[row[1]] = []
for j in range(len(quarters)):
dict[row[1]].append((row[2*j+2], row[2*j+3]))
# uses dict to create columns for counts and median house prices
counts = [""] * len(quarters_new)
prices = [""] * len(quarters_new)
for i in range(len(quarters)):
j = 0
for LGA in sorted(dict.keys()):
if "-" not in dict[LGA][i][0]:
counts[i * len(LGAs) + j] = int(dict[LGA][i][0].replace(",", ""))
prices[i * len(LGAs) + j] = int(dict[LGA][i][1][1:].replace(",", ""))
else:
counts[i * len(LGAs) + j] = 0
prices[i * len(LGAs) + j] = 0
j += 1
df = pd.DataFrame.from_dict({"Quarter": quarters_new, "LGA": LGAs_new, "Count": counts, "Median House Price": prices})
# aggregating data per year
years = df["Quarter"].to_list()
for i in range(len(years)):
years[i] = years[i][4:]
df.insert(1, "Year", years, True)
df.drop("Quarter", inplace=True, axis=1)
df = df.reset_index()
prices_new = [''] * (79 * 22)
i = 0
year_set = sorted(list(set(years)))
LGA_set = sorted(list(set(df["LGA"].to_list())))
for year in year_set:
for LGA in LGA_set:
temp = df[df["Year"] == year]
temp = temp[temp["LGA"] == LGA]
temp = temp[temp["Median House Price"] != 0]
if len(temp):
p = temp["Median House Price"].to_list()
else:
p = [0]
prices_new[i] = sum(p) / len(p)
i += 1
df1 = df.groupby(['Year', 'LGA']).sum()
df1["Median House Price"] = prices_new
df1 = df1[-790::]
df1 = df1.reset_index().drop("index", axis=1)
return df1
def preprocess_crime1(fname):
# reorganises and cleans crime1 csv
# columns: year, lga, incidents, rate per 100k
df = pd.read_csv(fname, usecols=["Year", "Local Government Area", "Incidents Recorded", 'Rate per 100,000 population']) # contains incidents and rate/100k
# sorting years in ascending order
df = df.sort_values(by=["Year", "Local Government Area"])
# removing misc rows
df = df[df["Local Government Area"] != "Total"]
df = df[df["Local Government Area"] != " Unincorporated Vic"]
df = df[df["Local Government Area"] != " Justice Institutions and Immigration Facilities"]
df = df.reset_index()
df.drop("index", inplace=True, axis=1)
# converting rate and incidents to int/float
rates = df["Rate per 100,000 population"].to_list()
incidents = df["Incidents Recorded"].to_list()
for i in range(len(rates)):
rates[i] = float(rates[i].replace(",", ""))
incidents[i] = int(incidents[i].replace(",", ""))
df["Rate per 100,000 population"] = rates
df["Incidents Recorded"] = incidents
return df
def preprocess_combined(df1, df2):
# Combines two dataframes and removes rows with 0
df1.insert(len(df1.columns), "Incidents Recorded", df2["Incidents Recorded"])
df1.insert(len(df1.columns), "Rate per 100,000 population", df2["Rate per 100,000 population"])
df1 = df1[df1["Median House Price"] != 0]
df1.reset_index(inplace=True)
return df1
def cluster_kmeans(X, k, fname, htype):
kmeans = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=10, random_state=0)
kmeans.fit_predict(X)
plt.scatter(X[:,0], X[:,1])
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red')
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title(f"Median Rent Price vs Incidents Recorded per 100k population for {htype}", fontsize=10)
plt.savefig(fname)
plt.clf()
return 0
def cluster_agglomerative(X, k, fname, htype):
clustering = AgglomerativeClustering(n_clusters=k).fit(X)
plt.scatter(X[:,0],X[:,1], c=clustering.labels_, cmap='rainbow')
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title(f"Median Rent Price vs Incidents Recorded per 100k population for {htype}", fontsize=10)
plt.savefig(fname)
plt.clf()
return 0
# reading in csv files
c1 = preprocess_crime1("crime1.csv") # contains incidents and rate/100k
one_bf = preprocess_combined(preprocess_house("1bflat.csv"), c1) # contains count and median price
one_bf_2020 = one_bf[one_bf["Year"] == "2020"]
two_bf = preprocess_combined(preprocess_house("2bflat.csv"), c1) # contains count and median price
two_bf_2020 = two_bf[two_bf["Year"] == "2020"]
three_bf = preprocess_combined(preprocess_house("3bflat.csv"), c1) # contains count and median price
three_bf_2020 = three_bf[three_bf["Year"] == "2020"]
two_bh = preprocess_combined(preprocess_house("2bhouse.csv"), c1) # contains count and median price
two_bh_2020 = two_bh[two_bh["Year"] == "2020"]
three_bh = preprocess_combined(preprocess_house("3bhouse.csv"), c1) # contains count and median price
three_bh_2020 = three_bh[three_bh["Year"] == "2020"]
four_bh = preprocess_combined(preprocess_house("4bhouse.csv"), c1) # contains count and median price
four_bh_2020 = four_bh[four_bh["Year"] == "2020"]
all = preprocess_combined(preprocess_house("all.csv"), c1) # contains count and median price
all_2020 = all[all["Year"] == "2020"]
p = all["Median House Price"].to_list()
c = all["Rate per 100,000 population"].to_list()
l = []
for i in range(len(all)):
l.append([p[i], c[i]])
X = np.array(l)
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.savefig("all-elbow.png")
plt.clf()
cluster_kmeans(X, 4, "plot-all-kmeans-4.png", "All House Types")
cluster_agglomerative(X, 4, "plot-all-agglomerative-4.png", "All House types")
p = two_bh["Median House Price"].to_list()
c = two_bh["Rate per 100,000 population"].to_list()
l = []
for i in range(len(two_bh)):
l.append([p[i], c[i]])
Y = np.array(l)
cluster_kmeans(Y, 4, "plot-2bh-kmeans.png", "2 Bedroom Houses")
cluster_agglomerative(Y, 4, "plot-2bh-agglomerative.png", "2 Bedroom Houses")
# print(h1)
# print(h1)
# print(c1)
# h1.to_csv("h1.csv")
# c1.to_csv("c1.csv")
# 1 BEDROOM FLAT
plt.scatter(one_bf["Median House Price"], one_bf["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 1 Bedroom Flat")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot1bf.png")
plt.clf()
plt.scatter(one_bf["Median House Price"], one_bf["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 1 Bedroom Flat")
plt.savefig("plot1bf100k.png")
plt.clf()
# 1 BEDROOM FLAT 2020
plt.scatter(one_bf_2020["Median House Price"], one_bf_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 1 Bedroom Flat in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot1bf-2020.png")
plt.clf()
plt.scatter(one_bf_2020["Median House Price"], one_bf_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 1 Bedroom Flat in 2020")
plt.savefig("plot1bf100k-2020.png")
plt.clf()
# 2 BEDROOM FLAT
plt.scatter(two_bf["Median House Price"], two_bf["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 2 Bedroom Flat")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot2bf.png")
plt.clf()
plt.scatter(two_bf["Median House Price"], two_bf["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 2 Bedroom Flat")
plt.savefig("plot2bf100k.png")
plt.clf()
# 2 BEDROOM FLAT 2020
plt.scatter(two_bf_2020["Median House Price"], two_bf_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 2 Bedroom Flat in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot2bf-2020.png")
plt.clf()
plt.scatter(two_bf_2020["Median House Price"], two_bf_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 2 Bedroom Flat in 2020")
plt.savefig("plot2bf100k-2020.png")
plt.clf()
# 2 BEDROOM HOUSE
plt.scatter(two_bh["Median House Price"], two_bh["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 2 Bedroom House")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot2bh.png")
plt.clf()
plt.scatter(two_bh["Median House Price"], two_bh["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 2 Bedroom House")
plt.savefig("plot2bh100k.png")
plt.clf()
# 2 BEDROOM HOUSE 2020
plt.scatter(two_bh_2020["Median House Price"], two_bh_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 2 Bedroom House in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot2bh-2020.png")
plt.clf()
plt.scatter(two_bh_2020["Median House Price"], two_bh_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 2 Bedroom House in 2020")
plt.savefig("plot2bh100k-2020.png")
plt.clf()
# 3 BEDROOM FLAT
plt.scatter(three_bf["Median House Price"], three_bf["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 3 Bedroom Flat")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot3bf.png")
plt.clf()
plt.scatter(three_bf["Median House Price"], three_bf["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 3 Bedroom Flat")
plt.savefig("plot3bf100k.png")
plt.clf()
# 3 BEDROOM FLAT 2020
plt.scatter(three_bf_2020["Median House Price"], three_bf_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 3 Bedroom Flat in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot3bf-2020.png")
plt.clf()
plt.scatter(three_bf_2020["Median House Price"], three_bf_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 3 Bedroom Flat in 2020")
plt.savefig("plot3bf100k-2020.png")
plt.clf()
# 3 BEDROOM HOUSE
plt.scatter(three_bh["Median House Price"], three_bh["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 3 Bedroom House")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot3bh.png")
plt.clf()
plt.scatter(three_bh["Median House Price"], three_bh["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 3 Bedroom House")
plt.savefig("plot3bh100k.png")
plt.clf()
# 3 BEDROOM HOUSE 2020
plt.scatter(three_bh_2020["Median House Price"], three_bh_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 3 Bedroom House in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot3bh-2020.png")
plt.clf()
plt.scatter(three_bh_2020["Median House Price"], three_bh_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 3 Bedroom House in 2020")
plt.savefig("plot3bh100k-2020.png")
plt.clf()
# 4 BEDROOM HOUSE
plt.scatter(four_bh["Median House Price"], four_bh["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 4 Bedroom House")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot4bh.png")
plt.clf()
plt.scatter(four_bh["Median House Price"], four_bh["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 4 Bedroom House")
plt.savefig("plot4bh100k.png")
plt.clf()
# 4 BEDROOM HOUSE 2020
plt.scatter(four_bh_2020["Median House Price"], four_bh_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for 4 Bedroom House in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plot4bh-2020.png")
plt.clf()
plt.scatter(four_bh_2020["Median House Price"], four_bh_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for 4 Bedroom House in 2020")
plt.savefig("plot4bh100k-2020.png")
plt.clf()
# ALL
plt.scatter(all["Median House Price"], all["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for All Housing")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plotall.png")
plt.clf()
plt.scatter(all["Median House Price"], all["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for All Housing")
plt.savefig("plotall100k.png")
plt.clf()
# ALL 2020
plt.scatter(all_2020["Median House Price"], all_2020["Incidents Recorded"])
plt.xlabel("Median Rent Price")
plt.ylabel("Incidents Recorded")
plt.title("Median Rent Price vs Incidents Recorded for All Housing in 2020")
plt.yticks(np.arange(0, 32000, 2000))
plt.savefig("plotall-2020.png")
plt.clf()
plt.scatter(all_2020["Median House Price"], all_2020["Rate per 100,000 population"])
plt.xlabel("Median Rent Price")
plt.ylabel("Rate per 100,000 population")
plt.title("Median Rent Price vs Rate per 100,000 population for All Housing in 2020")
plt.savefig("plotall100k-2020.png")
plt.clf()
# to add: plot more things, add calculations
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from tkinter import *
def onClickDraw():
plist=['Name1','Name2','Name3']
for item in plist:
listBox.insert(END,item)
root=Tk()
listBox=Listbox(root)
button=Button(root,text='Draw List',command=onClickDraw)
button.pack()
listBox.pack()
root.mainloop()
|
num1 = float(input('Write your first number: '))
num2 = float(input('Write your second number: '))
if num1 < num2:
print('Your first number is lower than second')
elif num1 == num2:
print('Your numbers are equal')
else:
print('Second number is lower than first')
|
# I THINK I AM GOING TO HAVE TO CHANGE THIS. COPIED AND PASTED FROM THE bootcamp/news/views.py FILE.
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_http_methods
from django.views.generic import ListView, DeleteView
from bootcamp.helpers import ajax_required, AuthorRequiredMixin
# from bootcamp.news.models import News
from bootcamp.community.models import Community
class CommunityListView(LoginRequiredMixin, ListView):
"""A really simple ListView, with some JS magic on the UI."""
# model = News
model = Community
paginate_by = 15
def get_queryset(self, **kwargs):
# return News.objects.filter(reply=False)
return Community.objects.filter(reply=False)
class CommunityDeleteView(LoginRequiredMixin, AuthorRequiredMixin, DeleteView): # Changed news to community
"""Implementation of the DeleteView overriding the delete method to
allow a no-redirect response to use with AJAX call."""
# model = News
model = Community
# success_url = reverse_lazy("news:list")
success_url = reverse_lazy("community:list")
@login_required
@ajax_required
@require_http_methods(["POST"])
def post_community(request): # Changed news to community
"""A function view to implement the post functionality with AJAX allowing
to create Community instances as parent ones.""" # Changed news to community
# print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
# print(request)
# print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
user = request.user
post = '<h5>Title\n</h5>' + request.POST.getlist('post')[0] + '\n\n\n\n' + '<h5>Target Group\n</h5>' + request.POST.getlist('post')[1] + '<h5>Description\n</h5>' + request.POST.getlist('post')[2] + '\n\n' + '<h5>Duration\n</h5>' + request.POST.getlist('post')[3] + '<h5>Link\n</h5>' + request.POST.getlist('post')[4]
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
print(str(request.POST.getlist('post')))
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
# post = post.strip()
if 0 < len(post) <= 1000:
# posted = News.objects.create(user=user, content=post)
posted = Community.objects.create(user=user, content = post)
html = render_to_string(
# "news/news_single.html", {"news": posted, "request": request}
"community/community_single.html", {"community":posted, "request": request}
)
return HttpResponse(html)
else:
length = len(post) - 1000
return HttpResponseBadRequest(
content=_(f"Text is {length} characters longer than accepted.")
)
@login_required
@ajax_required
@require_http_methods(["POST"])
def like(request):
"""Function view to receive AJAX, returns the count of likes a given community
    has received.""" # Changed news to community
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print(request.POST)
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
community_id = request.POST["community"]
print("community_id" + community_id)
# community_id = request.POST.get('community', 'some_default')
community = Community.objects.get(pk=community_id)
user = request.user
community.switch_like(user)
return JsonResponse({"likes": community.count_likers()})
@login_required
@ajax_required
@require_http_methods(["POST"])
def attended(request):
"""Function view to receive AJAX, returns the count of attended a given news
has recieved."""
community_id = request.POST["community"]
community = Community.objects.get(pk=community_id)
user = request.user
community.switch_attend(user)
return JsonResponse({"attendeds": community.count_attendees()})
@login_required
@ajax_required
@require_http_methods(["GET"])
def get_thread(request):
"""Returns a list of community with the given community as parent.""" # Changed news to community
# news_id = request.GET["news"]
# news = News.objects.get(pk=news_id)
# news_html = render_to_string("news/news_single.html", {"news": news})
# thread_html = render_to_string(
# "news/news_thread.html", {"thread": news.get_thread(), "request": request}
# )
# return JsonResponse({"uuid": news_id, "news": news_html, "thread": thread_html})
community_id = request.GET["community"]
community = Community.objects.get(pk=community_id)
community_html = render_to_string("community/community_single.html", {"community": community})
thread_html = render_to_string(
"community/community_thread.html", {"thread": community.get_thread(), "request": request}
)
return JsonResponse({"uuid": community_id, "community": community_html, "thread": thread_html})
@login_required
@ajax_required
@require_http_methods(["POST"])
def post_comment(request):
"""A function view to implement the post functionality with AJAX, creating
    Community instances that act as the child comments of the root
    post.""" # Changed news to community
user = request.user
post = request.POST["reply"]
par = request.POST["parent"]
# parent = News.objects.get(pk=par)
parent = Community.objects.get(pk=par)
post = post.strip()
if post:
parent.reply_this(user, post)
return JsonResponse({"comments": parent.count_thread()})
else:
return HttpResponseBadRequest()
@login_required
@ajax_required
@require_http_methods(["POST"])
def update_interactions(request):
data_point = request.POST["id_value"]
# news = News.objects.get(pk=data_point)
community = Community.objects.get(pk=data_point)
# data = {"likes": news.count_likers(), "comments": news.count_thread()}
data = {"likes": community.count_likers(), "comments": community.count_thread(), "attendeds": community.count_attendees()}
return JsonResponse(data)
|
from matplotlib.pyplot import imread, show
from numpy import copy, average, sqrt, arctan
import os
edge = imread("C:\Temp\Edge2.png")
class lineObject:
"""
Line object that contains a line within a specific image
"""
def __init__(self, line):
self.length = len(line)
self.endPoint1 = line[0]
self.endPoint2 = line[-1]
self.midpoint = [abs(self.endPoint1[0] - self.endPoint2[0]) / 2,
abs(self.endPoint1[1] - self.endPoint2[1]) / 2]
def Correlation(line, resolution, threshold):
"""
Given an array of adjacent pixel locations, it will determine
if the line is straight enought to be considered a line.
it uses the two endpoints to create the line to which its
correlation is measured. The line is split into 'resolution'
lines whose slopes are then compared to the ideal line.
'threshold' is the variability allowed in the difference
between these slopes
"""
start = line[0]
end = line[-1]
length = len(line)
dy = end[0] - start[0]
dx = end[1] - start[1]
try:
masterAngle = arctan(abs(dy/dx))
if (dy/dx < 0 or (dy < 0 and dx < 0)):
masterAngle += 3.1415 # pi or 180 degrees
except ZeroDivisionError:
if dy > 0:
masterAngle = 1.57 # 90deg in radians
else:
masterAngle = -1.57
    segmentLength = length // resolution  # integer division so it can be used as an index
segments = []
startPoint = start
for i in range(1, resolution + 1):
endPoint = line[segmentLength * i - 1]
segments.append([startPoint, endPoint])
startPoint = endPoint
segmentSlopes = []
for i in segments:
start = i[0]
end = i[1]
dy = end[0] - start[0]
dx = end[1] - start[1]
try:
angle = arctan(abs(dy/dx))
if (dy/dx < 0 or (dy < 0 and dx < 0)):
angle += 3.1415 # pi or 180 degrees
except ZeroDivisionError:
if dy > 0:
angle = 1.57 # 90deg in radians
else:
angle = -1.57
segmentSlopes.append(angle)
ave = average(segmentSlopes)
if(ave < (masterAngle + threshold) and ave > (masterAngle - threshold)):
return True
else:
return False
def TestGrid(im,x,y):
"""
given a bitmap image and a true pixel, it searches for another true pixel
that is adjacent to it. It then returns a bool telling if a true pixel
was found and an integer corresponding to that pixel's position.
"""
try:
up = im[y-1][x]
down = im[y+1][x]
right = im[y][x+1]
left = im[y][x-1]
upRight = im[y-1][x+1]
upLeft = im[y-1][x-1]
lowRight = im[y+1][x+1]
lowLeft = im[y+1][x-1]
grid = [upLeft,up,upRight,left,0,right,lowLeft,down,lowRight]
for index in range(len(grid)):
if(grid[index] == 1):
return True, index
return False, -1
except IndexError:
return False, -1
def TestPossibleLine(im,y,x,minLength, maxLength, resolution, threshold):
"""
    given a bitmap image and a true pixel, it will iteratively call
    TestGrid to find the next pixel in a possible line until TestGrid
    returns false. It then checks whether the line is long enough
    and whether it is straight enough using the Correlation function.
    Additionally, it ensures it is only adding straight points to
    "linePoints" by checking the most common direction index (returned
    by TestGrid) against the most common direction index of
    the previous numIndexVals points. This mitigates a
    problem where the lines would follow doglegs.
"""
numIndexVals = 4
linePoints = []
flag, index = TestGrid(im,x,y)
totalIndex = []
lastIndex = []
for i in range(numIndexVals):
lastIndex.append(0)
count = 0
while(flag):
count += 1
if(flag):
if(index == 2):
linePoints.append([y,x])
im[y][x] = 2
x = x + 1
y = y - 1
elif(index == 5):
linePoints.append([y,x])
im[y][x] = 2
x = x + 1
elif(index == 8):
linePoints.append([y,x])
im[y][x] = 2
x = x + 1
y = y + 1
elif(index == 1):
linePoints.append([y,x])
im[y][x] = 2
y = y - 1
elif(index == 7):
linePoints.append([y,x])
im[y][x] = 2
y = y + 1
if(index == 0):
linePoints.append([y,x])
im[y][x] = 2
x = x - 1
y = y - 1
elif(index == 3):
linePoints.append([y,x])
im[y][x] = 2
x = x - 1
elif(index == 6):
linePoints.append([y,x])
im[y][x] = 2
x = x - 1
totalIndex.append(index)
lastIndex.insert(0,index)
lastIndex.pop()
flag, index = TestGrid(im,x,y)
if count % 5 == 0:
mostCommonTotal = max(set(totalIndex), key=totalIndex.count)
mostCommonLast = max(set(lastIndex), key=lastIndex.count)
if mostCommonTotal != mostCommonLast:
flag = False
for i in range(numIndexVals):
linePoints.pop()
if(len(linePoints) != 0):
lineLength = sqrt((linePoints[0][0] - linePoints[-1][0])**2 + (linePoints[0][1] - linePoints[-1][1])**2)
if(lineLength >= minLength and lineLength <= maxLength and Correlation(linePoints,resolution,threshold)):
for i in linePoints:
im[i[0]][i[1]] = 3
return lineObject(linePoints), im
return "notLine", im
def FindLines(im, minLength, maxLength, resolution, threshold):
"""
    Input a Canny edge detected image and the minimum length of a line in pixels
    0 = pixel is not a part of a line
    1 = pixel may be a part of a line
    2 = pixel is a part of the line under test
    3 = pixel belongs to a confirmed line
    4 = starting pixel of the line currently being traced
"""
lines = [] # array of line objects
y, x = im.shape
for j in range(1,y-1):
for i in range(1,x-1):
if(im[j][i] == 1):
im[j][i] = 4
line,im = TestPossibleLine(im, j, i, minLength, maxLength, resolution, threshold)
if (line != "notLine"):
lines.append(line)
return lines
#lines = FindLines(edge, 50, 500, 20, 2)
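# Hypothetical driver sketch (not part of the original script): FindLines expects
# a 2-D array of 0/1 values, so the grayscale edge image loaded above would need
# to be collapsed to a single channel and binarised first. Guarded so importing
# this module does not start a (slow) search.
if __name__ == '__main__':
    bitmap = copy(edge)
    if bitmap.ndim == 3:            # drop RGB(A) channels if present
        bitmap = bitmap[:, :, 0]
    bitmap = (bitmap > 0.5).astype(int)
    found_lines = FindLines(bitmap, 50, 500, 20, 2)
    print('found', len(found_lines), 'candidate line(s)')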
|
def register(cls, *args, **kwarg):
print(cls, args, kwarg, 'one')
def detor(*args, **kw):
print(args, kw, 'two')
return detor
class ooop(object):
def __init__(self):
register('123', '2345','2321', l = '2345')(self.fn)
def fn(self):
pass
class ValStats(object):
@classmethod
def func(self, *args, **kwarg):
print(args)
@classmethod
def execute(cls):
cls.func(456,789)
def output(number):
print('()' * number)
def print_paren(result, left, right):
if left == 0 and right == 0:
print(result)
elif left == 0 and right > 0:
print_paren(result+")", left, right-1)
else:
if left == right:
print_paren(result+"(", left-1, right)
elif left < right:
print_paren(result+"(", left-1, right)
print_paren(result+")", left, right-1)
if __name__ == '__main__':
# print_paren('', 2, 2)
ValStats.execute()
|
# Parameters, Unpacking, Variables
from sys import argv
# read the WYSS section for how to run this
script, first, second, third = argv
# argv is the "argument variable" -> holds the arguments you pass to your
# Python script when you run it.
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third)
|
from . import token
|