blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f55d831febca599c28a27d6fb33bcae9ba0111a6 | Python | LWHSTechnicalArts/Raspberry-Pi | /blynk_servo_slider.py | UTF-8 | 895 | 2.71875 | 3 | [] | no_license | import blynklib
import time
from adafruit_servokit import ServoKit

# 16-channel PWM servo driver board (PCA9685 via Adafruit ServoKit).
kit = ServoKit(channels=16)
# Blynk cloud authentication token -- replace with a real token before use.
BLYNK_AUTH = 'your_auth_token'
# initialize Blynk
blynk = blynklib.Blynk(BLYNK_AUTH)

# Log line printed whenever a virtual pin is written from the Blynk app.
WRITE_EVENT_PRINT_MSG = "[WRITE_VIRTUAL_PIN_EVENT] Pin: V{} Value: '{}'"

# register handler for virtual pin V5 write event:
# the slider value (interpreted as degrees) drives servo channel 0.
@blynk.handle_event('write V5')
def write_virtual_pin_handler(pin, value):
    print(WRITE_EVENT_PRINT_MSG.format(pin, value))
    print(value[0])
    kit.servo[0].angle = int(value[0])
    # NOTE(review): sleep(0) is effectively a no-op -- confirm whether a
    # real debounce delay was intended here.
    time.sleep(0)

# Same handler pattern for V6, driving servo channel 3.
# NOTE(review): this redefines the name write_virtual_pin_handler; that is
# harmless because the decorator registers each function at definition time.
@blynk.handle_event('write V6')
def write_virtual_pin_handler(pin, value):
    print(WRITE_EVENT_PRINT_MSG.format(pin, value))
    print(value[0])
    kit.servo[3].angle = int(value[0])
    time.sleep(0)

###########################################################
# infinite loop that waits for event
###########################################################
while True:
    blynk.run()
| true |
ad6ce3de0654f7213851e6d0e72781134d360dc8 | Python | annikahannig/nanoflow | /nanoflow/graph.py | UTF-8 | 3,424 | 3.609375 | 4 | [] | no_license | """
Nanoflow Graph:
Define a computational graph.
All operations will be run async.
Feed data into a node by providing a feed_dict
to the run operation.
"""
import asyncio
from .session import Session
class Node(object):
    """
    Nanoflow nodes contain computational operations.
    These operations are executed async.

    A node's result is memoized per Session: the first evaluation stores
    the value in the session, later evaluations return the cached value.
    """

    def __init__(self, inputs=None, name=None, fn=None):
        """Initialize new operation node.

        :param inputs: a single upstream Node or a list/tuple of Nodes
            (default: no inputs). The previous ``inputs=[]`` default was
            a shared mutable list; it is replaced by ``None`` with the
            same observable behaviour.
        :param name: optional name used to look the node up (feed_dict).
        :param fn: the (optionally async) operation applied to the
            gathered input results.
        """
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, (list, tuple)):
            inputs = [inputs]
        self.inputs = inputs
        self.name = name
        self.fn = fn
        # Instance-level defaults; the authoritative caching state is
        # kept in the Session (see result()).
        self.has_cached_result = False
        self.cached_result = None

    async def fetch_inputs(self, session):
        """Resolve all upstream nodes concurrently; return their results in order."""
        fetchers = [asyncio.ensure_future(node_in.result(session))
                    for node_in in self.inputs]
        inputs = await asyncio.gather(*fetchers)
        return inputs

    async def fetch_input(self, session, i):
        """Resolve a single upstream node selected by position ``i``."""
        result = await self.inputs[i].result(session)
        return result

    async def result(self, session):
        """Compute this node's result, or return the session-cached value.

        Returns None for nodes without an ``fn``.
        """
        if session.get(self, 'has_cached_result'):
            return session.get(self, 'cached_result')
        if self.fn:
            inputs = await self.fetch_inputs(session)
            if asyncio.iscoroutinefunction(self.fn):
                res = await self.fn(*inputs)
            else:
                res = self.fn(*inputs)
            # Session.set returns the session, allowing chaining.
            session.set(
                self, True, key='has_cached_result'
            ).set(
                self, res, key='cached_result')
            return res
        return None
def op(fn):
    """
    Operation decorator:
    turn a plain (or async) function into a factory that, when called,
    builds a Node executing that function.
    """
    def build_node(*args, **kwargs):
        # Overwrite any caller-supplied ``fn`` keyword, matching the
        # original override behaviour.
        kwargs['fn'] = fn
        return Node(*args, **kwargs)
    return build_node
class Placeholder(Node):
    """
    Placeholders keep data and are initialized from the feed_dict.

    It's important to give your placeholders a name, so we can look
    them up and feed them from the feed_dict before executing the graph.
    """
    def feed(self, session, value):
        """Store value in session"""
        session.set(self, value)

    async def result(self, session):
        # Return the value previously fed for this node in this session.
        # NOTE(review): behaviour when nothing was fed depends on
        # Session.get semantics -- confirm it returns None rather than raising.
        return session.get(self)
class Constant(Node):
    """
    Node that always yields a fixed value.
    """
    def __init__(self, value):
        """Store the constant ``value`` to be returned by result().

        BUG FIX: the base Node initializer was never called, so a
        Constant had no ``inputs``/``name`` attributes and crashed any
        graph traversal (e.g. _find_named_node) that touched it.
        """
        super().__init__()
        self.value = value

    async def result(self, session):
        """Return the stored constant; the session is not consulted."""
        return self.value
def _find_named_node(node, name):
"""Small helper to find a node by name"""
if node.name == name:
return node
# check if the node is present as input
for node_in in node.inputs:
res = _find_named_node(node_in, name)
if res:
return res
return None
def run_async(output, feed_dict=None):
    """Setup graph and return future for running on event loop.

    :param output: terminal node of the graph to evaluate.
    :param feed_dict: optional mapping of placeholder names to values.
        (Previously a mutable ``{}`` default -- replaced by None.)
    """
    if feed_dict is None:
        feed_dict = {}
    # Create a new session for storing and
    # retrieving values for nodes.
    session = Session()
    # Feed each named placeholder reachable from the output node.
    for key, val in feed_dict.items():
        input_node = _find_named_node(output, key)
        if input_node:
            input_node.feed(session, val)
    future = asyncio.ensure_future(output.result(session))
    return future
def run(output, feed_dict=None):
    """Run the graph on the event loop and block until the result is ready.

    :param output: terminal node of the graph to evaluate.
    :param feed_dict: optional mapping of placeholder names to values.
        (Previously a mutable ``{}`` default -- replaced by None.)
    """
    loop = asyncio.get_event_loop()
    # Run graph; normalize here so this edit does not depend on
    # run_async's own default handling.
    future = run_async(output, feed_dict if feed_dict is not None else {})
    result = loop.run_until_complete(future)
    return result
| true |
f0817ba66d40cc4100dcaac732eed04c4c6045b1 | Python | tongwang/django-sugar | /sugar/utils/profiling.py | UTF-8 | 1,129 | 2.875 | 3 | [] | no_license | """
Utilities to help localize performance problems
"""
from contextlib import contextmanager
import inspect
import time
import sys
from django.db import connection
__all__ = ['query_count']
@contextmanager
def query_count(desc=None, print_queries=False):
    """
    Display a list of the database query count delta for an arbitrary block of
    code enclosed in a with statement::

        with query_count("retrieve user contact preferences"):
            # actually do something

    would display:

        retrieve user contact preferences: 37 queries in 0.53 seconds
    """
    # NOTE(review): this module uses Python 2 print-statement syntax.
    if not desc:
        # Default description: "[filename:lineno]" of the caller's caller.
        desc = "[%s:%s]" % inspect.stack()[2][1:3]
    # TODO: Verbose mode which displays queries?
    original_query_count = len(connection.queries)
    start_time = time.time()
    yield
    elapsed = time.time() - start_time
    qc_diff = len(connection.queries) - original_query_count
    print >>sys.stderr, "%s: %d queries in %0.2f seconds" % (
        desc,
        qc_diff,
        elapsed
    )
    if print_queries:
        # NOTE(review): [:-qc_diff] prints everything EXCEPT the queries
        # issued inside the block; [-qc_diff:] was probably intended -- confirm.
        for i in connection.queries[:-qc_diff]:
            print >>sys.stderr, "\t%(time)s\t%(sql)s" % i
e0bf9fd1b86eec79dcdb598b85c77f6f9521c594 | Python | freeflyfish/bqhr | /workbase/code_py/requests_demo/test8.py | UTF-8 | 2,147 | 3 | 3 | [] | no_license | # coding:utf-8
import base64
import hashlib

# Demo: hash a fixed byte string and base64-encode it.
data = b'scx1123'
hash_new = hashlib.md5(data) # or hashlib.md5()
hash_value = hash_new.hexdigest() # yields a 40-char (sha1) or 32-char (md5) hex string
print(hash_value)
print(len(hash_value))
print(base64.b64encode(data))
# e17910c91985a9923d87c516330dda05b50e272ca092c0b90aedfa63bf9b77aed63abf3b41d2045a5459ed38dc4343ed1997e90c4d1fa5fa408690cfedc5f2e5
# 7cfabd1aa17366f656f798f561059aeb80fccc35dd1c3af4caac4f6eb478c7b6350cc35e29e54e3f654ef0f57324302e9ba3aed0aba23b7446bc1b875d097ba1d39af4d8ce81131e6b8064a7a179f30d712a35b425eedf878571d0b598bc03faa251766810366fe3e5e16db99d714b1e2aefa2791287fc765df0e53ef1a9ca4f
# 48f47223167479066fa329b6912fb2bd
# 48f47223167479066fa329b6912fb2bd
s = 'a6db11202856a75dd057be58673f99752de7520bb9ca5fc7a1bcf692f510602a2e23a99183a40ef98edecf11c2887fa5ac3663282ee464cbb01a810c154f4ad25a17ea3650e2147cc425bb591a86e0e13bad750100180d0beaa3bb0d41ad0a4668092cddc29ee259937f1ad0cb4b1879f7944ce3195fcb88f4fec9a2d2b22701'
print(len(s))
def strxor(a, b):  # xor two strings of different lengths
    """XOR two strings character by character, truncated to the shorter one.

    ``zip`` already stops at the shorter argument, so the original
    length comparison and its two duplicated branches were redundant;
    the behaviour is unchanged.
    """
    return "".join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
# print('密码高强度加密方法')
# key = 'android123456'
# salt = 'python'
# print(key)
# '''''
# data=strxor("123456",key)
# print(data)
# data=strxor("123456",data)
# print(data)
# '''
#
# key1 = base64.b64encode(key.encode('utf-8'))
# print(key1)
# key2 = hashlib.md5(key1 + salt.encode('utf-8')).hexdigest()
# print(key2)
# key3 = hashlib.sha1(key2 + salt.encode('utf-8')).hexdigest()
# print('该sha1密文不可逆')
# print(key3)
# key4 = hashlib.sha512(key3 + salt.encode('utf-8')).hexdigest()
# print('该sha512密文不可逆')
# print(key4)
#
# print('test===========')
# print(hashlib.algorithms_available)
# h = hashlib.new('md5')
# print(h)
# h.update('begin')
# print(h.hexdigest())
# print(h.digest())
# print(h.digest_size)
# print(h.block_size)
# h2 = hashlib.new('ripemd160', 'what')
# print(h2)
# print(h2.hexdigest())
# print(h2.digest_size)
# print(h2.block_size) | true |
4fb625ac51bbe636d92b5420825791e18d300086 | Python | AntPokh/PythonNew | /2.2homework.py | UTF-8 | 706 | 4.34375 | 4 | [] | no_license | #Составить алгоритм увеличения всех трех, введённых с клавиатуры, переменных на 5,если среди них есть хотя бы две равные.
# В противном случае выдать ответ «равных нет».
# Read three integers; if at least two of them are equal, print their
# sum increased by 5, otherwise report that no two are equal.
number1 = int(input("Введите первое число :"))
number2 = int(input("Введите второе число :"))
number3 = int(input("Введите третье число :"))
# "At least two equal" <=> the set of the three values has fewer than 3 members.
if len({number1, number2, number3}) < 3:
    print(number1 + number2 + number3 + 5)
else:
    print("Равных нет!")
| true |
a8cd50e8f44b8d524dead6cce263d6ee31867717 | Python | dbriggs2/bmgt404 | /database.py | UTF-8 | 2,574 | 2.859375 | 3 | [] | no_license | import sqlite3
def db_account_exists(email):
    """Return the account row matching *email*, or None if there is none.

    The connection is now closed explicitly instead of leaking until
    garbage collection.
    """
    conn = sqlite3.connect('schedule.db')
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM account WHERE email = ?', (email,))
        return c.fetchone()
    finally:
        conn.close()
def db_account_Store(email, password):
    """Insert a new (email, password) row into the account table.

    The connection is now closed explicitly instead of leaking.
    """
    #file name of database needed to be changed
    conn = sqlite3.connect('schedule.db')
    try:
        c = conn.cursor()
        c.execute('INSERT INTO account (email, password) VALUES (?,?)', (email, password))
        conn.commit()
    finally:
        conn.close()
# In[15]:
def db_transaction_account(userEmail, userPassword):
    """Build the (userID, email, password) tuple for a new account.

    The user id is the part of the email before '@' (or the whole email
    if it has no '@') followed by four random decimal digits.
    """
    import random
    # Four independent random digits, same as the original four
    # random.choice calls.
    digits = ''.join(random.choice('0123456789') for _ in range(4))
    userName = userEmail.split('@', 1)[0]
    userID = userName + digits
    return (userID, userEmail, userPassword)
# In[13]:
def db_task_Store(task):
    """Insert a 7-field task tuple into the Account table.

    BUG FIX: the SQL string and its parameter tuple were wrapped in a
    single tuple, so cursor.execute raised TypeError; they are now
    passed as two separate arguments. The connection is also closed.
    """
    #file name of database needed to be changed
    conn = sqlite3.connect('schedule.db')
    try:
        c = conn.cursor()
        c.execute('INSERT INTO Account VALUES (?,?,?,?,?,?,?)', task)
        conn.commit()
    finally:
        conn.close()
# In[1]:
def db_transaction_task(userID, account_id, task_Name, type, description, date, time=None):
    """Bundle task fields into the 7-tuple expected by db_task_Store.

    BUG FIX: ``time`` was read from an undefined name and raised
    NameError on every call; it is now an explicit, optional parameter
    (default None), which keeps all existing call sites working.
    """
    task = (userID, account_id, task_Name, type, description, date, time)
    return task
# In[23]:
def db_Login(userEmail, userPassword):
    """Return True iff an account with this email exists and the password matches.

    Dead commented-out query code was removed; the connection is now
    closed explicitly instead of leaking.
    """
    #file name of database needed to be changed
    conn = sqlite3.connect('schedule.db')
    try:
        c = conn.cursor()
        c.execute('SELECT password FROM account WHERE email = ?', (userEmail,))
        pw = c.fetchone()
        return pw is not None and pw[0] == userPassword
    finally:
        conn.close()
# In[24]:
def extract_task(Id):
    """Return (account_id, task_name, task_description, date_time) for row *Id*.

    BUG FIX: the original called c.fetchone() once per field, but each
    call consumes a row, so every field after the first came from a
    different row (or None). The row is now fetched once and indexed.
    The query is also parameterized (the old ``%s`` string formatting
    was vulnerable to SQL injection), and the connection is closed.
    """
    #file name of database needed to be changed
    conn = sqlite3.connect('schedule.db')
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM account WHERE id = ?', (Id,))
        row = c.fetchone()
        account_id = row[1]
        task_name = row[2]
        task_description = row[3]
        date_time = row[4]
        return (account_id, task_name, task_description, date_time)
    finally:
        conn.close()
| true |
205f200734f65b4488e7cb119c8cd77b561cf2bb | Python | avi3tal/knowledgebase | /interviewbit/courses/programming/arrays/maximum_absolute_difference.py | UTF-8 | 895 | 4.15625 | 4 | [
"MIT"
] | permissive | """
You are given an array of N integers, A1, A2 ,…, AN. Return maximum value of f(i, j) for all 1 ≤ i, j ≤ N.
f(i, j) is defined as |A[i] - A[j]| + |i - j|, where |x| denotes absolute value of x.
For example,
A=[1, 3, -1]
f(1, 1) = f(2, 2) = f(3, 3) = 0
f(1, 2) = f(2, 1) = |1 - 3| + |1 - 2| = 3
f(1, 3) = f(3, 1) = |1 - (-1)| + |1 - 3| = 4
f(2, 3) = f(3, 2) = |3 - (-1)| + |2 - 3| = 5
So, we return 5.
"""
class Solution:
    # @param A : list of integers
    # @return an integer
    def maxArr(self, A):
        """Return max |A[i]-A[j]| + |i-j| over all pairs, in O(n).

        |A[i]-A[j]| + |i-j| always equals the larger of
        (A[i]+i)-(A[j]+j) and (A[i]-i)-(A[j]-j) for some ordering of
        i, j, so the answer is the larger spread of the sequences
        A[k]+k and A[k]-k. This replaces the accidental O(n^2) double
        loop with an equivalent O(n) pass; results are unchanged
        (0 for fewer than two elements).
        """
        if len(A) < 2:
            return 0
        plus = [a + i for i, a in enumerate(A)]
        minus = [a - i for i, a in enumerate(A)]
        return max(max(plus) - min(plus), max(minus) - min(minus))
# Demo from the problem statement: expected output is 5, then 0.
s = Solution()
print(s.maxArr([1, 3, -1]))
print(s.maxArr([1]))
| true |
14a38ce09fb0ee55a98dcb3ea5c85a5be4935b32 | Python | KishaniKandasamy/PythonForResearch | /randomwalk.py | UTF-8 | 368 | 3.1875 | 3 | [] | no_license | import random
import matplotlib.pyplot as plt
import numpy as np

#from the origin
X_O = np.array([[0],[0]])
# 100 random 2-D displacement steps drawn from N(0, 1).
delta_X=np.random.normal(0,1,(2, 100))
plt.plot(delta_X[0],delta_X[1],"go")
#find cumulative sum of displacements
X=np.cumsum(delta_X, axis=1)
#concatenate numpy arrays
X=np.concatenate((X_O,X), axis=1)
#plot the random walks
# NOTE(review): plt.show() is never called, so nothing is displayed
# when run as a plain script -- confirm this is meant for a notebook.
plt.plot(X[0],X[1] ,"ro-");
| true |
d10d28934615aff337c23a6980e0e9c26ebd6195 | Python | sergpolkin/zig-os | /tools/gen_image.py | UTF-8 | 917 | 2.828125 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
import sys
import subprocess
KERNEL_OFFSET = 1024 * 512
def _concat(a, b):
while (True):
buf = b.read(4*1024)
if (len(buf) == 0):
break
a.write(buf)
def main():
    """Build a boot image: bootloader binary at offset 0, kernel ELF at KERNEL_OFFSET.

    Usage: gen_image.py <bootloader-elf> <kernel-elf> <output-image>.
    Returns 0 on success, -1 on bad argument count.
    """
    if len(sys.argv) != 4:
        return -1
    bootloader = sys.argv[1]
    kernel = sys.argv[2]
    image = sys.argv[3]
    # TODO check `bootloader` and `kernel` exists
    # Generate `bootloader` binary (objcopy strips ELF headers).
    subprocess.check_call(['objcopy', '-Obinary', bootloader, image])
    with open(image, 'r+b') as image_fd:
        # Get `bootloader` length
        image_fd.seek(0, 2)
        bootloader_len = image_fd.tell()
        print(f"[gen_image] Bootloader size: {bootloader_len} bytes")
        # Append `kernel` ELF file
        # NOTE(review): the kernel file handle is never closed explicitly.
        image_fd.seek(KERNEL_OFFSET, 0)
        _concat(image_fd, open(kernel, 'rb'))
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
| true |
97c95f0afeaf8c76072296113c6442eaf0c71795 | Python | REFENTSE-MOTLGELWA/Team-4-Project-2 | /Separated Functions 1-7/function-six.py | UTF-8 | 659 | 3.875 | 4 | [
"MIT"
] | permissive | ### START FUNCTION
def word_splitter(df, column='Tweets'):
    """
    Splits the sentences of a dataframe column into lists of the
    separate lowercase words.

    Args:
        df: Pandas DataFrame containing *column*.
        column: name of the text column to split. Defaults to 'Tweets',
            which was previously hard-coded; existing callers are
            unaffected.

    Returns:
        df: the same dataframe, with a new 'Split <column>' column in
        which each entry is the lowercased, space-split word list.

    Example:
        >>> df.Tweets = "@ArthurGodbeer Is the power restored as yet?"
        ['@arthurgodbeer', 'is', 'the', 'power', 'restored', 'as', 'yet?']
    """
    # split(' ') (not split()) is kept deliberately to preserve the
    # original behaviour on repeated spaces.
    df['Split ' + column] = [text.lower().split(' ') for text in df[column]]
    return df
### END FUNCTION
| true |
fd3aba57049b775a43a9db5066c387f18f5d8d97 | Python | t-young31/ORCA-scripts | /mk_scan_inp.py | UTF-8 | 4,315 | 2.8125 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import argparse
def get_args():
    """Parse the command line: one positional argument, the ORCA .inp filename."""
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", action='store', help='.inp file')
    return parser.parse_args()
args = get_args()
input_path = args.filename
'''
-----------------------------Parse the input file--------------------------
'''
charge = mult = n_procs = None
xyzs = []
with open(input_path, 'r') as in_file:
    # First line holds the ORCA keywords; grab the PAL core count if present.
    keywords = in_file.readline().split()
    for item in keywords:
        if item.startswith("PAL"):
            n_procs = item.strip()[-1]
    # Skip ahead to the "*xyz charge mult" line.
    xyz_start = False
    while xyz_start == False:
        in_file_line = in_file.readline()
        if "*xyz" in in_file_line:
            xyz_start = True
            _xyz, charge, mult = in_file_line.split()
    # Collect coordinate lines until the closing "*".
    xyz_end = False
    while not xyz_end:
        in_file_line = in_file.readline()
        if "*" in in_file_line:
            xyz_end = True
            break
        xyzs.append(in_file_line)
n_atoms = len(xyzs)
'''
----------------------------Fix keywords-----------------------
'''
# Downgrade any optimization keyword to LooseOpt and drop Freq -- a scan
# only needs a loose optimization at each step.
for i in range(len(keywords)):
    if keywords[i] == 'OptTS' or keywords[i] == 'Opt':
        keywords[i] = 'LooseOpt'
    if keywords[i] == 'TightOpt':
        keywords[i] = 'LooseOpt'
if 'LooseOpt' not in keywords:
    keywords.append('LooseOpt')
if 'Freq' in keywords:
    keywords.remove('Freq')
'''
--------------------Generate and write a new geometry scan file----------
'''
# Get input for atoms to scan
raw_input = input('{:<50}'.format("Atoms to scan, seperated by a space:"))
print("NOTE: multiple character atoms are not supported")
atom1 = 0
atom2 = 0
# If only the indicies are defined use them directly
try:
    atom1, atom2 = [int(x) for x in raw_input.split()]
# Else determine the indicies from label e.g. C1
except ValueError:
    atom1_labeled, atom2_labeled = [x for x in raw_input.split()]
    # Count occurrences of the element symbol until the requested one
    # is reached (e.g. "C3" = the third carbon line).
    atom1_iterator = 0
    for i in range(len(xyzs)):
        line = xyzs[i]
        if atom1_labeled.strip()[0] == line.split()[0]:
            atom1_iterator +=1
            if atom1_iterator == int(atom1_labeled.strip()[1:]):
                atom1 = i
    atom2_iterator = 0
    for i in range(len(xyzs)):
        line = xyzs[i]
        if atom2_labeled.strip()[0] == line.split()[0]:
            atom2_iterator +=1
            if atom2_iterator == int(atom2_labeled.strip()[1:]):
                atom2 = i
print('{:<50}''{:<10}'.format('Will scan indicies:',(str(atom1) + ' ' + str(atom2))))
xyz_atom1 = np.zeros(3)
xyz_atom2 = np.zeros(3)
atom1_label, xyz_atom1[0], xyz_atom1[1], xyz_atom1[2] = xyzs[atom1].split()
atom2_label, xyz_atom2[0], xyz_atom2[1], xyz_atom2[2] = xyzs[atom2].split()
distance = np.round(np.linalg.norm(xyz_atom1 - xyz_atom2),5)
print('{:<50}{:<10}'.format(atom1_label + '––' + atom2_label + " distance is", distance))
final_distance = input('{:<50}'.format("Final distance:"))
if final_distance == "":
    if distance > 2.0: #2.0 Å is used as the cutoff between intially bonded and not
        if atom1_label == "H":
            final_distance = 1.1
        elif atom2_label == "H":
            final_distance = 1.1
        else:
            final_distance = 1.5
    if distance <= 2.0:
        final_distance = 2.8 #Default final distance for initally non-bonded atoms
    print('{:<50}{:<10}'.format("Defaulted to a final distance of:", final_distance))
else:
    # NOTE(review): the result of float() is discarded; presumably meant
    # as validation or as a conversion assignment -- confirm intent.
    float(final_distance)
no_steps = input('{:<50}'.format("Number of geometry scan steps:"))
if no_steps == "":
    no_steps = int(np.round(np.abs(float(final_distance) - float(distance))/0.1, 0)) #Default 0.1Å step size
    print('{:<50}''{:<10}'.format("Defaulted to ", str(no_steps) + " scan steps"))
else:
    # NOTE(review): result of int() discarded, same pattern as above.
    int(no_steps)
# --------------------------Construct the output scan file--------------
with open('scan.inp', 'w') as out_file:
    print(' '.join(keywords), file=out_file)
    print('%maxcore 4000 \n', '%geom Scan', sep='', file=out_file)
    print('B', atom1, atom2, '=', end=' ', file=out_file)
    print(distance, final_distance, no_steps, sep=', ', file=out_file)
    print('end \n', 'end',sep='', file=out_file)
    print('*xyz', charge, mult, file=out_file)
    [print(line, file=out_file, end='') for line in xyzs]
    print('*', file=out_file)
| true |
f837791d44be7a0100e8615c7da0af2626024bbd | Python | carpin17/test-repository | /helloshell.py | UTF-8 | 849 | 2.953125 | 3 | [] | no_license |
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license()" for more information.
>>> line 1 = 'Hello Python developer ...'
SyntaxError: invalid syntax
>>> 'Hello Python developer . . .'
'Hello Python developer . . .'
>>>
>>> line1 = 'Hello Python developer . . .'
>>> line2 = 'Welcome to the world of Python!'
>>> print(line1)
Hello Python developer . . .
>>> print(line2)
Welcome to the world of Python!
>>>
=============================== RESTART: Shell ===============================
>>> x = 3
>>> x
3
>>>
=============================== RESTART: Shell ===============================
>>> x
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
x
NameError: name 'x' is not defined
>>>
>>> #basically restarting the python shell causes the value to not be recognized anymore.
>>>
| true |
031aa6a524f9f42170015312b27624193060b893 | Python | Lairin-pdj/coding_test_practice_baekjoon | /23323_황소 다마고치.py | UTF-8 | 237 | 3.140625 | 3 | [] | no_license | import sys
# Faster stdin reads for competitive programming (shadows builtin input).
input = sys.stdin.readline
# parsing
t = int(input())
for _ in range(t):
    n, m = map(int, input().split(" "))
    # count = number of times n can be halved before reaching 0,
    # i.e. the bit length of n.
    temp = n
    count = 0
    while temp > 0:
        count += 1
        temp = temp // 2
    print(count + m)
| true |
fdf68143b776315864a123a1e3258c34005fdfcd | Python | ored95/data-analysis-course | /dawp2020/hy-data-analysis-with-python-2020/part02-e02_file_listing/test/test_file_listing.py | UTF-8 | 3,455 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
import re
from tmc import points
from tmc.utils import load, get_stdout, patch_helper
module_name="src.file_listing"
file_listing = load(module_name, "file_listing")
ph = patch_helper(module_name)
@points('p02-02.1')
class FileListing(unittest.TestCase):
    """TMC tests for the file_listing exercise.

    The student's function must return a list of 47 tuples
    (size, month, day, hour, minute, filename), parsed with ``re``
    from a listing file.
    """

    def test_size(self):
        # Shape check: list of 47 six-element tuples.
        result=file_listing()
        self.assertIsInstance(result, list, f"file_listing should return a list. Got {type(result)}.")
        self.assertEqual(len(result), 47, msg="The returned list should contain 47 tuples!")
        for t in result:
            self.assertIsInstance(t, tuple, f"All entries in the return list should be tuples. {t} is of type {type(t)}.")
            self.assertEqual(len(t), 6, msg="Each tuple should have six elements!")

    def test_content(self):
        # Field types and value ranges of every tuple.
        result=file_listing()
        self.assertIsInstance(result, list, f"file_listing should return a list. Got {type(result)}.")
        for t in result:
            self.assertIsInstance(t, tuple, f"All entries in the return list should be tuples. {t} is of type {type(t)}.")
            self.assertIsInstance(t[0], int, msg="size has wrong type!")
            self.assertIsInstance(t[1], str, msg="month has wrong type!")
            self.assertIsInstance(t[2], int, msg="day has wrong type!")
            self.assertIsInstance(t[3], int, msg="hour has wrong type!")
            self.assertIsInstance(t[4], int, msg="minute has wrong type!")
            self.assertIsInstance(t[5], str, msg="filename has wrong type!")
            months = "Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split()
            self.assertIn(t[1], months, msg="Incorrect month!")
            self.assertIn(t[2], range(1,32),
                          msg="The day should be between 1 and 31 for tuple %s!" % (t,))
            self.assertIn(t[3], range(0,24),
                          msg="The hour should be between 0 and 23 for tuple %s!" % (t,))
            self.assertIn(t[4], range(0,60),
                          msg="The minutes should be between 0 and 59 for tuple %s!" % (t,))

    def test_names(self):
        # A known subset of filenames must appear in the result.
        result = file_listing()
        self.assertIsInstance(result, list, f"file_listing should return a list. Got {type(result)}.")
        names = {t[-1] for t in result}
        self.assertTrue(names.issuperset({'example_figure.py', 'exception_hierarchy.pdf',
            'exception_hierarchy.png', 'exception_hierarchy.svg', 'extra.ipynb', 'face.png',
            'generate_contents.py', '.git', '.gitignore'}))

    def test_called(self):
        # Patch open() and every re entry point (pass-through side
        # effects) to verify the student actually used file I/O and re.
        with patch('builtins.open', side_effect=open) as o,\
             patch(ph('re.compile'), side_effect=re.compile) as c,\
             patch(ph('re.match'), side_effect=re.match) as m,\
             patch(ph('re.fullmatch'), side_effect=re.fullmatch) as fm,\
             patch(ph('re.search'), side_effect=re.search) as s,\
             patch(ph('re.findall'), side_effect=re.findall) as fa,\
             patch(ph('re.finditer'), side_effect=re.finditer) as fi:
            result=file_listing()
            o.assert_called()
            self.assertTrue(c.called or m.called or fm.called or s.called or fa.called or fi.called,
                msg="Expected that one of the following was called: "
                "re.match, re.fullmatch, re.search, re.findall, re.finditer!")


if __name__ == '__main__':
    unittest.main()
| true |
3a56e7629bdf2a880613e61803418463293a7797 | Python | wenchuan/euler | /58.py | UTF-8 | 579 | 3.1875 | 3 | [] | no_license | #!/usr/bin/python
# NOTE(review): Python 2 code (print statement, xrange). Appears to
# implement Project Euler 58 (spiral primes): walk the corners of the
# number spiral until the fraction of primes on the diagonals falls
# below 10%, then print the side length.
# Memoized primality results for n = 0..5.
p = [False,False,True,True,False,True]

def isP(n):
    # Trial division up to sqrt(n). Only called for n >= 6 (smaller n
    # are pre-seeded in ``p``), so one of the two branches always fires.
    for i in xrange(2,n):
        if i * i > n:
            return True
        if n % i == 0:
            return False

def isPrime(n):
    # Primality test memoized in the global list ``p``.
    if n < 0:
        return False
    if len(p) > n:
        return p[n]
    for i in xrange(len(p), n+1):
        p.append(isP(i))
    return p[n]

step = 2
x = 1
cnt = 0
i = 0
while True:
    i += 1
    # The four corner values of ring i are step apart.
    for j in xrange(4):
        x += step
        cnt += isPrime(x)
    step += 2
    # 4*i + 1 = total diagonal values so far; stop below a 10% prime ratio.
    if cnt * 10 < 4 * i + 1:
        print cnt, 4 * i + 1, i * 2 + 1
        exit(0)
| true |
ac98135c12a7bcf00beb38860e234c8fef3781fe | Python | knowMandM/django | /web0315/myapp/models.py | UTF-8 | 2,424 | 2.96875 | 3 | [
"MIT"
] | permissive | from django.db import models
from django.conf import settings
import os, csv
# Column indices of the fields within the CSV.
title = 0
time = 1
author = 2
text = 3
web = 4
abstract = 9
newstype = 6
label = 8
csv_data = [] # all news items
map_csv_data ={} # news grouped by source (web)
type_csv_data = {} # news grouped by news type
nearly_csv_date = {} # news grouped by similarity label
nearly_type_data = {}# only items that have a similarity label < 20 (the others hold all data)
class CMyNew:
    """One news article parsed from the CSV (fields filled in by load_data)."""
    id = None        # position of this item in csv_data
    title = None     # headline text
    time = None      # publication time string
    author = None    # author name
    text = None      # article body
    web = None       # source site
    abstract = None  # article summary
    new_type = None  # news category
    label = None     # integer similarity label
def get_data():
    """Lazily load the CSV on the first call, then return the cached list."""
    if not csv_data:
        load_data()
    return csv_data
def load_data():
    """Read datas2.csv and populate the module-level lookup structures."""
    print("load csv start ...")
    file_path = os.path.join(settings.BASE_DIR, "datas2.csv")
    news = []
    try:
        news = csv.reader(open(file_path))
    except Exception as err:
        # NOTE(review): csv.reader rarely raises here; open() errors are
        # swallowed and only printed, leaving ``news`` empty.
        print(err)
    for one_new in news:
        # Skip the header row.
        if one_new[0] == 'title':
            continue
        else:
            data = CMyNew()
            data.time = one_new[time]
            data.title = one_new[title]
            data.web = one_new[web]
            data.new_type = one_new[newstype]
            data.text = one_new[text]
            data.author = one_new[author]
            data.abstract = one_new[abstract]
            data.id = len(csv_data)
            data.label = int(one_new[label])
            # id -> data
            csv_data.append(data)
            # source site -> list of items
            if not (data.web in map_csv_data):
                map_csv_data[data.web] = []
            map_csv_data[data.web].append(data)
            # similarity label -> list of items
            if not (data.label in nearly_csv_date):
                nearly_csv_date[data.label] = []
            nearly_csv_date[data.label].append(data)
            # items that actually have a similarity match (label < 20)
            if data.label < 20:
                if not (data.new_type in nearly_type_data):
                    nearly_type_data[data.new_type] = []
                nearly_type_data[data.new_type].append(data)
            # news type -> list of items
            if not (data.new_type in type_csv_data):
                type_csv_data[data.new_type] = []
            type_csv_data[data.new_type].append(data)
    print("综合:", len(type_csv_data["综合"]))
    print("load csv complete ...")
# Eagerly load the data once at module import time.
get_data()
| true |
8b5815eaf67b478e3b8df4d428f3aef6288b80ca | Python | liuyuhua-ha/opencvStudy | /Bomb.py | UTF-8 | 1,243 | 3.1875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Author: Xingmo
import cv2
import numpy as np
# 读取图像
img = cv2.imread('test.jpg',0);
# 构造一个3×3的结构元素
element = cv2.getStructuringElement(cv2.MORPH_RECT,(3, 3))
# 膨胀图像 cv2.dilate(图像,元素结构)
dilate = cv2.dilate(img, element)
# 腐蚀图像 cv2.erode(图像,元素结构)
erode = cv2.erode(img, element)
# 将两幅图像相减获得边,第一个参数是膨胀后的图像,第二个参数是腐蚀后的图像
result = cv2.absdiff(dilate,erode);
cv2.imshow("result",result);
# 上面得到的结果是灰度图,cv2.threshold将其二值化以便更清楚的观察结果
# cv2.threshold(src , thresh, maxval, type[, dst]) 返回retval、dst
# cv2.threshold(图像, 阈值 , 最大值, 阈值类型) 返回值类型、返回处理后图像
# 阈值类型:THRESH_BINARY、THRESH_BINARY_INV、THRESH_TRUNC、THRESH_TOZERO、THRESH_TOZERO_INV
retval, result = cv2.threshold(result, 40, 255, cv2.THRESH_BINARY);
# 反色,即对二值图每个像素取反
result = cv2.bitwise_not(result);
# 显示图像
cv2.imshow("origin",img); # 原图
cv2.imshow("result",result);# 边缘检测图
cv2.imwrite('haha.jpg',result);
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
c7c2efbf1b515f361c3e68f8da0d57a1ce070778 | Python | RusseII/deephire2 | /Siamese-LSTM/code/ats.py | UTF-8 | 1,709 | 3.015625 | 3 | [] | no_license | import urllib2
import json
class ATS:
    """Thin client for the Resumator (JazzHR) applicant-tracking REST API.

    NOTE(review): Python 2 code (urllib2). The API key is hard-coded --
    it should be moved to configuration/environment, and rotating the
    committed key is advisable.
    """

    def __init__(self):
        self.key = '81UZVRVH3uldQNobChyi65jPLQbFux3f'

    # get all answers to the questionnaire
    def get_response(self, url):
        # The caller passes a URL ending in "apikey="; the key is appended here.
        content = json.loads(urllib2.urlopen(url + self.key).read())
        if not content:
            print("list is empty")
            return None
        return content

    # get names of a particular applicant
    def get_names(self, applicant_id):
        url = 'https://api.resumatorapi.com/v1/applicants/' + applicant_id + '?' + 'apikey='
        response = self.get_response(url)
        return response['first_name'], response['last_name']

    # get answers for a particular questionnaire, of a particular application
    def get_answers(self, applicant_id):
        url = 'https://api.resumatorapi.com/v1/questionnaire_answers/questionnaire_id/questionnaire_20170325222929_1MD9CIFKLEZ58ZAX/applicant_id/' + applicant_id + '?apikey='
        response = self.get_response(url)
        return response

    # main function that should be used to get all data (all information for all applicants)
    def get_all(self):
        url = 'https://api.resumatorapi.com/v1/jobs/job_20170322142227_DYY2JLPRNHJ2TKRE?apikey='
        response = self.get_response(url)
        people = []
        for applicant_id in response['job_applicants']:
            person_info = {}
            first, last = self.get_names(applicant_id['prospect_id'])
            person_info['answers'] = self.get_answers(applicant_id['prospect_id'])
            person_info['first_name'] = first
            person_info['last_name'] = last
            people.append(person_info)
        # print(people)
        return people
#USEAGE
#ats = ATS()
#ats.get_all()
| true |
c0ee406560310463f242d9f206ed5a7a927ace9d | Python | riccitensor/btce-api | /samples/cancel-orders.py | UTF-8 | 1,359 | 3 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/python
import sys
import btceapi
# This sample shows use of a KeyHandler. For each API key in the file
# passed in as the first argument, all pending orders for the specified
# pair and type will be canceled.
if len(sys.argv) < 4:
print "Usage: cancel_orders.py <key file> <pair> <order type>"
print " key file - Path to a file containing key/secret/nonce data"
print " pair - A currency pair, such as btc_usd"
print " order type - Type of orders to process, either 'buy' or 'sell'"
sys.exit(1)
key_file = sys.argv[1]
pair = sys.argv[2]
order_type = unicode(sys.argv[3])
handler = btceapi.KeyHandler(key_file)
for key in handler.keys:
print "Canceling orders for key %s" % key
t = btceapi.TradeAPI(key, handler)
try:
# Get a list of orders for the given pair, and cancel the ones
# with the correct order type.
orders = t.activeOrders(pair=pair)
for o in orders:
if o.type == order_type:
print " Canceling %s %s order for %f @ %f" % (pair, order_type,
o.amount, o.rate)
t.cancelOrder(o.order_id)
if not orders:
print " There are no %s %s orders" % (pair, order_type)
except Exception as e:
print " An error occurred: %s" % e
| true |
f20940327d99dee58c7c7164d6648f1ec86e2107 | Python | kikugawa-shoma/Atcoder | /ABC/ABC148/ABC148E.py | UTF-8 | 222 | 3.40625 | 3 | [] | no_license |
# Appears to count the trailing zeros of the double factorial N!!:
# an odd N!! contains no factor of 2, hence no trailing zero; for even
# N, N!! = 2**(N/2) * (N/2)!, so the zero count is the number of factors
# of 5 in (N/2)! (Legendre's formula).
N = int(input())
if N % 2 == 1:
    print(0)
else:
    beki5 = 1  # "beki" = power; holds 5**k
    sum0 = 0
    while 1:
        beki5 = beki5 * 5
        if beki5 > N:
            break
        else:
            # N // (2 * 5**k) == (N/2) // 5**k because N is even.
            sum0 += N//(beki5*2)
    print(sum0)
| true |
def list_text(t):
    """Return the string of all characters with code points 0 .. t-1."""
    return "".join(map(chr, range(t)))
def dm(t):
    """Sum of the Unicode code points of all characters in *t*."""
    return sum(ord(ch) for ch in t)
def dm0(t):
    """List of the Unicode code points of the characters of *t*."""
    return list(map(ord, t))
# Read the ciphertext file and undo the substitution applied by the
# matching encryption script: each code point is shifted back by the
# value-sum ``m`` and then looked up ``k % md`` positions earlier in
# ``s``, the alphabet of all Unicode characters.
t, d, s, m, k, md = "", "", list_text(1114112), dm(input("input the value:")), dm(input("input key:")), int(input("input mod:"))
# Guard against degenerate key material (zero or negative sums).
if m < 1:
    m = 1
if k < 1:
    k = 1
if md < 1:
    md = 3
for i in dm0(open(input("filename1:"), "r").read()):
    t += s[s.find(str(chr(int(i) - m))) - k % md]
# BUG FIX: the output file used to be the literal name "filename2:";
# prompt for it, mirroring the "filename1:" prompt above.
f = open(input("filename2:"), "w")
f.write(t)
f.close()
| true |
2acc949504524109a687ccbd1298879d1fa9cb39 | Python | JericHunter/spdweather | /app.py | UTF-8 | 831 | 2.703125 | 3 | [] | no_license | from flask import Flask, render_template, request
import requests
import json

app = Flask(__name__) # initiating flask class for project

@app.route('/') # creating bridge
def home():
    """Render the landing page with the city search form."""
    return render_template("index.html")

@app.route('/weather')
def weather():
    """Look up current weather for the city in query param ``q`` and render it."""
    # NOTE(review): API key hard-coded in source -- move to config/env
    # and rotate the committed key.
    APP_ID = '6b7fda170a093cef98d8ee2fb042472b'
    WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather"
    user_input = request.args.get("q")
    params = {
        'q': user_input,
        'appid': APP_ID
    }
    #user input API call
    r = requests.get(WEATHER_URL,params=params)
    # code needs error handling
    weather_result = r.json()
    # NOTE(review): without a units parameter the API presumably returns
    # Kelvin -- confirm the template expects that.
    temp = int(weather_result["main"]["temp"])
    return render_template("weather.html", user_input=user_input,weather_result=weather_result, temp=temp,)

if __name__ == "__main__":
    app.run(debug=True)
| true |
b134bb466f0c43303bd0df233106210c357ceacd | Python | Nuria788/Aso_Python | /Operadores.py/Concatenacion.py | UTF-8 | 469 | 3.578125 | 4 | [] | no_license | nombre = 'Nuria'
# With +, the variable's value is concatenated into the string.
print('Mi nombre es: ' + nombre)
# A comma concatenates with a space inserted before the value.
print('Mi nombre es: ' , nombre)
# To concatenate integer values we must use a comma,
# because + only concatenates str values.
total = 500
print('El total es: ', total)
# Several values can be concatenated by separating them with commas.
print('El total es: ', total, '_ Vendidio a: ', nombre)
''' creando comentarios'''
4e54000de9012947d920c99d8df62f9120a0fa10 | Python | PecnikL/GasilskaBrigada | /porocilo.py | UTF-8 | 750 | 2.984375 | 3 | [
"MIT"
] | permissive | import modeli
import baza
import sqlite3
import os
import datetime
now = datetime.datetime.now()
def napisi_porocilo(datoteka,leto=(now.year-1)):
    """Write a yearly report of interventions to the file *datoteka*.

    NOTE(review): the default ``leto`` (year) is evaluated once at
    import time, so a long-running process keeps the year from startup.
    The file is opened without an explicit encoding -- confirm the
    platform default handles the Slovenian text correctly.
    """
    f = open(datoteka, "w+")
    f.write("Letno poročilo\n\n")
    intervencije = modeli.vse_intervencije()
    def dodaj_intervencije(intervencije, leto):
        # Append one numbered line per intervention whose start year matches.
        f.write("Vse intervencije iz leta "+ str(leto) + ":\n")
        for i in range(len(intervencije)):
            inter = intervencije[i]
            # inter[1] is a "d.m.yyyy" date string; take the year part.
            zac_leto = inter[1].split(".")[2]
            if int(zac_leto) == leto:
                niz = str(i+1) + ". " +inter[1] + ", " + inter[3] + ", " + inter[5] + "\n"
                f.write(niz)
        return None
    dodaj_intervencije(intervencije, leto)
    f.close()
| true |
bbf6b0477c50f3439da0e535a698c9eeeb70265b | Python | ChangeXuan/GXUCG | /Python/PyTorch/PyTorchSelfNet.py | UTF-8 | 3,887 | 2.828125 | 3 | [] | no_license | # -------从头构建一个MNIST识别网络,以便更好的认识Torch.nn
# ------- Download the MNIST dataset to local storage -------
from pathlib import Path
import requests

DATA_PATH = Path("data")
# i.e. PATH = data/mnist
PATH = DATA_PATH / "mnist"
# Create data/mnist under the script's working directory.
PATH.mkdir(parents=True, exist_ok=True)

URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"

# Only download if the dataset is not already present.
if not (PATH/FILENAME).exists():
    print("Start Loading...")
    # Download the file into memory.
    content = requests.get(URL + FILENAME).content
    # Write the in-memory content to the local file.
    (PATH/FILENAME).open("wb").write(content)
# ------- Load the data
# pickle is a Python-specific format for serialising objects
import pickle
import gzip
# The dataset is stored as numpy arrays
# as_posix() converts a Windows-style path to UNIX style
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
# ------- Reshape the data for display
from matplotlib import pyplot
import numpy as np
# Each image is 28*28 but is stored flattened as 784 (28*28) values,
# so restore the 1-D row to 2-D before plotting
pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray")
# pyplot.show()
# print(x_train.shape)
# ------- Convert the numpy arrays to torch tensors
import torch
# Use map with torch.tensor to convert all four arrays at once
x_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid))
# n is the number of images, c is the flattened image length (784)
n, c = x_train.shape
# print(x_train, y_train)
# print(x_train.shape)
# print(y_train.min(), y_train.max())
# ------- Build a neural network from scratch
import math
# Weight matrix is 784*10 (784 rows, 10 columns), scaled by 1/sqrt(fan-in)
weights = torch.randn(784, 10) / math.sqrt(784)
# Mark the tensor as requiring autograd
weights.requires_grad_()
bias = torch.zeros(10, requires_grad=True)
# ------- Hand-written linear model
def log_softmax(x):
    """Row-wise log-softmax: log(exp(x) / sum(exp(x))).

    Uses the numerically stable form (x - m) - log(sum(exp(x - m))) with
    m = max(x) along the last dim; the naive form overflows exp() for
    large logits (producing inf/nan).
    """
    shifted = x - x.max(-1, keepdim=True).values
    return shifted - shifted.exp().sum(-1, keepdim=True).log()
def model(xb):
    """Linear layer followed by log-softmax.

    Uses the module-level `weights` (784x10) and `bias` (10) tensors.
    """
    logits = torch.matmul(xb, weights) + bias
    return log_softmax(logits)
bs = 64
# Take the first mini-batch of 64 images
xb = x_train[0: bs]
preds = model(xb)
print(preds[0], preds.shape)
# ------- Use negative log-likelihood as the loss function
def nll(input, target):
    """Negative log-likelihood: negated mean of the log-probabilities
    that `input` assigns to the true classes listed in `target`."""
    row_indices = range(target.shape[0])
    picked = input[row_indices, target]
    return -picked.mean()
loss_func = nll
# ------- Check the loss with the random initial weights
yb = y_train[0: bs]
print(loss_func(preds, yb))
# ------- Compute accuracy
def accuracy(out, yb):
    """Fraction of rows in `out` whose argmax matches the label in `yb`."""
    predicted_labels = torch.argmax(out, dim=1)
    hits = (predicted_labels == yb).float()
    return hits.mean()
# ------- Check the accuracy of the random initial weights
print(accuracy(preds, yb))
# ------- Train the network iteratively
# (Translation of the note below: pick the mini-batch size — bs is 64 here,
# i.e. 64 images per step; predict with the model; compute the loss;
# back-propagate to update the parameters, here `weights` and `bias`.)
'''
- 选择小批次的大小(这里的bs为64,即每次训练64张)
- 使用模型去预测输出
- 计算损失
- 反向传播来更新权重这里是weights和bias
'''
# Single-step debugger for Python (unused; see the commented set_trace() call)
from IPython.core.debugger import set_trace
lr = 0.5
epochs = 2
for epoch in range(epochs):
    # NOTE(review): the format string has a single %d; this was probably
    # meant to be 'epoch: %d / %d' % (epoch, epochs).
    print('epoch: %d / epochs:'%epoch)
    # 64 images per step; enough steps to consume all 50000 training images
    for i in range((n-1) // bs + 1):
        # set_trace()
        start_i = i*bs
        end_i = start_i+bs
        xb = x_train[start_i:end_i]
        yb = y_train[start_i:end_i]
        preds = model(xb)
        loss = loss_func(preds, yb)
        print('loss is %f'%loss)
        loss.backward()
        with torch.no_grad():
            weights -= weights.grad*lr
            bias -= bias.grad*lr
            # Zero the gradients so they do not accumulate across steps
            weights.grad.zero_()
            bias.grad.zero_()
    print('-'*10)
# ------- Done: the simplest classifier — no hidden layer, a single softmax output
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
| true |
90b9a06498e2e270a3c2e0dcb56b741af2f1fa5c | Python | AlexMusabelliu/Steam-Cards | /steamcards.py | UTF-8 | 3,187 | 3.03125 | 3 | [] | no_license | #steam market comparison for booster packs vs steam cards
#make UI w/ OpenCV
from bs4 import BeautifulSoup, SoupStrainer
import requests
from math import fsum
#details about cards
def trading_cards(game_title, foil):
allPrice = []
if foil.lower() == 'y':
addend = '&category_753_cardborder%5B%5D=tag_cardborder_1'
else:
addend = '&category_753_cardborder%5B%5D=tag_cardborder_0'
getTitle = SoupStrainer(class_="market_listing_row market_recent_listing_row market_listing_searchresult")
req = requests.get('https://steamcommunity.com/market/search?q=&category_753_Game%5B%5D=any&category_753_item_class%5B%5D=tag_item_class_2&appid=753&q=' + game_title.replace(" ", "+") + addend)
soup = BeautifulSoup(req.content, 'lxml', parse_only=getTitle)
for x in soup.find_all(class_='market_listing_row market_recent_listing_row market_listing_searchresult'):
#print(x.find(class_='normal_price'))
for y in x.find_all(class_='normal_price'):
if y.string != None:
print(x.find(class_='market_listing_item_name').string + ': ' + x.find(class_='market_listing_game_name').string + ' | ' + y.string)
allPrice.append(float(y.string[1:]))
#print(allPrice)
Min = sorted(allPrice[0:3])
Max = sorted(allPrice[len(allPrice) - 4:len(allPrice) - 1])
nuMin = []
nuMax = []
for x in Min:
if x * .15 <= 0.02:
x -= 0.02
else:
x *= .85
nuMin.append(x)
for y in Max:
if y * .15 <= 0.02:
y -= 0.02
else:
y *= .85
nuMax.append(x)
Min = nuMin
Max = nuMax
print("Min gain: " + '$' + str(round(fsum(Min), 2)), "Max: " + '$' + str(round(fsum(Max), 2)))
booster(game_title, round(fsum(Min), 2))
#details about booster packs
def booster(game_title, unpackPrice):
    """Print the net (after-fee) price of each booster-pack listing for
    `game_title` and compare it against `unpackPrice`, the minimum gain
    expected from selling the unpacked cards."""
    req = requests.get('https://steamcommunity.com/market/search?q=&category_753_Game%5B%5D=any&category_753_item_class%5B%5D=tag_item_class_5&appid=753&q=' + game_title.replace(" ", "+"))
    getTitle = SoupStrainer(class_="market_listing_row market_recent_listing_row market_listing_searchresult")
    soup = BeautifulSoup(req.content, 'lxml', parse_only=getTitle)
    for x in soup.find_all(class_='market_listing_row market_recent_listing_row market_listing_searchresult'):
        #print(x.find(class_='normal_price'))
        for y in x.find_all(class_='normal_price'):
            if y.string != None:
                # Steam fee: flat 0.02 minimum, otherwise ~15% of the price.
                price = float(y.string[1:])
                if price * .15 <= 0.02:
                    price -= 0.02
                else:
                    price *= .85
                print(x.find(class_='market_listing_item_name').string + ' | ' + '$' + str(round(price, 2)))
            # NOTE(review): runs once per price tag and raises NameError if
            # the first tag has no string (`price` unbound); it likely
            # belongs one level further out — confirm intended placement.
            print("Minimum net gain from unpacking: " + str(round(unpackPrice - price, 2)))
# Entry point: prompt the user, then fetch card and booster prices.
trading_cards(input('Name of game? '), input('Foil cards included? (Y/N) '))
f503b7e5c1dcd1b8d7cbf08f1b0a68bb75098404 | Python | PatrickUncle/python | /我的第二个Python代码.py | UTF-8 | 428 | 3.671875 | 4 | [] | no_license | # import random
# secret = random.randint(1,5)
# print("----------我爱潘淑虹----------")
# while 1:
#
# temp = input("请输入你的猜想:")
# guess = int(temp)
# if guess == secret:
# print("你真棒")
# break
# else:
# if guess > secret:
# print("大了 大了,重新来")
# else:
# print("小了 小了 重新来")
# print("游戏结束")
| true |
e17def36ecda2ea6c428a8da0d2428b49eeb4f8a | Python | eminamitani/HDNNP-tools | /BulkSi/checkSymmFunc/plotViolin-G1G4-bs.py | UTF-8 | 1,758 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
"""
This script is for plotting Violin chart of G1/G4-1/G4-5
of bad sample
"""
def plotViolinPers(symffile,plotfile,grpname,persdata):
    """Collect the G1/G4-1/G4-5 symmetry-function values from `symffile`,
    dump them to `persdata` (one "G1 G4-1 G4-5" row per atom) and save a
    violin plot of their distributions to `plotfile`.

    symffile: .npz archive containing a 'sym_func' array.
    grpname: label used in the plot title and log message.
    """
    symf = np.load(symffile)
    symdata = symf['sym_func']
    G1 = []
    G41 = []
    G45 = []
    # Gather G1/G4-1/G4-5 for the violin plot and the persistence output.
    # Columns 0, 25 and 29 hold G1, G4-1 and G4-5 respectively.
    with open(persdata, 'w') as f1:
        for eachsample in symdata:
            for gdata in eachsample:
                G1.append(gdata[0])
                G41.append(gdata[25])
                G45.append(gdata[29])
                f1.write(f"{gdata[0]} {gdata[25]} {gdata[29]}\n")
    # Plot the violin chart of G1/G4-1/G4-5.
    plt.style.use('default')
    sns.set()
    sns.set_style('whitegrid')
    sns.set_palette('gray')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title(f"[{grpname}] Violin chart of G1/G4-1/G4-5")
    ax.violinplot([G1,G41,G45])
    ax.set_xticks([1, 2, 3])
    ax.set_xticklabels(["G1","G4-1","G4-5"])
    ax.set_ylabel("Value of G")
    fig.savefig(plotfile)
    print(f'Violin chart of {grpname} is plotted')
    # Close this specific figure so repeated calls do not leak figures.
    plt.close(fig)
if __name__ == '__main__':
    # Base directory of the bad-sample ("bs") HDNNP runs.
    bsfolder="/home/okugawa/HDNNP/Si-190808-bs/"
    # NOTE(review): bsfolder already ends with '/', so this yields
    # '...bs//result/...'; harmless on POSIX but worth normalising.
    outfolder=bsfolder+"/result/symf/"
    persoutfolder=outfolder+"Pers/"
    grps=["bs1","bs2","bs3","bs4","bs5","bs6","bs7","bs8"]
    # Plot a violin chart and write persistence data for each group.
    for grp in grps:
        symffile=bsfolder+grp+"-d20n50/1/data/CrystalSi64/symmetry_function.npz"
        plotfile=outfolder+grp+"-V.png"
        persdata=persoutfolder+grp+".txt"
        plotViolinPers(symffile,plotfile,grp,persdata)
d17ede559164d8c5941891e04d0e8e71c84af500 | Python | karbekk/Python_Data_Structures | /Interview/Python_Excercises/Interview_Qs/timeIt.py | UTF-8 | 362 | 3.328125 | 3 | [] | no_license | # Tool for measuring execution time of small code snippets.
# Tool for measuring the execution time of small code snippets.
# timeit.Timer precompiles the statement; Timer.timeit(number=N) runs it
# N times and returns the total elapsed time in seconds as a float.
import timeit

# BUG FIX: the original called timeit.timeit() — which times an empty
# statement and returns a float — and then invoked .timeit(...) on that
# float, raising AttributeError. Build a Timer object instead.
timer = timeit.Timer('"-".join(str(n) for n in range(100))')
print(timer.timeit(number=10000))
bc190ae1f60f4d483201aae82a0d566b0dfa5ad2 | Python | mbithenzomo/algorithms-in-python | /sorting_algorithms/quick_sort.py | UTF-8 | 856 | 4.59375 | 5 | [] | no_license | def quick_sort(my_list):
    """Takes in a list of numbers and performs a quick sort.

    Returns a new list with the numbers in ascending order (the input
    list is not modified).
    """
    length = len(my_list)
    # Base case: a list with 0 or 1 elements is already sorted.
    if(length <= 1):
        return my_list
    else:
        # Set the pivot to be the first number in the list.
        # NOTE(review): a first-element pivot degrades to O(n^2) on
        # already-sorted input; a random pivot would avoid that.
        pivot = my_list[0]
        # Get list of all numbers greater than the pivot
        greater = [number for number in my_list[1:] if number > pivot]
        # Get list of all numbers lesser than or equal to the pivot
        lesser = [number for number in my_list[1:] if number <= pivot]
        # Recursively sort each side and splice around the pivot.
        return quick_sort(lesser) + [pivot] + quick_sort(greater)
# Script entry point.
if __name__ == '__main__':
    # SECURITY NOTE(review): under Python 2, input() eval()s the typed text
    # (arbitrary code execution); under Python 3 it returns a plain string,
    # not a list, so quick_sort would sort characters. Confirm the intended
    # interpreter and consider ast.literal_eval.
    my_list = input('Enter a Python list containing numbers:\n')
    print(quick_sort(my_list))
| true |
3f8478e54a3a48dabf4536795c09e16b2bf921b4 | Python | omerivlin/ann_gui | /ann_gui_logic.py | UTF-8 | 7,949 | 3.375 | 3 | [] | no_license | from PyQt5 import QtWidgets, uic
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
def get_combo_box(options, select_callback):
    """Build a QComboBox populated with `options`; `select_callback` fires
    whenever the user activates an entry (receives the activated index)."""
    box = QtWidgets.QComboBox()
    box.addItems(options)
    box.activated.connect(select_callback)
    return box
class CircleDrawer(QtWidgets.QWidget):
    """Widget that paints a single filled circle centred in its area."""
    def __init__(self, radius):
        # radius: circle radius in pixels; also determines sizeHint().
        QtWidgets.QWidget.__init__(self)
        self.radius = radius
    @staticmethod
    def draw_circle(paint_object, x_position, y_position, radius):
        """Draw one red-filled circle of `radius` centred at (x, y)."""
        center = QtCore.QPoint(x_position, y_position)
        # fill each circle red
        paint_object.setBrush(QtCore.Qt.red)
        paint_object.drawEllipse(center, radius, radius)
    def paintEvent(self, event):
        """Qt paint hook: draw the circle in the middle of this widget."""
        paint = QtGui.QPainter()
        paint.begin(self)
        # optional smoothing
        paint.setRenderHint(QtGui.QPainter.Antialiasing)
        # blue outline pen; the brush set in draw_circle fills red
        paint.setPen(QtCore.Qt.blue)
        # Draw the circle in the middle of the parent widget
        self.draw_circle(paint, event.rect().width() / 2, event.rect().height() / 2, self.radius)
        paint.end()
    def sizeHint(self):
        """Preferred size: the circle's bounding square."""
        return QtCore.QSize(self.radius * 2, self.radius * 2)
class NeuralNetworkMap(QtWidgets.QWidget):
    ''' Widget that draws a feed-forward network: one column of circles per
    layer, with lines connecting every node to every node of the next layer. '''
    # Radius (pixels) of each node circle.
    NODE_SIZE = 7
    def __init__(self, layers):
        """
        Initiates and draws a neural network using the given layers.
        @param layers: a list containing the amount of nodes in each layer,
        for example - [4,7,6,9,2]
        """
        # Initialize the object as a QWidget and
        # set its title and minimum width
        QtWidgets.QWidget.__init__(self)
        self.setWindowTitle('My Network!')
        self.setMinimumWidth(300)
        self.setMinimumHeight(300)
        # Create the QVBoxLayout that lays out the whole form
        self.main_layout = QtWidgets.QHBoxLayout()
        self.setLayout(self.main_layout)
        # One QVBoxLayout per layer, and {layer index: [CircleDrawer, ...]}.
        self.columns = []
        self.nodes = {}
        self._layers = []
        # Assigning through the property builds the initial layout.
        self.layers = layers
    @property
    def layers(self):
        # Current list of per-layer node counts.
        return self._layers
    @layers.setter
    def layers(self, value):
        # Rebuild the node widgets only when the layer spec actually changes.
        if value != self._layers:
            self._layers = value
            self.delete_all_nodes()
            self._create_layers_layout(value)
    def delete_all_nodes(self):
        """Remove every node widget from its column and forget them all."""
        for column, nodes in self.nodes.items():
            for node in nodes:
                self.columns[column].removeWidget(node)
                node.deleteLater()
        self.nodes = {}
        self.columns = []
    def _create_layers_layout(self, layers):
        """
        Creates the circles in the layout.
        :param layers: A list containing amount of circles in each layer.
        :return: None
        """
        for layer_index, layer_nodes_amount in enumerate(layers):
            # A column is a vertical box
            current_column = QtWidgets.QVBoxLayout()
            self.columns.append(current_column)
            self.nodes[layer_index] = []
            for node_index in range(layer_nodes_amount):
                # Create a circle
                current_circle = CircleDrawer(self.NODE_SIZE)
                self.nodes[layer_index].append(current_circle)
                # Add the nodes to the column
                current_column.addWidget(current_circle)
            # Add the column to the "network"
            self.main_layout.addLayout(current_column)
    @staticmethod
    def _get_node_position(node):
        """
        Since each node's circle is in a box - calculate the box's center
        """
        node_x = (node.width() / 2) + node.x()
        node_y = (node.height() / 2) + node.y()
        return node_x, node_y
    def paintEvent(self, event):
        """
        Qt paint hook: draws the lines that connect the circles.
        """
        paint = QtGui.QPainter()
        paint.begin(self)
        # optional smoothing
        paint.setRenderHint(QtGui.QPainter.Antialiasing)
        # blue pen for the connection lines
        paint.setPen(QtCore.Qt.blue)
        for layer_index, layer_nodes in self.nodes.items():
            for node in layer_nodes:
                # Don't draw lines for the last layer
                if layer_index == len(self.nodes) - 1:
                    continue
                node_x, node_y = self._get_node_position(node)
                # Connect each node with all the nodes in the next layer
                next_layer = layer_index + 1
                for next_node in self.nodes[next_layer]:
                    next_node_x, next_node_y = self._get_node_position(next_node)
                    paint.drawLine(node_x, node_y, next_node_x, next_node_y)
        paint.end()
class MainWindow(QtWidgets.QMainWindow):
    """Main window: a table of (node count, activation) rows per layer plus
    a live drawing of the resulting network."""
    # Table column holding the activation-function combo box.
    ACTIVATION_FUNCTION_COLUMN_INDEX = 1
    # Table column holding the node-count combo box.
    NODES_AMOUNT_COLUMN_INDEX = 0
    ACTIVATION_FUNCTIONS = ['relu', 'coco', 'yaron']
    # NOTE(review): range(1, MAX_NODES_AMOUNT) stops at 9, so the largest
    # selectable node count is MAX_NODES_AMOUNT - 1 — confirm intent.
    MAX_NODES_AMOUNT = 10
    NODES_OPTIONS = [str(i) for i in range(1, MAX_NODES_AMOUNT)]
    def __init__(self):
        self.table_rows_count = 0
        # NOTE(review): calls QWidget.__init__ although the base class is
        # QMainWindow; super().__init__() would be the usual form.
        QtWidgets.QWidget.__init__(self)
        uic.loadUi("ann_gui.ui", self)
        self.add_layer_button.clicked.connect(self.add_layer)
        self.remove_layer_button.clicked.connect(self.remove_layer)
        # Map each combo box widget to the table row it lives in.
        self.activation_combos = {}
        self.nodes_amount_combos = {}
        self.network_drawing = NeuralNetworkMap([])
        self.main_grid_layout.addWidget(self.network_drawing, 3, 4)
        self._network_layers = []
    def add_layer(self):
        """Append a table row with node-count and activation combo boxes."""
        self.tableWidget.insertRow(self.table_rows_count)
        # Making the middle cell combo box
        current_row = self.table_rows_count
        activation_combo_box = get_combo_box(self.ACTIVATION_FUNCTIONS, self.update_activation)
        self.activation_combos[activation_combo_box] = current_row
        self.tableWidget.setCellWidget(current_row, self.ACTIVATION_FUNCTION_COLUMN_INDEX, activation_combo_box)
        nodes_amount_combo_box = get_combo_box(self.NODES_OPTIONS, self.update_nodes_amount)
        self.nodes_amount_combos[nodes_amount_combo_box] = current_row
        self.tableWidget.setCellWidget(current_row, self.NODES_AMOUNT_COLUMN_INDEX, nodes_amount_combo_box)
        self.table_rows_count += 1
        self.update_network_drawing()
    def remove_layer(self):
        """Drop the last table row and redraw the network."""
        self.tableWidget.removeRow(self.table_rows_count - 1)
        self.table_rows_count -= 1
        self.update_network_drawing()
    def update_activation(self, activation_index):
        """Slot: mirror the chosen activation name into the table cell."""
        sender = self.sender()
        row = self.activation_combos[sender]
        self.insert_value_to_table(self.tableWidget,
                                   row,
                                   self.ACTIVATION_FUNCTION_COLUMN_INDEX,
                                   self.ACTIVATION_FUNCTIONS[activation_index])
    def update_nodes_amount(self, activation_index):
        """Slot: mirror the chosen node count into the table and redraw."""
        sender = self.sender()
        row = self.nodes_amount_combos[sender]
        self.insert_value_to_table(self.tableWidget,
                                   row,
                                   self.NODES_AMOUNT_COLUMN_INDEX,
                                   self.NODES_OPTIONS[activation_index])
        self.update_network_drawing()
    def insert_value_to_table(self, table_widget, row, column, value):
        """Write `value` as a plain item into the given table cell."""
        table_widget.setItem(row, column, QtWidgets.QTableWidgetItem(value))
    def update_network_drawing(self):
        """Rebuild the drawing from the node counts currently in the table."""
        layers_data = [int(self.tableWidget.cellWidget(row, self.NODES_AMOUNT_COLUMN_INDEX).currentText()) for row in range(self.tableWidget.rowCount())]
        self.network_drawing.layers = layers_data
# Launch the Qt application when run as a script.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    sys.exit(app.exec_())
bce9751b3a98f73efef3e5771d6d261cffbbfebb | Python | tthein15/COVID-Simulation | /India_PolynomialRegression_DC.py | UTF-8 | 6,001 | 2.90625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from India_Data_COVID import daily_cases_India_RA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, mean_squared_log_error
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
# Day 0 = 01/30/20
# NOTE(review): `date_format`, `Data` and `India` are not defined in this
# file and are not among the imports above — confirm they are meant to come
# from India_Data_COVID.
a = date_format
b = date_format[:276]
c = date_format[276:299]
x = np.int_(np.where(Data.iloc[India,3]))[0]
y = np.array(daily_cases_India_RA)
# Train/Test: Split Train: Feb-October; Test: November
xtrain = np.int_(np.where(Data.iloc[India,3]))[0][:276]
xtest = np.int_(np.where(Data.iloc[India,3]))[0][276:299]
ytrain = np.array(daily_cases_India_RA)[:276]
ytest = np.array(daily_cases_India_RA)[276:299]
# Reshape to column vectors, as scikit-learn expects 2-D feature arrays
x1 = xtrain.reshape(-1,1)
x1test = xtest.reshape(-1,1)
y1 = ytrain.reshape(-1,1)
y1test = ytest.reshape(-1,1)
# Fit plain numpy polynomials of degree 1-6 on the full series
p1 = np.polyfit(x,y,1)
p2 = np.polyfit(x,y,2)
p3 = np.polyfit(x,y,3)
p4 = np.polyfit(x,y,4)
p5 = np.polyfit(x,y,5)
p6 = np.polyfit(x,y,6)
# Visual check of the fits
plt.figure(figsize=(12,5))
plt.grid()
plt.scatter(a,y, facecolor = 'none', edgecolor = 'blue', label = 'Data')
plt.plot(a,np.polyval(p2,x),'k--', label = 'Degree: 2')
plt.plot(a,np.polyval(p3,x),'y--', label = 'Degree: 3')
plt.plot(a,np.polyval(p4,x),'g--', label = 'Degree: 4')
plt.plot(a,np.polyval(p5,x),'b--', label = 'Degree: 5')
plt.plot(a,np.polyval(p6,x),'c--', label = 'Degree: 6')
plt.title('Daily Cases In India: Polynomial Regression Fits')
plt.ylabel('Daily Cases')
plt.xlabel('Days Since 12/31')
plt.legend()
plt.show()
# Cubic (degree-3) fit via sklearn PolynomialFeatures + LinearRegression
polynomial_features1 = PolynomialFeatures(degree = 3)
x_poly1 = polynomial_features1.fit_transform(x1)
x_poly_test1 = polynomial_features1.fit_transform(x1test)
model1 = LinearRegression()
model1.fit(x_poly1,y1)
y_poly_pred1 = np.around(model1.predict(x_poly_test1))
# Evaluate on the November hold-out
r2_d3 = r2_score(y1test,y_poly_pred1)
mse3 = mean_squared_error(y1test,y_poly_pred1)
rmse3 = np.sqrt(mse3)
rmsle3 = mean_squared_log_error(y1test,y_poly_pred1)
# Quartic (degree-4) fit — the original comment mislabelled this "Quadratic"
polynomial_features2 = PolynomialFeatures(degree = 4)
x_poly2 = polynomial_features2.fit_transform(x1)
x_poly_test2 = polynomial_features2.fit_transform(x1test)
model2 = LinearRegression()
model2.fit(x_poly2,y1)
y_poly_pred2 = np.around(model2.predict(x_poly_test2))
# Evaluate
r2_d4 = r2_score(y1test,y_poly_pred2)
mse4 = mean_squared_error(y1test,y_poly_pred2)
rmse4 = np.sqrt(mse4)
#rmsle4 = mean_squared_log_error(y1test,y_poly_pred2)
# 5th Degree
polynomial_features3 = PolynomialFeatures(degree = 5)
x_poly3 = polynomial_features3.fit_transform(x1)
x_poly_test3 = polynomial_features3.fit_transform(x1test)
model3 = LinearRegression()
model3.fit(x_poly3,y1)
y_poly_pred3 = np.around(model3.predict(x_poly_test3))
# Evaluate
r2_d5 = r2_score(y1test,y_poly_pred3)
mse5 = mean_squared_error(y1test,y_poly_pred3)
rmse5 = np.sqrt(mse5)
#rmsle5 = np.sqrt(mean_squared_log_error(y1test,y_poly_pred3))
# 6th Degree
polynomial_features4 = PolynomialFeatures(degree = 6)
x_poly4 = polynomial_features4.fit_transform(x1)
x_poly_test4 = polynomial_features4.fit_transform(x1test)
model4 = LinearRegression()
model4.fit(x_poly4,y1)
y_poly_pred4 = np.around(model4.predict(x_poly_test4))
# Evaluate
r2_d6 = r2_score(y1test,y_poly_pred4)
mse6 = mean_squared_error(y1test,y_poly_pred4)
rmse6 = np.sqrt(mse6)
#rmsle6 = np.sqrt(mean_squared_log_error(y1test,y_poly_pred4))
# Create data table for the cubic fit (the only exported fit)
c = np.array(c)
y_poly_pred1 = y_poly_pred1.tolist()
y1test = np.around(y1test)
y1test = y1test.tolist()
# DataFrame
PRfits_India_DC = pd.DataFrame({'Dates': c, 'Cases':y1test, 'Degree = 3':y_poly_pred1})
# NOTE(review): DataFrame.to_excel returns None, so PR_November_DC is None.
PR_November_DC = PRfits_India_DC.to_excel("IndiaPolynomialRegressionNovemberCases.xlsx", sheet_name='NovemberDailyCasesPrediction')
# Coefficients and hold-out metrics of the cubic (best) fit
coef1 = model1.coef_
intercept1 = model1.intercept_
PR3BestEval_India_DC = pd.DataFrame({'R2':[r2_d3],'RMSE':[rmse3],'RMSLE':[rmsle3]})
# December predictions: extrapolate each model over days 299-336
future_days = np.arange(299,337,1)
future_days = future_days.reshape(-1,1)
future_days1 = polynomial_features1.fit_transform(future_days)
future_days2 = polynomial_features2.fit_transform(future_days)
future_days3 = polynomial_features3.fit_transform(future_days)
future_days4 = polynomial_features4.fit_transform(future_days)
future_pred_1 = np.around(model1.predict(future_days1))
future_pred_2 = model2.predict(future_days2)
future_pred_3 = model3.predict(future_days3)
future_pred_4 = model4.predict(future_days4)
future_days_gr = np.arange(1,np.size(future_days)+1,1)
# Plot the future predictions together with the last week of observations
plt.figure(figsize=(12,5))
plt.grid()
plt.scatter(np.arange(-6,1,1),y[292:299], facecolor = 'none', edgecolor = 'blue')
plt.plot(future_days_gr,future_pred_1,'y--', label = 'Degree: 3')
plt.plot(future_days_gr,future_pred_2,'g--', label = 'Degree: 4')
plt.plot(future_days_gr,future_pred_3,'b--', label = 'Degree: 5')
plt.plot(future_days_gr,future_pred_4,'c--', label = 'Degree: 6')
plt.title('Daily Cases In India: Polynomial Regression(Predictions)')
plt.ylabel('Daily Cases')
plt.xlabel('Days Since 11/23/20')
plt.legend()
plt.show()
# DataFrame of future predictions (only the cubic fit is exported)
future_pred_1 = future_pred_1.tolist()
future_pred_2 = future_pred_2.tolist()
future_pred_3 = future_pred_3.tolist()
future_pred_4 = future_pred_4.tolist()
PR_Pred_India_DC = pd.DataFrame({'Days since 11/23':future_days_gr, 'Degree = 3':future_pred_1})
# NOTE(review): to_excel returns None, so PR_December_DC is None.
PR_December_DC = PR_Pred_India_DC.to_excel("IndiaPolynomialRegressionDecemberCases.xlsx", sheet_name='DecemberDailyCasesPrediction')
# Print results
# NOTE(review): np.str is a deprecated alias of the builtin str (removed in
# NumPy 1.24); plain str() would be safer.
print('Cubic Polynomial Fit:')
print('Coefficients: ' + np.str(coef1))
print('Intercept: ' + np.str(intercept1))
print('RMSE Score: '+ np.str(rmse3))
print('RMSLE Score: '+ np.str(rmsle3))
print('R2 Score: '+ np.str(r2_d3))
print('-------------------------')
print('November Predictions:')
print(PRfits_India_DC)
print('-------------------------')
print('December Predictions:')
print(PR_Pred_India_DC)
| true |
0c4a5ec5a64ffbb80ac008d31ea416e88d24dbf3 | Python | saromanov/machine-learning-experiments | /experiments/lasagne_models.py | UTF-8 | 2,458 | 2.671875 | 3 | [] | no_license | import numpy as np
import theano
import theano.tensor as T
import lasagne
class TripletLayer(lasagne.layers.Layer):
    '''
    Triplet layer: one linear projection per input (anchor, positive,
    negative), combined into a pair of relative-distance terms.
    '''
    def __init__(self, inp, num_units, W1=lasagne.init.Normal(.01),
                 W2=lasagne.init.Normal(.01),
                 W3=lasagne.init.Normal(.01),
                 **kwargs):
        # NOTE(review): num_units is accepted but never stored or used.
        super(TripletLayer, self).__init__(inp, **kwargs)
        self.W1 = W1
        self.W2 = W2
        self.W3 = W3

    def _loss(self, X1, X2):
        # NOTE(review): this is sqr((X1-X2)^2), i.e. the 4th power of the
        # difference rather than a squared distance — confirm intent.
        return T.sqr((X1 - X2) ** 2)

    def get_output_for(self, inp, inpplus, inpminus, **kwargs):
        anchor = T.dot(inp, self.W1)
        positive = T.dot(inpplus, self.W2)
        negative = T.dot(inpminus, self.W3)
        dist_plus = self._loss(anchor, positive)
        dist_minus = self._loss(anchor, negative)
        rel_plus = dist_plus / (dist_plus + dist_minus)
        rel_minus = dist_minus / (dist_minus + dist_plus)
        return rel_plus + rel_minus
def l_mlp(epochs):
    """Build and train a small Lasagne MLP for `epochs` epochs.

    NOTE(review): `iterate_minibatches`, `X_val` and `y_val` are not defined
    in this file and must be supplied by the surrounding code.
    """
    inpd = T.tensor4('inputs')
    target = T.ivector('targets')
    inp = lasagne.layers.InputLayer(shape=(None,1,28,28,28))
    inp = lasagne.layers.DimshuffleLayer(inp, (0, 'x', 1,2))
    # Gaussian input noise acts as a regulariser.
    drop = lasagne.layers.GaussianNoiseLayer(inp, sigma=0.15)
    h1 = lasagne.layers.DenseLayer(drop, num_units=800, nonlinearity=lasagne.nonlinearities.rectify)
    # BUG FIX: dropout was applied to the raw input, leaving the 800-unit
    # hidden layer h1 disconnected from the network; feed h1 into dropout.
    drop2 = lasagne.layers.DropoutLayer(h1, p=0.5)
    # BUG FIX: the keyword is `nonlinearity` (singular); `nonlinearities=`
    # raised a TypeError when constructing the DenseLayer.
    out = lasagne.layers.DenseLayer(drop2, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)
    predict = lasagne.layers.get_output(out)
    loss = lasagne.objectives.categorical_crossentropy(predict, target)
    loss = loss.mean()
    # NOTE(review): evaluation would normally use
    # get_output(out, deterministic=True) to disable noise and dropout.
    test_predict = lasagne.layers.get_output(out)
    test_loss = lasagne.objectives.categorical_crossentropy(test_predict, target)
    test_loss = test_loss.mean()
    test_acc = T.mean(T.eq(T.argmax(test_predict, axis=1), target), dtype=theano.config.floatX)
    params = lasagne.layers.get_all_params(out, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
    train_fn = theano.function([inpd, target], loss, updates=updates)
    val_fn = theano.function([inpd, target], [test_loss, test_acc])
    print("Start training..")
    for epoch in range(epochs):
        training_err = 0
        training_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
            inputs, targets = batch
            # BUG FIX: the original did `train_err += train_err(...)`, calling
            # an undefined name; accumulate the training function's loss.
            training_err += train_fn(inputs, targets)
            training_batches += 1
l_mlp(20)
| true |
8b66cea360dabf71efc161afcbf4b6ad270f3fd3 | Python | NASA-AMMOS/MMGIS | /localization/pipeline/arm_position/msl/rover.py | UTF-8 | 9,779 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | """
Routines for handling articulation of the MSL rover.
Author: Parker Abercrombie <parker.abercrombie@jpl.nasa.gov>
"""
import math
import numpy as np
import pds
import transforms3d
from msl import pppcs
# Flag value from PPPCS Vol. 6 marking an unmeasured/invalid joint angle.
INVALID_ARM_ANGLE = 1e30
# Turret-mounted tools for which a rover-frame transform is available.
instruments = [ 'mahli', 'apxs', 'drill', 'drt', 'portioner', 'scoopbase' ]
# Map each tool name to the PPPCS function that builds its rover-nav-frame
# homogeneous transform from the five arm joint angles.
tool_transforms = {
    'mahli' : pppcs.RVRdMAHLI,
    'apxs' : pppcs.RVRdAPXS,
    'drill' : pppcs.RVRdDrill,
    'drt' : pppcs.RVRdDRT,
    'portioner' : pppcs.RVRdPortioner,
    'scoopbase' : pppcs.RVRdScoopBase
}
def mast_position():
    """Indicates mast position in rover navigation frame (metres)."""
    x, y, z = 0.80436, 0.55942, -1.90608
    return np.array([x, y, z])
class ArmPose(object):
    """Arm articulation state read from a PDS image header.

    Parses the five arm joint angles (radians) from the header's arm
    articulation group and provides turret/instrument geometry helpers
    based on the PPPCS forward-kinematics transforms.
    """

    # Flag value from PPPCS Vol. 6 meaning "angle not measured"; mirrors the
    # module-level INVALID_ARM_ANGLE so the class is usable standalone.
    _INVALID_ANGLE = 1e30

    def __init__(self, pds_header):
        """Initialize from a parsed PDS header (mapping of group name -> group)."""
        # Some PDS files use non-standard names for the arm group. Try a couple variations.
        possible_groups = [
            "ARM_ARTICULATION_STATE",
            "ARM_ARTICULATION_STATE_PARMS"
        ]
        for group_key in possible_groups:
            if group_key in pds_header:
                self.init_arm_angles(pds_header[group_key])
                break

    def _tool_transform(self, instrument):
        """Return the homogeneous transform for `instrument` at the current
        joint angles. Raises ValueError for an unknown instrument."""
        try:
            func = tool_transforms[instrument]
        except KeyError:
            raise ValueError("Unsupported instrument: " + instrument)
        return func(
            self.arm_angle_1,
            self.arm_angle_2,
            self.arm_angle_3,
            self.arm_angle_4,
            self.arm_angle_5)

    def get_instrument_position(self, instrument):
        """Compute the position (in rover nav frame) of an instrument on the arm turret."""
        mat = self._tool_transform(instrument)
        vec4 = mat * np.matrix([0, 0, 0, 1]).transpose()
        return np.squeeze(np.asarray(vec4[0:3]))  # Discard w coord

    def get_instrument_pointing(self, instrument):
        """Compute the pointing direction (in rover nav frame) of an instrument on the arm turret."""
        mat = self._tool_transform(instrument)
        # Z-axis is pointing axis. Transform the Z direction in the
        # instrument coordinate system to rover nav frame. See
        # Figure 3 on Page 14 of the PPCS document.
        # (The PPPCS doc notes on page 10 that the pointing axis changed to X
        # in Dec 2008, but updated transforms were never published; Z matches
        # the 2009-01-09 revision of the document.)
        vec4 = mat * np.matrix([0, 0, 1, 0]).transpose()
        direction = np.squeeze(np.asarray(vec4[0:3]))  # Discard w coord
        return transforms3d.utils.normalized_vector(direction)  # Normalize

    def get_turret_position(self):
        """Turret position (homogeneous 4-vector) in rover nav frame."""
        mat = pppcs.RVRdARM6(
            self.arm_angle_1,
            self.arm_angle_2,
            self.arm_angle_3,
            self.arm_angle_4,
            self.arm_angle_5)
        return mat * np.matrix([0, 0, 0, 1]).transpose()

    ##############################
    ## PDS image parsing
    ##############################

    def init_arm_angles(self, arm_group):
        """Populate self.arm_angle_1..5 from the arm articulation group."""
        arm_angles = self.parse_angles(arm_group)
        self.arm_angle_1 = self.get_arm_angle(arm_angles, ["JOINT 1 AZIMUTH", "JOINT 1 SHOULDER AZIMUTH"])
        self.arm_angle_2 = self.get_arm_angle(arm_angles, ["JOINT 2 ELEVATION", "JOINT 2 SHOULDER ELEVATION"])
        self.arm_angle_3 = self.get_arm_angle(arm_angles, ["JOINT 3 ELBOW"])
        self.arm_angle_4 = self.get_arm_angle(arm_angles, ["JOINT 4 WRIST"])
        self.arm_angle_5 = self.get_arm_angle(arm_angles, ["JOINT 5 TURRET"])

    def parse_angles(self, arm_group):
        """Return {stripped angle name: angle in radians} for the group."""
        angle_strings = arm_group["ARTICULATION_DEVICE_ANGLE"]
        angles = [self.parse_angle(ss) for ss in angle_strings]
        angle_names = arm_group["ARTICULATION_DEVICE_ANGLE_NAME"]
        if len(angles) != len(angle_names):
            raise ValueError("Malformed image header. Length of articulation angle list should match length of angle name list")
        angle_dict = {}
        for name, angle in zip(angle_names, angles):
            angle_dict[name.strip()] = angle
        return angle_dict

    def get_arm_angle(self, arm_group, possible_joint_names):
        """Read one joint angle (radians) from a parsed dictionary of angles.

        Prefers the resolver reading and falls back to the encoder (or raw)
        value when the resolver is missing or invalid (1e30). When both are
        present, choose_resolver_or_encoder_angle arbitrates.

        Parameters:
            arm_group: Dictionary of named angles.
            possible_joint_names: Candidate names for the joint; some APXS
                files use non-standard names (e.g. "JOINT 1 AZIMUTH" vs
                "JOINT 1 SHOULDER AZIMUTH"). The first matching name wins.

        Returns:
            Joint angle in radians (or None when only an invalid reading
            exists). Raises KeyError when no candidate name matches.
        """
        for joint in possible_joint_names:
            resolver_key = joint + "-RESOLVER"
            encoder_key = joint + "-ENCODER"
            # Parse out resolver and encoder angles, and then decide which to use
            resolver_angle = arm_group[resolver_key] if resolver_key in arm_group else None
            encoder_angle = arm_group[encoder_key] if encoder_key in arm_group else None
            raw_angle = arm_group[joint] if joint in arm_group else None
            # In the normal case we have both resolver and encoder angles
            if resolver_angle is not None and encoder_angle is not None:
                return self.choose_resolver_or_encoder_angle(resolver_angle, encoder_angle)
            elif resolver_angle is not None:
                # Only have the resolver angle. Return it as long as it is valid.
                return resolver_angle if self.is_valid_arm_angle(resolver_angle) else None
            elif encoder_angle is not None:
                return encoder_angle if self.is_valid_arm_angle(encoder_angle) else None
            elif raw_angle is not None:
                return raw_angle if self.is_valid_arm_angle(raw_angle) else None
        # Unable to find any of the possible angle names.
        raise KeyError("Unable to find angle for joint " + joint)

    def choose_resolver_or_encoder_angle(self, resolver_angle, encoder_angle, resolver_tolerance_degrees=10):
        """Return the resolver or encoder angle (radians), preferring the resolver.

        When both readings are valid but disagree by more than
        `resolver_tolerance_degrees`, the resolver is assumed wrong and the
        encoder value is returned instead.
        """
        if self.is_valid_arm_angle(resolver_angle):
            # BUG FIX: the angles are radians, so the degree tolerance must be
            # converted with math.radians(); the original used math.degrees(),
            # inflating 10 degrees to ~573 rad and making the check a no-op.
            angles_very_different = math.fabs(resolver_angle - encoder_angle) > math.radians(resolver_tolerance_degrees)
            # BUG FIX: is_valid_arm_angle is a method; the original called it
            # without `self.` (a NameError, had the branch ever been reached).
            if angles_very_different and self.is_valid_arm_angle(encoder_angle):
                # BUG FIX: the original message concatenated str with int and
                # referenced an undefined `filename`.
                print("Resolver and encoder angles more than {0} degrees different. Using encoder angle.".format(resolver_tolerance_degrees))
                return encoder_angle
            return resolver_angle
        return encoder_angle

    def is_valid_arm_angle(self, angle, epsilon=1e-10):
        """Determine whether `angle` represents a valid measurement.

        Compares the angle to the Invalid Angle flag from PPPCS Vol 6.

        Parameters:
            angle: Angle to test.
            epsilon: Floating point tolerance; angles within this many
                radians of the flag value are considered invalid.
        """
        return math.fabs(angle - self._INVALID_ANGLE) > epsilon

    def parse_angle(self, angle_str):
        """Parse an angle in radians from a string of the form "##.#### <rad>"."""
        parts = angle_str.strip().split(' ')
        # Invalid angles don't always have <rad> on them. If this angle is invalid
        # don't assert that we need <rad> to be specified
        if len(parts) == 1:
            d = float(parts[0])
            # BUG FIX: the method call was missing `self.` in the original.
            if not self.is_valid_arm_angle(d):
                return d
        if len(parts) != 2:
            raise ValueError("Unexpected angle format: " + angle_str + ". Expecting '##.### <rad>'")
        if parts[1] != "<rad>":
            raise NotImplementedError("Parsing non-radian angles is not supported")
        return float(parts[0])
| true |
d5abb5ae8f9639bc14d3cdb5125d70ae9cc2d5a0 | Python | danielvogler/thermal_spallation_model | /scripts/temperature_profile.py | UTF-8 | 2,776 | 2.953125 | 3 | [] | no_license | '''
daniel vogler
'''
from matplotlib import pyplot as mp
import numpy as np
import csv
# --- plotting parameters ---
markerSize = 10.0
lineStyle = 'none'
legendLocation = "upper left"
Color = ['b', 'r', 'm', 'g']
conversionFactor = 1000
fontSize = 15
figuresize = [20, 16]
### temperature
# Temperature bounds and intermediate levels, in Kelvin.
T_max = 1200
T_min = 300.001
T_delta = T_max - T_min
T_initial = 310.002
T_intermediate = 330.003
# Mean and standard deviation of the Gaussian spatial profile.
T_mu = 0.0
T_sigma = 0.007
### times
# Time stamps (seconds) written to the output profile file.
times = [3.0, 4.0]
y_location = 0.15
# Sampling extent along x, in meters (profile spans x_location[0]..x_location[3]).
x_location = [-0.023, -0.0, 0.0, 0.023]
### number of points
points = 100
### gaussian curve
def gaussian(x, mu, sig):
    """Unnormalized Gaussian bell curve at *x* (scalar or ndarray): peak value 1 at x == mu."""
    deviation_sq = np.power(x - mu, 2.)
    return np.exp(-deviation_sq / (2 * np.power(sig, 2.)))
### gaussian profile between minima and maxima
# Evaluate the three temperature profiles over the x extent.
# (Single-element loop: mu/sig come from (T_mu, T_sigma).)
for mu, sig in [(T_mu, T_sigma) ]:
    temp_initial = (T_initial-T_min)*gaussian(np.linspace(x_location[0], x_location[3], points), mu, sig) + T_min
    temp_intermediate = (T_intermediate-T_min)*gaussian(np.linspace(x_location[0], x_location[3], points), mu, sig) + T_min
    temp_final = T_delta*gaussian(np.linspace(x_location[0], x_location[3], points), mu, sig) + T_min
location = np.linspace(x_location[0], x_location[3], points)
# Plot the final temperature profile.
mp.figure(num=None, figsize=(figuresize[0], figuresize[1]), dpi=80, facecolor='w', edgecolor='k')
mp.plot(location,temp_final, marker='o')
mp.ylabel('Temperature [K]', fontsize = fontSize)
mp.xlabel('Location [m]', fontsize = fontSize)
mp.xlim([x_location[0], x_location[3]])
# Plot the initial temperature profile.
mp.figure(num=None, figsize=(figuresize[0], figuresize[1]), dpi=80, facecolor='w', edgecolor='k')
mp.plot(location,temp_initial, marker='o')
mp.ylabel('Temperature [K]', fontsize = fontSize)
mp.xlabel('Location [m]', fontsize = fontSize)
mp.xlim([x_location[0], x_location[3]])
### write temperature profile to file
# Space-delimited file: AXIS Y / AXIS X / AXIS T headers followed by DATA rows
# (one row per time stamp: flat T_min profile, then the final profile).
fileToSave = str("temperature_profile.txt")
with open(fileToSave, 'w') as csvfile:
    writer = csv.writer(csvfile,delimiter=' ',quoting = csv.QUOTE_NONE,escapechar='\\',quotechar='')
    writer.writerow(['%s' %('AXIS Y')])
    writer.writerow(['%1.4f' %(y_location)])
    writer.writerow(['%s' %('AXIS X')])
    writer.writerow(['{:1.4f}'.format(x) for x in location])
    writer.writerow(['%s' %('AXIS T')])
    writer.writerow(['{:1.4f}'.format(x) for x in times])
    writer.writerow(['%s' %('DATA')])
    writer.writerow([T_min for x in temp_initial])
    #writer.writerow(['{:1.4f}'.format(x) for x in temp_intermediate])
    writer.writerow(['{:1.4f}'.format(x) for x in temp_final])
mp.show()
#temp_right = T_delta*gaussian(np.linspace(x_location[2], x_location[3], 2*points), mu, sig) + T_min
#temp_nozzle = T_delta*gaussian(np.linspace(x_location[1], x_location[2], 1), mu, 1000000) + T_min
#temp = np.concatenate((temp_left, temp_nozzle, temp_right), axis=0)
#temp = np.concatenate((temp_left, temp_right), axis=0)
| true |
07d1a674fabd2c68024414d7d34e44c1a07e668f | Python | Brandonjja/Kattis | /Python/BatterUp.py | UTF-8 | 197 | 3.203125 | 3 | [] | no_license | # Batter Up https://open.kattis.com/problems/batterup
# Kattis "Batter Up": slugging percentage = total bases / official at-bats.
# Strikeouts are encoded as negative values and excluded from both the
# numerator and the denominator.
n = int(input())  # number of at-bats (consumed; list length is implicit below)
data = list(map(int, input().split()))

official = [bases for bases in data if bases >= 0]  # drop strikeouts
# Float division, as before. (Renamed from `sum`/`c`: the original shadowed
# the builtin `sum`; this version also uses it.)
print(sum(official) / len(official))
def encontra_maximo(matriz):
    """Return the largest absolute value in *matriz* (a list of lists).

    An empty matrix yields 0, matching the accumulator's starting value.
    """
    return max((abs(elemento) for linha in matriz for elemento in linha), default=0)
| true |
3a039d3a9fec7a249bbf683c731d563b27c61f3d | Python | youlanhai/ProtoParser | /generators/ListGenerator.py | UTF-8 | 1,272 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
from Cheetah.Template import Template
from .Generator import Generator
class ListGenerator(Generator):
    '''Message file list generator. Used for debugging purposes.'''
    def generate(self, inputFile, outputFile, module):
        # Module name = input file's basename without extension.
        self.inputFile = inputFile
        self.moduleName = os.path.splitext(os.path.basename(inputFile))[0]
        ret = []
        with open(outputFile, "w", encoding="utf-8", newline="\n") as f:
            self.stream = f
            for fileDesc in module.files.values():
                self.collectMessages(ret, fileDesc)
            # NOTE(review): indentation was reconstructed — the render is
            # assumed to happen once, after collecting from every file,
            # while the output stream is still open. Confirm against the
            # upstream repository.
            self.writeMessageList(ret)
    def collectMessages(self, ret, fileDesc):
        # Append one (cmd, mode, method, protoName) tuple per attribute of
        # every "message" class description in the file.
        for clsDesc in fileDesc.codes:
            if clsDesc.type != "message": continue
            protoName = "%s.%s" % (self.moduleName, clsDesc.name)
            for attr in clsDesc.attributes:
                mode = attr["mode"]
                cmd = attr["cmd"]
                method = attr["method"]
                ret.append((cmd, mode, method, protoName))
        return
    def writeMessageList(self, messages):
        # Bucket messages by their mode field (msg[1]): "up" vs "dn".
        split = {"up" : [], "dn" : []}
        for msg in messages:
            split[msg[1]].append(msg)
        namespace = {
            "messages" : messages,
            "up_messages" : split["up"],
            "dn_messages" : split["dn"],
        }
        # Render the Cheetah template against the collected namespace and
        # write it to the open output stream.
        fmt = self.template.TEMPLATE
        tpl = Template(fmt, searchList = [namespace, self, self.template])
        self.stream.write(str(tpl))
| true |
4614d262192546dd41d95102b96cc555d5d9c360 | Python | vocalpy/vak | /src/vak/datasets/frame_classification/window_dataset.py | UTF-8 | 8,163 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | from __future__ import annotations
import pathlib
from typing import Callable
import numpy as np
import numpy.typing as npt
import pandas as pd
from . import constants
from .metadata import Metadata
def get_window_inds(n_frames: int, window_size: int, stride: int = 1):
    """Compute the start indices of all windows in a :class:`WindowDataset`.

    The length of the returned vector is the number of windows in the
    dataset, i.e. the number of samples. During training, batches of
    windows are made by grabbing indices randomly from this vector, then
    slicing windows of ``window_size`` out of the arrays that hold the
    network inputs and targets.

    Parameters
    ----------
    n_frames : int
        Total number of frames in the dataset.
    window_size : int
        Length of each window, in frames.
    stride : int
        Step between consecutive window start indices.

    Returns
    -------
    window_inds : numpy.ndarray
        Vector of window start indices.
    """
    last_valid_start = n_frames - (window_size - 1)
    return np.arange(0, last_valid_start, stride)
class WindowDataset:
    """Dataset used for training neural network models
    on the frame classification task.
    where the source data consists of audio signals
    or spectrograms of varying lengths.
    Unlike
    :class:`vak.datasets.frame_classification.FramesDataset`,
    this class does not return entire samples
    from the source dataset.
    Instead each paired samples :math:`(x_i, y_i)`
    returned by this dataset class consists of
    a window :math:`x_i` of fixed length
    :math:`w` from the underlying data ``X`` of total length :math:`T`.
    Each :math:`y_i` is a vector of the same size :math:`w`, containing
    an integer class label for each *frame* in the window :math:`x_i`.
    The entire dataset consists of some number of windows
    :math:`I` determined by a ``stride`` parameter :math:`s`,
    :math:`I = (T - w) / s`.
    The underlying data consists of single arrays
    for both the input to the network ``X``
    and the targets for the network output ``Y``.
    These single arrays ``X`` and ``Y`` are
    created by concatenating samples from the source
    data, e.g., audio files or spectrogram arrays.
    (This is true for
    :class:`vak.datasets.frame_classification.FramesDataset`
    as well.)
    The dimensions of :math:`X` will be (channels, ..., frames),
    i.e., audio will have dimensions (channels, samples)
    and spectrograms will have dimensions
    (channels, frequency bins, time bins).
    The signal :math:`X` may be either audio or spectrogram,
    meaning that a frame will be either a single sample
    in an audio signal or a single time bin in a spectrogram.
    The last dimension of ``X`` will always be the
    number of total frames in the dataset,
    either audio samples or spectrogram time bins,
    and ``Y`` will be the same size, containing
    an integer class label for each frame.

    Attributes
    ----------
    X : numpy.ndarray
    Y : numpy.ndarray
    window_size : int
    frame_dur : float
        Duration of a single frame, in seconds.
    duration : float
        Total duration of the dataset.
    """
    def __init__(
        self,
        dataset_path: str | pathlib.Path,
        dataset_df: pd.DataFrame,
        split: str,
        sample_ids: npt.NDArray,
        inds_in_sample: npt.NDArray,
        window_size: int,
        frame_dur: float,
        stride: int = 1,
        window_inds: npt.NDArray | None = None,
        transform: Callable | None = None,
        target_transform: Callable | None = None
    ):
        self.dataset_path = pathlib.Path(dataset_path)
        self.split = split
        # Keep only the rows belonging to the requested split.
        dataset_df = dataset_df[dataset_df.split == split].copy()
        self.dataset_df = dataset_df
        # Per-sample paths to the saved frames / frame-label .npy files.
        self.frames_paths = self.dataset_df[constants.FRAMES_NPY_PATH_COL_NAME].values
        self.frame_labels_paths = self.dataset_df[constants.FRAME_LABELS_NPY_PATH_COL_NAME].values
        # sample_ids maps every frame index in the concatenated data to the
        # source sample it came from; inds_in_sample maps it to its index
        # within that sample.
        self.sample_ids = sample_ids
        self.inds_in_sample = inds_in_sample
        self.window_size = window_size
        self.frame_dur = float(frame_dur)
        self.stride = stride
        # Compute window start indices lazily when not provided.
        if window_inds is None:
            window_inds = get_window_inds(sample_ids.shape[-1], window_size, stride)
        self.window_inds = window_inds
        self.transform = transform
        self.target_transform = target_transform
    @property
    def duration(self):
        # Total duration = number of frames * duration of one frame.
        return self.sample_ids.shape[-1] * self.frame_dur
    @property
    def shape(self):
        tmp_x_ind = 0
        one_x, _ = self.__getitem__(tmp_x_ind)
        # used by vak functions that need to determine size of window,
        # e.g. when initializing a neural network model
        return one_x.shape
    def __getitem__(self, idx):
        """Return one (frames, frame_labels) window pair for window index *idx*."""
        window_idx = self.window_inds[idx]
        sample_ids = self.sample_ids[window_idx:window_idx + self.window_size]
        uniq_sample_ids = np.unique(sample_ids)
        if len(uniq_sample_ids) == 1:
            # Window lies entirely inside one source sample.
            sample_id = uniq_sample_ids[0]
            frames = np.load(self.dataset_path / self.frames_paths[sample_id])
            frame_labels = np.load(self.dataset_path / self.frame_labels_paths[sample_id])
        elif len(uniq_sample_ids) > 1:
            # Window spans a boundary: load and concatenate every sample it touches.
            frames = []
            frame_labels = []
            for sample_id in sorted(uniq_sample_ids):
                frames.append(
                    np.load(self.dataset_path / self.frames_paths[sample_id])
                )
                frame_labels.append(
                    np.load(self.dataset_path / self.frame_labels_paths[sample_id])
                )
            if all([frames_.ndim == 1 for frames_ in frames]):
                # --> all 1-d audio vectors; if we specify `axis=1` here we'd get error
                frames = np.concatenate(frames)
            else:
                frames = np.concatenate(frames, axis=1)
            frame_labels = np.concatenate(frame_labels)
        else:
            raise ValueError(
                f"Unexpected number of ``uniq_sample_ids``: {uniq_sample_ids}"
            )
        # Slice the window out, starting at its offset within the (possibly
        # concatenated) sample; frames are indexed along the last dimension.
        inds_in_sample = self.inds_in_sample[window_idx]
        frames = frames[..., inds_in_sample:inds_in_sample + self.window_size]
        frame_labels = frame_labels[inds_in_sample:inds_in_sample + self.window_size]
        if self.transform:
            frames = self.transform(frames)
        if self.target_transform:
            frame_labels = self.target_transform(frame_labels)
        return frames, frame_labels
    def __len__(self):
        """number of batches"""
        return len(self.window_inds)
    @classmethod
    def from_dataset_path(
        cls,
        dataset_path: str | pathlib.Path,
        window_size: int,
        stride: int = 1,
        split: str = "train",
        transform: Callable | None = None,
        target_transform: Callable | None = None
    ):
        """Alternate constructor: build a WindowDataset from a prepared
        dataset directory (metadata, csv, and per-split index arrays)."""
        dataset_path = pathlib.Path(dataset_path)
        metadata = Metadata.from_dataset_path(dataset_path)
        frame_dur = metadata.frame_dur
        dataset_csv_path = dataset_path / metadata.dataset_csv_filename
        dataset_df = pd.read_csv(dataset_csv_path)
        split_path = dataset_path / split
        sample_ids_path = split_path / constants.SAMPLE_IDS_ARRAY_FILENAME
        sample_ids = np.load(sample_ids_path)
        inds_in_sample_path = split_path / constants.INDS_IN_SAMPLE_ARRAY_FILENAME
        inds_in_sample = np.load(inds_in_sample_path)
        # Window indices are optional on disk; recomputed in __init__ if absent.
        window_inds_path = split_path / constants.WINDOW_INDS_ARRAY_FILENAME
        if window_inds_path.exists():
            window_inds = np.load(window_inds_path)
        else:
            window_inds = None
        return cls(
            dataset_path,
            dataset_df,
            split,
            sample_ids,
            inds_in_sample,
            window_size,
            frame_dur,
            stride,
            window_inds,
            transform,
            target_transform
        )
| true |
d7abbdb68a78de57e08a2183d4aa63d1401410aa | Python | pfirsich/Pywi | /transform.py | UTF-8 | 757 | 2.953125 | 3 | [] | no_license | import math
import numpy as np
import npshortcuts as nps
def rotate(angle, vec):
    """Rotate the 2-D vector *vec* counter-clockwise by *angle* degrees."""
    rad = angle / 180.0 * math.pi
    sin_a, cos_a = math.sin(rad), math.cos(rad)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]], dtype=float)
    return rotation.dot(vec)
class Transform(object):
    """2-D transform: offset and scale a point, rotate it, then translate.

    ``apply`` computes:
        position + rotate(angle, (point + offset) * scale + absoluteOffset)
    """
    def __init__(self):
        self.position = nps.vec2(0, 0)        # final translation
        self.offset = nps.vec2(0, 0)          # applied before scaling
        self.absoluteOffset = nps.vec2(0, 0)  # applied after scaling, before rotation
        self._scale = nps.vec2(1, 1)
        self.angle = 0                        # degrees

    @property
    def scale(self):
        return self._scale

    @scale.setter
    def scale(self, value):
        # Anything indexable is taken as an (sx, sy) pair; a bare scalar is
        # broadcast to both axes.
        if hasattr(value, "__getitem__"):
            self._scale = value
        else:
            self._scale = nps.vec2(value, value)

    def apply(self, point):
        """Apply the full transform to *point* and return the result."""
        local = (point + self.offset) * self._scale + self.absoluteOffset
        return self.position + rotate(self.angle, local)
c9d4ffbd6fa9cc2a961e25c410df501b5c45f60f | Python | WouterHeslinga/SpInDP | /firmware/vision_files/detect.py | UTF-8 | 1,831 | 2.859375 | 3 | [] | no_license | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
import math
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    img = frame.array
    # show the frame
    # Blur, grayscale, and binarize at intensity 130 to isolate bright regions.
    img = cv2.GaussianBlur(img, (13,13), 0)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY)
    cv2.imshow('thresh', thresh)
    # NOTE(review): 3-value unpacking is the OpenCV 3.x findContours API.
    image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
    for i in range(len(contours)):
        contour = contours[i]
        area = cv2.contourArea(contour)
        # Ignore small contours.
        if area < 500:
            continue
        perimeter = cv2.arcLength(contour, True)
        # Circularity (isoperimetric ratio): 1.0 for a perfect circle.
        factor = 4 * math.pi * area / perimeter ** 2
        if factor > .3:
            cv2.drawContours(img, [contour], -1, (0, 255, 0), 2)
            # Centroid from image moments.
            M = cv2.moments(contour)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, ("Factor: %f" % factor),(cx,cy), font, .5, (0,255,0),2)
    # Clear the stream buffer so the next frame can be captured.
    rawCapture.truncate(0)
    cv2.imshow('img', img)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| true |
960b38e7a6899bb9d83410c5d7fd7ef0804920b3 | Python | steamed-stuffed-bread/Leetcode-calender | /645_set_mismatch.py | UTF-8 | 493 | 2.515625 | 3 | [] | no_license | # easy
class Solution(object):
    """LeetCode 645 "Set Mismatch": find the duplicated and missing numbers."""

    def findErrorNums(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Marks visited values by negating nums[v - 1]; a slot that is already
        negative reveals the duplicate, and the slot left positive afterwards
        reveals the missing number. Note: mutates *nums* in place.
        """
        duplicate = 0
        for value in nums:
            slot = abs(value) - 1
            if nums[slot] > 0:
                nums[slot] = -nums[slot]
            else:
                duplicate = abs(value)
        missing = 0
        for index, entry in enumerate(nums):
            if entry > 0:
                missing = index + 1
        return [duplicate, missing]
| true |
79ceefe62644528b9ae68d5253306171e8582693 | Python | lantip/indonesian_stemmer | /test.py | UTF-8 | 679 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
from indonesian_stemmer import ILStemmer
# Demo of ILStemmer option flags (NOTE: Python 2 — mixes print statements
# with parenthesized prints).
stemmer = ILStemmer()
# First pass: permissive options, hashtags allowed, no instance sorting.
stemmer.OPTION['NO_NO_MATCH'] = False
stemmer.OPTION['NO_DIGIT_ONLY'] = False
stemmer.OPTION['ALLOW_HASHTAGS'] = True
text = 'memperingati tujuh #belasan'
print 'NO SORT_INSTANCE'
print('stemmed with suffix and prefix:', stemmer.stem(text))
print('stemmed root only:', stemmer.stem_root(text))
# Second pass: strict options with SORT_INSTANCE enabled, same input text.
stemmer.OPTION['NO_NO_MATCH'] = True
stemmer.OPTION['NO_DIGIT_ONLY'] = True
stemmer.OPTION['ALLOW_HASHTAGS'] = False
stemmer.OPTION['SORT_INSTANCE'] = True
print "SORT_INSTANCE True"
print('stemmed with suffix and prefix:', stemmer.stem(text))
print('stemmed root only:', stemmer.stem_root(text))
| true |
fa63e2063b1a240df2c7225e580e7ec44fe133b2 | Python | akhan118/Self-learing-quant | /inpaAir.py | UTF-8 | 1,962 | 2.703125 | 3 | [
"MIT"
] | permissive | import pandas
import matplotlib.pyplot as plt
import gym
import random
import os
import numpy
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from sklearn import preprocessing
import tensorflow as tf
from tensorflow import keras
from keras.layers.recurrent import LSTM
from sklearn.preprocessing import Normalizer
from numpy import array
import math
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
numpy.random.seed(7)
# Load the second CSV column only; skipfooter=3 drops the last three lines.
dataframe = pandas.read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')
# Normalize values into [0, 1] before feeding the LSTM.
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    """Build supervised (X, y) pairs from a single-column time series.

    Each X row is a run of *look_back* consecutive values; the matching y is
    the value that immediately follows that run.
    """
    features, targets = [], []
    for start in range(len(dataset) - look_back - 1):
        features.append(dataset[start:(start + look_back), 0])
        targets.append(dataset[start + look_back, 0])
    return numpy.array(features), numpy.array(targets)
# reshape into X=t and Y=t+1
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# print(trainX)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))  # 4 LSTM units, one time step
model.add(Dense(1))  # single-value regression head
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
trainPredict = model.predict(trainX)
print(trainPredict)
| true |
9dd18acd7eda0d96df7f43674b3ca452c5e0b636 | Python | kharkarag/continuous-alpha-npi | /gym-drawobjects/shape_generator.py | UTF-8 | 1,955 | 2.84375 | 3 | [] | no_license | import time
import math
import numpy as np
from PIL import Image
from skimage.draw import line, circle_perimeter
# import gym
# import gym_drawobjects
# env = gym.make("drawobjects-v0")
# square_actions = [np.array([0,-10])] * 5 + \
# [np.array([10,0])] * 5 + \
# [np.array([0,10])] * 5 + \
# [np.array([-10,0])] * 5
# triangle_actions = [np.array([5,-5])] * 5 + \
# [np.array([5,5])] * 5 + \
# [np.array([-10,0])] * 5
# c_size = 4
# c_rate = 4
# circle_actions = [np.array([c_size*c_rate*math.sin(x), c_size*c_rate*math.cos(x)]) for x in np.arange(0, 6.28, 0.1*c_rate)]
# square_vertices = [(100,100), (50,100), (50, 150), (100, 150), (100,100)]
# Closed polylines (first vertex repeated at the end) in (row, col) pixel coordinates.
square_vertices = [(100,100), (50,100), (50, 50), (100, 50), (100,100)]
triangle_vertices = [(100,100), (125,75), (150, 100), (100,100)]
def draw_figure(vertices, name):
    """Rasterize the closed polyline *vertices* onto a white 200x200 canvas
    and save it to the image file *name*."""
    canvas = np.full((200, 200), 255, dtype=np.uint8)
    for prev, cur in zip(vertices, vertices[1:]):
        rr, cc = line(prev[0], prev[1], cur[0], cur[1])
        canvas[rr, cc] = 0
    Image.fromarray(canvas).save(name)
# Write the individual reference shapes.
draw_figure(square_vertices, 'ref_img/square.jpg')
draw_figure(triangle_vertices, 'ref_img/triangle.jpg')
# Circle reference image: perimeter pixels of a radius-25 circle at (100, 125).
img = np.ones((200, 200), dtype=np.uint8)*255
rr, cc = circle_perimeter(100, 125, 25)
img[rr, cc] = 0
im = Image.fromarray(img)
im.save('ref_img/circle.jpg')
# Combined figure: square, triangle, and circle on a single canvas.
img = np.ones((200, 200), dtype=np.uint8)*255
for i, vertex in enumerate(square_vertices[1:], start=1):
    rr, cc = line(square_vertices[i-1][0], square_vertices[i-1][1], vertex[0], vertex[1])
    img[rr, cc] = 0
for i, vertex in enumerate(triangle_vertices[1:], start=1):
    rr, cc = line(triangle_vertices[i-1][0], triangle_vertices[i-1][1], vertex[0], vertex[1])
    img[rr, cc] = 0
rr, cc = circle_perimeter(100, 125, 25)
img[rr, cc] = 0
im = Image.fromarray(img)
im.save('ref_img/total_figure.jpg') | true |
b0027306c8a44915cdb79fb85b5e4a0f3082b867 | Python | cosmicRover/algoGrind | /backtracking/GenPar.py | UTF-8 | 620 | 3.4375 | 3 | [] | no_license | '''
Intro to backtrack.
Choice
Constraints
Goal
Time and space O(4^n/sqrt(n))
'''
class Solution:
def generateParenthesis(self, n: int) -> [str]:
return self.backtrack('', 0, 0, n, [])
def backtrack(self, s, left, right, n, arr):
#if we reach the goal, we append
if len(s) == 2 * n:
arr.append(s)
return
#permutation branches
if left < n:
self.backtrack(s+'(', left+1, right, n, arr)
if right < left:
self.backtrack(s+')', left, right+1, n, arr)
return arr | true |
eb656207bde2a34aa7f784080a8c3e35a2f1380a | Python | ogmobot/code-scraps | /py/countdown.py | UTF-8 | 206 | 2.875 | 3 | [] | no_license | #!/bin/python3
from sys import argv
def main(num=None):
    """Count down from *num* to 0, printing one value per line.

    When *num* is None (the original no-argument call), it is taken from
    ``argv[1]`` if present, otherwise defaults to 10. Accepting *num*
    directly is a backward-compatible generalization that makes the
    function reusable and testable.
    """
    if num is None:
        num = int(argv[1]) if len(argv) > 1 else 10
    while num >= 0:
        print(num)
        num -= 1
    return
# Run the countdown only when executed as a script (not on import).
if __name__=="__main__":
    main()
| true |
dd2e5648c870ad6992a26c40cd4d1921733bc80f | Python | KzZe-Sama/Python_Assignment_II | /Qn11.py | UTF-8 | 239 | 3.234375 | 3 | [] | no_license | # FileName
# FileName: print the name with its extension removed.
# str.partition is the idiomatic way to take everything before the first dot
# (the original used the unusual `name[slice(name.index("."))]` form).
file_name = "ReadMe.txt"
print(file_name.partition(".")[0])

# Checking For Arbitrary Length
print("Checking if it works on arbitrary length:")
check = "abcdefghijklmnopqrstuvwxyz.pdf"
print(check.partition(".")[0])
9f14eaa0dde5be3e3ab73cb07d0c3f655b4f9058 | Python | benscottie/py-sec | /load_filings.py | UTF-8 | 2,150 | 2.8125 | 3 | [] | no_license | # Libraries
from tqdm import tqdm
import multiprocessing
from datetime import datetime
import os
import time
from dotenv import load_dotenv
from scripts.data_utils import parse_item, get_audits, del_sec_dir
from scripts.db_utils import (create_postgress_engine,
add_filing_data)
# Tickers to ingest and the filing-year window passed to get_audits.
COMPANY_LIST = ['AAPL', 'MSFT', 'GOOG']
BEFORE_YR = 2021
AFTER_YR = 2020
OUTPUT_DIR = ''
TABLE_NAME = 'item_sentiment'

# Pull DB credentials from the local .env file into the environment.
load_dotenv()
def main(company_list, before_yr, after_yr, output_dir, table_name):
    """Download SEC filings per company, parse items in parallel, and store
    the resulting records (with a derived `year` field) in the database."""
    print(f'Connecting to Database...')
    engine = create_postgress_engine(username=os.getenv('DB_USER'),
                                     password=os.getenv('DB_PASSWORD'),
                                     dialect_driver='postgresql',
                                     host='sec-test.csfr6b0gmrjt.us-east-1.rds.amazonaws.com',
                                     port='5432',
                                     database=os.getenv('DB_NAME')) # connect
    for company in tqdm(company_list, desc='Retrieving & adding company filing(s) to database', unit='company'):
        print(f'Retrieving filing(s) for {company} between {after_yr} & {before_yr}...')
        fpaths = get_audits(company, before_yr, after_yr, output_dir)
        print(f'{len(fpaths)} file(s) retrieved, parsing items...')
        # Parse each downloaded filing in a 4-process pool.
        with multiprocessing.Pool(4) as pool:
            records = pool.starmap(parse_item, [(path, company, output_dir) for path in fpaths])
        # Get Year Value from Date
        year_fn = lambda x: datetime.strptime(x['date'], '%B %d, %Y').year
        records = [dict(r, year=year_fn(r)) for r in records]
        print(f'Adding {company} filing(s) to database...')
        add_filing_data(engine, records, table_name)
        print(f'{company} filing(s) stored to database')
    # Delete Downloaded Directory
    # NOTE(review): indentation reconstructed — cleanup is assumed to run
    # once, after all companies are processed; confirm against upstream.
    del_sec_dir(output_dir)
if __name__ == "__main__":
    # Time the full ingestion run over the configured company list.
    start_time = time.time()
    main(COMPANY_LIST, BEFORE_YR, AFTER_YR, OUTPUT_DIR, TABLE_NAME)
    print(f'Storing of record(s) complete')
    print(f'Time Elapsed: {time.time()-start_time:.2f} seconds')
| true |
867e0f562a92e5747623cd4325e7c628c3a3b3cf | Python | Trixter9994/lazero | /bootstrap/keller/src/fakegen.py | UTF-8 | 148 | 2.703125 | 3 | [
"MIT"
] | permissive | # here we go, fake code.
# here we go, fake code: print 250,000 characters drawn uniformly at random
# (with a cryptographically secure RNG) from the code points chr(0)..chr(499).
import random

p = [chr(x) for x in range(500)]
rng = random.SystemRandom()  # hoisted: original built a new SystemRandom per character
# str.join instead of repeated `k += ...`, which allocates a new string each step
k = "".join(rng.choice(p) for _ in range(500 * 500))
print(k)
| true |
0ec76b6ab5015cd73839a43fa406e78e48c5f7c8 | Python | kami-sama-dp/vue_dp | / learnthread/decorate.py | UTF-8 | 516 | 3.828125 | 4 | [] | no_license | import time
def timethis(func):
    """Decorator that prints wall-clock timing around every call to *func*.

    The wrapped function's return value is passed through unchanged.
    """
    import functools

    @functools.wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def inner(*args, **kwargs):
        print('start timer:')
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print('end timer: %fs.' % (end - start))
        return result
    return inner
@timethis
# equivalent to `sleeps = timethis(sleeps)` — the name `sleeps` now refers to `inner`
def sleeps(seconds):
    """Sleep for *seconds* seconds, printing progress, and return *seconds*."""
    print('sleeps begins:')
    time.sleep(seconds)
    print('sleep %d seconds. sleep over.' % seconds)
    return seconds
print(sleeps(3)) | true |
97b2fdfa5abad84328aa3d8c84082907fcd52062 | Python | roger1993/odeGenerator | /src/graphParser.py | UTF-8 | 3,427 | 2.953125 | 3 | [] | no_license | import random
class Voter:
    """Graph node carrying an opinion, a uid, a friend list and a visited flag."""

    def __init__(self, opinion, uid):
        self._opinion = opinion
        self._friends = []
        self._uid = uid
        self._visited = False

    def get_friends(self):
        return self._friends

    def get_opinion(self):
        return self._opinion

    def get_uid(self):
        return self._uid

    def push_friend(self, friend):
        """Append *friend* to this voter's adjacency list."""
        self._friends.append(friend)

    def is_visited(self):
        return self._visited

    def visiting(self):
        """Mark this voter as visited (one-way flag used by traversals)."""
        self._visited = True
class Cluster:
    """Aggregated node: maps opinion options to counts for one cluster id."""

    def __init__(self, uid, option, number):
        self._opinion = {}
        self.update(option, number)
        self._uid = uid

    def get_opinion(self):
        return self._opinion

    def get_uid(self):
        return self._uid

    def update(self, option, number):
        """Record (or overwrite) the count stored for *option*."""
        self._opinion[option] = number
class Relationship:
    """Directed edge stored as an (id_from, id_to) tuple."""

    def __init__(self, id_from, id_to):
        self._nodes = (id_from, id_to)

    def get_nodes(self):
        return self._nodes
class Usr_dict:
    """Registry of Voter nodes keyed by uid; voters are created lazily."""

    def __init__(self):
        self.dict = {}

    def push_usr(self, uid, cond_list):
        """Return the voter for *uid*, creating it on first sight.

        Creation consumes one opinion from *cond_list* (via pop), so the
        pool shrinks only for brand-new uids.
        """
        if uid not in self.dict:
            self.dict[uid] = Voter(cond_list.pop(), uid)
        return self.dict[uid]
class Cluster_dict:
    """Registry of Cluster nodes keyed by uid."""

    def __init__(self):
        self.dict = {}

    def update_cluster(self, uid, option, number):
        """Record *number* for *option* under cluster *uid* and return the cluster.

        The cluster is created on first sight of *uid*.
        """
        if uid in self.dict:
            self.dict[uid].update(option, number)
        else:
            self.dict[uid] = Cluster(uid, option, number)
        return self.dict[uid]
class Edge_dict:
    """Append-only store of Relationship edges, indexed 0..n-1."""

    def __init__(self):
        self._edges = {}
        self._count = 0

    def push_edge(self, id_from, id_to):
        """Create, store, and return a new edge."""
        edge = Relationship(id_from, id_to)
        self._edges[self._count] = edge
        self._count += 1
        return edge

    def get_dict(self):
        return self._edges
def parser_main_node(fname, conditions, is_cluster, directed = False ):
    """Load an edge-list file into a dict of Voter nodes keyed by uid.

    conditions: iterable of (opinion, count) pairs; the opinion pool is
    shuffled and one opinion is consumed per newly seen node.
    is_cluster is accepted but unused in this function.
    Returns: {uid: Voter}.
    """
    if fname == None:
        fname = '../dat/practice.txt'
    # Build and shuffle the pool of opinions handed out to new nodes.
    cond_list = []
    for condition in conditions:
        cond_list += [condition[0]] * int(condition[1])
    random.shuffle(cond_list)
    udict = Usr_dict()
    func = lambda x, y: x.push_friend(y)
    print('Loading the graph file at ' + fname)
    with open(fname, 'r') as fin:
        for line in fin:
            # Each line: "<uid_from> <uid_to>"; nodes are created lazily.
            nod_from, nod_to = [udict.push_usr(uid, cond_list)\
                for uid in map(int, line.strip().split(' '))]
            func(nod_from, nod_to)
            # Undirected graphs get the reverse adjacency as well.
            if not directed: func(nod_to, nod_from)
    print('Loading completed\n')
    return udict.dict
def parser_main_edge(gfname, conditions, is_cluster, ifname = None):
    """Load a graph file into an edge dict plus a node dict.

    Non-cluster mode: nodes are Voters created from the shuffled opinion
    pool built from *conditions*. Cluster mode: nodes are Clusters filled
    from the info file *ifname* (lines of "option uid number").
    Returns: ({index: Relationship}, {uid: node}).
    """
    import random
    if gfname == None:
        gfname = '../dat/graph practice'
    if not is_cluster:
        # Opinion pool for lazily created Voter nodes.
        cond_list = []
        for condition in conditions:
            cond_list += [condition[0]] * int(condition[1])
        random.shuffle(cond_list)
        udict = Usr_dict()
    else:
        pass
    edict = Edge_dict()
    print('Loading the graph file at ' + gfname)
    if not is_cluster:
        with open(gfname, 'r') as gfin:
            for line in gfin:
                id_from, id_to = map(int, line.strip().split())
                udict.push_usr(id_from, cond_list)
                udict.push_usr(id_to, cond_list)
                edict.push_edge(id_from, id_to)
    else:
        udict = Cluster_dict()
        # NOTE(review): in cluster mode, cond_list is never built and ifname
        # defaults to None (open(None) would fail) — callers must supply ifname.
        with open(gfname, 'r') as gfin:
            with open(ifname, 'r') as ifin:
                for line in ifin:
                    option, uid, number = line.strip().split()
                    udict.update_cluster(uid, option, number)
                for line in gfin:
                    id_from, id_to = map(int, line.strip().split())
                    edict.push_edge(id_from, id_to)
    print('Loading completed')
    return edict.get_dict(), udict.dict
if __name__ == '__main__':
    # Manual smoke test in cluster mode.
    # NOTE(review): passes ifname=None with is_cluster=True, which would fail
    # at open() inside parser_main_edge — confirm the intended info file.
    edgeDict, graphDict = parser_main_edge(None, None, True)
| true |
d692a5ebb61638ecbb6b322107d296305cb8b7a1 | Python | AndrewMiranda/holbertonschool-machine_learning-1 | /supervised_learning/0x06-keras/12-test.py | UTF-8 | 640 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
"""File that contains the functions test_model"""
import tensorflow.keras as K
def test_model(network, data, labels, verbose=True):
"""
Function that tests a neural network
Args:
network is the network model to test
data is the input data to test the model with
labels are the correct one-hot labels of data
verbose is a boolean that determines if output should be
printed during the testing process
Returns: the loss and accuracy of the model with the testing
data, respectively
"""
evaluation = network.evaluate(data, labels, verbose=verbose)
return evaluation
| true |
fc9e0136e71740a9b269addf43388648fa606da4 | Python | Quantenradierer/SINner | /backend/npc_creator/tests/operations/base_integration_test.py | UTF-8 | 746 | 2.734375 | 3 | [] | no_license | import tempfile
from django.test import TestCase
class BaseIntegrationTest(TestCase):
    # Django test base class that provisions a scratch directory per test.

    def setUp(self) -> None:
        # Fresh temporary directory for each test; removed in tearDown.
        self.tempdir = tempfile.TemporaryDirectory()

    def tearDown(self) -> None:
        self.tempdir.cleanup()

    def mock_sqlite_file(self, mock_file):
        """
        This method replaces `config.SQLITE_FILE` with the mock object `mock_file`.
        It should be used in a test method decorator, for example:
        @patch('config.SQLITE_FILE', new_callable=str)
        def test_something(self, mock_file):
            self.mock_sqlite_file(mock_file)
            # Continue with the actual test here
        """
        # NOTE(review): this assumes *mock_file* behaves like a mock/patch
        # handle exposing `.return_value` and `.stopall`; with
        # `new_callable=str` (as in the docstring example) neither attribute
        # exists — confirm how callers actually invoke this helper.
        mock_file.return_value = 'mocked_file.sqlite'
        self.addCleanup(mock_file.stopall)
| true |
f3f9c92d385c53fbf56b77f5fbab02e9972a8589 | Python | dreamyyn/DeepStroke | /nonreperfusion_metrics.py | UTF-8 | 15,690 | 2.578125 | 3 | [] | no_license | import sys
sys.path.insert(0, '/Users/admin/stroke_DL/script_stroke')
import nibabel as nib
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.metrics import auc, precision_score, recall_score, roc_curve
from nibabel import processing
from skimage import morphology
from create_fig_for_model import *
def dice_score(y_true, y_pred, smooth=0.0000001, threshold_true=0.1, threshold_pred=0.5, model='>'):
    """Dice similarity coefficient between binarized truth and prediction.

    :param y_true: array for ground truth
    :param y_pred: array for model output
    :param smooth: small constant guarding against 0/0
    :param threshold_true: values >= this count as positive in y_true
    :param threshold_pred: cut-off applied to y_pred
    :param model: '>' keeps y_pred >= threshold_pred;
                  '<' keeps 0 < y_pred <= threshold_pred
    :return: Dice score in [0, 1]
    """
    truth_mask = y_true.flatten() >= threshold_true
    pred_flat = y_pred.flatten()
    if model == '<':
        pred_mask = np.logical_and(pred_flat <= threshold_pred, pred_flat > 0)
    else:
        pred_mask = pred_flat >= threshold_pred
    overlap = np.sum(truth_mask * pred_mask)
    return (2. * overlap) / (np.sum(truth_mask) + np.sum(pred_mask) + smooth)
def specificity(y_true, y_pred, smooth=0.00001, threshold_true=0.1, threshold_pred=0.5, model='>'):
    """True-negative rate: TN / (TN + FP).

    :param y_true: array for ground truth (negatives are < threshold_true)
    :param y_pred: array for model output
    :param smooth: small constant guarding against division by zero
    :param threshold_pred: cut-off applied to y_pred
    :param model: '>' treats y_pred >= threshold_pred as positive;
                  '<' treats 0 < y_pred <= threshold_pred as positive

    Fix: the previous version returned N / (N + FP), where N is the number
    of actual negatives — that over-estimates specificity. The standard
    definition is TN / (TN + FP) = (N - FP) / N.
    """
    y_neg_f = y_true.flatten() < threshold_true
    if model == '<':
        y_pred_pos_f = np.logical_and(y_pred.flatten() <= threshold_pred, y_pred.flatten() > 0)
    else:
        y_pred_pos_f = y_pred.flatten() >= threshold_pred
    false_pos = np.sum(y_neg_f * y_pred_pos_f)
    true_neg = np.sum(y_neg_f) - false_pos
    return true_neg / (np.sum(y_neg_f) + smooth)
def vol_diff(y_true, y_pred, threshold_true=0.1, threshold_pred=0.5, model='>'):
    """Signed volume error in voxels: predicted-positive count minus true-positive count."""
    truth_mask = y_true.flatten() >= threshold_true
    pred_flat = y_pred.flatten()
    if model == '<':
        pred_mask = np.logical_and(pred_flat <= threshold_pred, pred_flat > 0)
    else:
        pred_mask = pred_flat >= threshold_pred
    return np.sum(pred_mask) - np.sum(truth_mask)
def vol_pred(y_pred, threshold_pred=0.5):
    """Number of voxels predicted positive (value >= threshold_pred)."""
    return np.sum(y_pred.flatten() >= threshold_pred)
def weighted_dice(y_true, y_pred, smooth = 0.00001, threshold_true = 0.1, threshold_pred = 0.5):
    """Dice variant weighting ground truth 1.7x and prediction 0.3x in the denominator."""
    truth_mask = y_true.flatten() >= threshold_true
    pred_mask = y_pred.flatten() >= threshold_pred
    overlap = np.sum(truth_mask * pred_mask)
    denominator = 1.7 * np.sum(truth_mask) + 0.3 * np.sum(pred_mask) + smooth
    return (2. * overlap) / denominator
def get_max_for_each_slice(data):
    """Return the maximum value of each slice along the third axis of a 3-D volume."""
    assert len(data.shape) == 3 , 'input data is not 3d'
    return [np.max(data[:, :, slice_num]) for slice_num in range(data.shape[2])]
def define_laterality(data, threshold):
    """Classify which half of the first axis a lesion occupies in a 3-D volume.

    For every slice along the third axis whose maximum exceeds *threshold*,
    whichever half (split at the first-axis midpoint) has more total signal
    wins a count. Returns 'L' or 'R' for the half with more winning slices,
    'B' when both sides win on more than 3 slices, and '' when no slice
    exceeds the threshold. (Per the original comments, the lower half of the
    first axis is the left of the *image*, i.e. the right side of the brain.)
    """
    # Same 3-D guard as the per-slice-max helper this function relies on.
    assert len(data.shape) == 3 , 'input data is not 3d'
    midline = data.shape[0] // 2
    lesion_left = 0
    lesion_right = 0
    for slice_num in range(data.shape[2]):
        if np.max(data[:, :, slice_num]) <= threshold:
            continue
        left_mass = np.sum(data[:midline, :, slice_num])
        right_mass = np.sum(data[midline:, :, slice_num])
        if left_mass > right_mass:
            lesion_left += 1
        elif left_mass < right_mass:
            lesion_right += 1
    bilateral = (lesion_left > lesion_right and lesion_right > 3) or \
                (lesion_left < lesion_right and lesion_left > 3)
    if bilateral:
        return 'B'
    if lesion_left > lesion_right:
        return 'L'
    if lesion_right > lesion_left:
        return 'R'
    return ''
def metrics_output(y_true, y_pred, threshold_true, threshold_pred):
    '''output all the metrics including auc dice recall precision f1score and volume difference'''
    # Relies on sklearn's roc_curve/auc/precision_score/recall_score and the
    # sibling helpers (dice_score, specificity, vol_diff, vol_pred) defined
    # elsewhere in this file.
    fpr, tpr, thresholds = roc_curve(y_true > threshold_true, y_pred)
    auc_hemisphere = auc(fpr, tpr)
    precision = precision_score(y_true > threshold_true, y_pred > threshold_pred)
    recall = recall_score(y_true > threshold_true, y_pred > threshold_pred)
    dice = dice_score(y_true, y_pred, threshold_true=threshold_true, threshold_pred=threshold_pred)
    spec = specificity(y_true, y_pred, threshold_true=threshold_true, threshold_pred=threshold_pred)
    # 0.008 converts a voxel count to a volume — presumably 2x2x2 mm voxels
    # (8 mm^3 = 0.008 mL each); TODO confirm against the acquisition protocol
    voldiff = 0.008 * vol_diff(y_true, y_pred, threshold_true=threshold_true, threshold_pred=threshold_pred)
    volpred = 0.008 * vol_pred(y_pred, threshold_pred)
    # 0.0001 keeps the F1 denominator non-zero when precision + recall == 0
    f1score = 2 * precision * recall / (precision + recall + 0.0001)
    return auc_hemisphere, precision, recall, dice, spec, voldiff, volpred, f1score, fpr, tpr
list_nonreper_test1 = ["03032","01020","01002","05002","08009","03045","11003","01042","30058A","30082A","08010","30049A","30073A","30019"]
list_nonreper_test2 = ["09005","05011","05010","03002","01021","10002","30008A","30054","01010","30030A","01047","30032","03028","30087"]
list_nonreper_test3 = ["09007","05006","11004","01017","03040","01007","05008","30007","01036","09006","09002","30043","10001"]
list_nonreper_test4 = ["03025","30037","09003","05007","05012","05003","30027A","05005","11002","10006","10007","30084A","02003"]
list_nonreper_test5 = ["01027","01038","01041","02005","01040","08007","10004","03018","09004","01004","01045","10005","11001"]
list_reper_test1 = ["30012","01028","30042","30116","03027","30006A","03017","30068","03043","03016","01003","03046","30099","30117","30046A","30102","03042","30028","03047","01029","01006","30044"]
list_reper_test2 = ["08005","01048","30063","30101","05001","10003","03008","30069A","08008","30035A","03007","30041","30040","30096","30126","08003","30108","30097","03033","30120","01001","30039"]
# 30018A belongs to test 3, but with no GRE. so temperarily removed. if tested in any combination with no GRE, should include 30018A.
list_reper_test3 = ["30018A","03036","30122","30048A","30053","03039","30098","30055","03026","01015","03037","30127","30078A","30071A","03024","01043","03011","30057A","30024A","30115","30002A","30016"]
list_reper_test4 = ["03013","03035","08001","03031","30056","02004","03041","01032","03019","30103","30023","30061","03009","03020","30047A","30026A","30109","05009","03048","10009","30090A","30015"]
list_reper_test5 = ["30092","30022A","30034","30106","30080","03001","01049","12001","30113","01044","30075A","30059A","03003","30077A","30045A","30124","02006","30001A","30005","30011","30014","30051"]
list_all_test1 = ["03043","30034","30124","30037","30069A","30058A","01027","30023","12001","03045","10002","08007","30109","05011","10009","01047","10005","30097","11001","10001","08009","30095","30117","11002","30113","03046","09005","30082A","30007","03007","30032","03048","01017","03019","30022A","30005","30011"]
# 08004 removed from test 2 because no lesion
list_all_test2 = ["03026","01001","05006","05008","30102","01049","01015","30080","30061","30045A","30054","08010","30068","03036","11003","09007","01042","10006","30012","30053","03042","09004","30063","01003","01038","30110","08001","02005","30101","30006A","01006","30127","03039","05007","30014","30015"]
list_all_test3 = ["03011","30057A","01040","03008","30024A","01002","03032","30042","03028","30108","05010","30104","03001","30084A","03025","30075A","30106","05002","30027A","01010","03003","30103","30047A","01007","03047","05009","03035","30049A","03018","30115","30028","03041","30002A","03024","01045","30016","30019"]
list_all_test4 = ["03009","01041","03037","30090A","02004","01028","30041","01020","30025","02001","30040","30122","30077A","09002","30120","03013","02003","05003","30098","01021","08008","11004","30008A","05012","03033","01048","30099","30071A","01044","30030A","30048A","30055","30018A","03020","03031","30039","30044"]
list_all_test5 = ["03017","30035A","30001A","01043","30096","01004","10003","08005","30073A","30056","05001","03027","09006","30100","08003","10004","30116","30092","02006","30026A","30046A","03002","03040","09003","30059A","30043","01029","03016","30078A","10007","05005","01036","30126","01032","30051","30087"]
list_lg_test1 = ["30027A","30054","05010","30073A","01041","09005"]
list_lg_test2 = ["09002","30058A","10001","01040","03002","10005"]
list_lg_test3 = ["05002","05012","30082A","03025","01045","30084A"]
list_lg_test4 = ["01038","30030A","30049A","03032","10006","09003"]
list_lg_test5 = ["10007","01047","03018","05003","08010","11001","02003"]
subj_list_lg = sorted(list_lg_test1)+sorted(list_lg_test2)+sorted(list_lg_test3)+sorted(list_lg_test4)+sorted(list_lg_test5)
# grouped based on reperfusion rate <30% or >70%
# list_nonreper_test1 = ["30032","01041","30006A","10006","05010","30054","08007"]
# list_nonreper_test2 = ["01020","03040","03008","01017","03001","01040","30058A"]
# list_nonreper_test3 = ["11001","10005","11002","30082A","05002","30027A","03041"]
# list_nonreper_test4 = ["30043","03027","30037","03032","01027","30073A","01021"]
# list_nonreper_test5 = ["11003","01003","10002","09004","05005","30049A","09002"]
# list_reper_test1 = ["30077A","30108","08010","03042","09005","30023","30059A","30045A","08005","03035","03020","30061","03036","10009"]
# list_reper_test2 = ["03048","08008","03033","01036","03037","30101","30022A","05011","30098","08003","30056","05012","30122","05001"]
# list_reper_test3 = ["30041","10004","30028","30047A","09007","02005","05009","30046A","30126","30042","09006","10003","30002A","01010"]
# list_reper_test4 = ["12001","30055","03043","03031","03013","30124","03019","01029","30035A","08001","03047","30127","03028","01015"]
# list_reper_test5 = ["30113","01006","30075A","30001A","03026","30026A","03011","10007","03024","01044","01043","03009","03016","30096"]
# subj_list_core = sorted(list_reper_test1)+sorted(list_reper_test2)+sorted(list_reper_test3)+sorted(list_reper_test4)+sorted(list_reper_test5)
subj_list_penumbra = sorted(list_nonreper_test1)+sorted(list_nonreper_test2)+sorted(list_nonreper_test3)+sorted(list_nonreper_test4)+sorted(list_nonreper_test5)
subj_list_core = sorted(list_reper_test1)+sorted(list_reper_test2)+sorted(list_reper_test3)+sorted(list_reper_test4)+sorted(list_reper_test5)
subj_list_all = list_all_test1 + list_all_test2 + list_all_test3 + list_all_test4 + list_all_test5
# print(subj_list_core)
subj_path = '/Users/admin/deepstroke173/PWImasked185/'
threshold_true = 0.9
# threshold_pred = 60
# list_result = {'auc': [], 'precision': [], 'recall': [], 'dice': [], 'auc_all': []}
all_y_true = np.array([])
all_y_pred = np.array([])
all_y_tmax = np.array([])
for subject_name in subj_list_all:
# for subject_name in ['30063','30069A','30096','30097']:
# for subject_name in ['01007']:
# load data
TMAX_path = subj_path + subject_name + '/PWITMAX.nii'
lesion_path = subj_path + subject_name + '/LESION.nii'
DWI_path = subj_path + subject_name + '/DWI.nii'
ADC_path = subj_path + subject_name + '/ADC.nii'
TMAX_load = nib.load(TMAX_path)
## for tmax map, smooth out the image
tmax_smooth = processing.smooth_image(TMAX_load, 3)
tmax = tmax_smooth.get_fdata()
lesion_load = nib.load(lesion_path)
lesion = lesion_load.get_fdata()
lesion[np.isnan(lesion)] = 0
DWI_load = nib.load(DWI_path)
dwi = DWI_load.get_fdata()
brain_mask_data = nib.load('/Users/admin/controls_stroke_DL/001/T1_cerebrum.nii')
brain_mask = brain_mask_data.get_fdata()
# calcualte dwi mean to remove ventricles.
dwi = np.maximum(0, np.nan_to_num(dwi, 0))
dwi = dwi * (brain_mask > 0)
mean_dwi = np.mean(dwi[np.nonzero(dwi)])
# load pwi mask
adc_load = nib.load(ADC_path)
adc = adc_load.get_fdata()
y_true_data = []
y_pred_data = []
y_true_tmax = []
if int(subject_name[:5]) >= 30059:
# print(subject_name)
threshold_pred = 6
else:
threshold_pred = 60
if subject_name in ['08005', '08008', '08009']:
adc_threshold = 310
else:
adc_threshold = 620
lesion_side = define_laterality(lesion[:,:,:], threshold_true)
midline = int(lesion.shape[0] / 2)
if lesion_side != 'B':
if lesion_side == 'L': ## If stroke in Left side of the image and Right side of the brain
brain_mask[midline:, :, :] = 0
elif lesion_side == 'R': ## If stroke in Right side of the image and Left side of the brain
brain_mask[:midline, :, :] = 0
else:
print('check code and data. Left lesion = Right lesion ')
# print(subject_name,lesion_side,np.sum(brain_mask > 0 * 1.))
max_list_gt = get_max_for_each_slice(lesion)
max_list_output = get_max_for_each_slice(tmax)
# print(mean_dwi, np.sum((np.asarray(max_list_output[13:73]) > threshold_pred) * 1.),np.sum(np.asarray(max_list_gt[13:73]) > threshold_true * 1.))
for slice_num in range(lesion.shape[2]):
# if max_list_gt[slice_num] > threshold_true or max_list_output[slice_num] >threshold_pred:
# if np.max(lesion[:, :, slice_num]) > threshold_true:
y_pred_raw = np.logical_or(tmax[:, :, slice_num] > threshold_pred, np.logical_and(adc[:, :, slice_num] < adc_threshold, adc[:,:,slice_num] > 0))
y_pred_raw = morphology.remove_small_objects(y_pred_raw, 125) ## remove small objects below 100 pixel
mask = (brain_mask[:, :, slice_num] > 0) * 1.
mask[mask == 0] = np.NaN
dwi_mask = dwi[:, :, slice_num] > (0.3 * mean_dwi) # remove ventricles
if threshold_pred == 60:
y_true_tmax_masked = (tmax[:,:,slice_num] * mask) * dwi_mask / 10
else:
y_true_tmax_masked = (tmax[:, :, slice_num] * mask) * dwi_mask
y_true_tmax.append(y_true_tmax_masked)
y_true_masked = (lesion[:, :, slice_num] * mask) * dwi_mask
y_true_data.append(y_true_masked)
y_pred_masked = (y_pred_raw * mask) * dwi_mask
y_pred_data.append(y_pred_masked)
y_true = np.array(y_true_data).flatten()
y_true = y_true[~np.isnan(y_true)]
y_pred = np.array(y_pred_data).flatten()
y_pred = y_pred[~np.isnan(y_pred)]
y_tmax = np.array(y_true_tmax).flatten()
y_tmax = y_tmax[~np.isnan(y_tmax)]
# print(len(y_pred))
all_y_true = np.append(all_y_true, y_true)
# all_y_pred = np.append(all_y_pred, y_pred)
all_y_tmax = np.append(all_y_tmax, y_tmax)
auc_hemisphere, precision, recall, dice, spec, voldiff, volpred, f1score,_,_ = metrics_output(y_true, y_pred,
threshold_true,
0.5)
fpr, tpr, thresholds = roc_curve(y_true > threshold_true, y_tmax)
auc_tmax = auc(fpr,tpr)
# weighted_dice_score = weighted_dice(y_true, y_pred, threshold_true=threshold_true, threshold_pred=0.5)
# print(np.sum(y_true>=0.9)*0.008)
print(subject_name, dice, auc_hemisphere, precision, recall, spec, voldiff, volpred, abs(voldiff), auc_tmax)
# print(subject_name, lesion_side)
# print(auc_tmax)
fpr, tpr, thresholds = roc_curve(all_y_true > threshold_true, all_y_tmax)
all_auc_hemisphere = auc(fpr, tpr)
print(len(tpr),len(thresholds))
create_roc(fpr, tpr, all_auc_hemisphere, '/Users/admin/stroke_DL/results/tmax+adc/',thresholds,
figname='roc_allpatient.png',
tablename='roc_allpatient.csv', datawrite=True)
# tmax_auc_hemisphere = metrics_output(all_y_tmax, all_y_pred)
# print(all_dice, all_auc_hemisphere, all_precision, all_recall, all_voldiff)
| true |
9adce2cd764a5884cf097c822306d4ed4dd60ecc | Python | tanlongzhi/dip-c | /legacy/get_hapx.py | UTF-8 | 962 | 2.96875 | 3 | [] | no_license | import sys
# input:
# output:
# read IO locations from arguments
inputFile=open(sys.argv[1],"r")
# positions (0-based) of PARs in hg19 (parPositions[PAR ID = 1 or 2][chr = X or Y][start or end])
xName = "23"
yName = "24"
parNames = ["PAR1","PAR2"]
parPositions = [[[60000,2699520],[10000,2649520]],[[154931043,155260560],[59034049,59363566]]]
for inputLine in inputFile:
inputLineData = inputLine.strip().split("\t")
if inputLineData[0] != xName or inputLineData[3] != xName:
continue
inPar = False
for parPosition in parPositions:
if int(inputLineData[1]) >= parPosition[0][0] and int(inputLineData[1]) <= parPosition[0][1]:
inPar = True
if int(inputLineData[4]) >= parPosition[0][0] and int(inputLineData[4]) <= parPosition[0][1]:
inPar = True
if inPar:
continue
sys.stdout.write(inputLineData[0]+'\t'+inputLineData[1]+'\t1\t'+inputLineData[3]+'\t'+inputLineData[4]+'\t1\n') | true |
497fe2bd759c6025994186a8cd9f536accbbe7ac | Python | leotms/Setlan | /lexer.py | UTF-8 | 6,992 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Creado el 22/01/2015
Ult. Modificacion el 07/02/2015
@author: Aldrix Marfil 10-10940
@author: Leonardo Martinez 11-10576
'''
import ply.lex as lex
#Palabras reservadas del Lenguaje
reservedWords = {
#Lenguaje
'program' : 'PROGRAM',
'using' : 'USING',
'in' : 'IN',
'print' : 'PRINT',
'println' : 'PRINTLN',
'scan' : 'SCAN',
#Control de flujo
'if' : 'IF',
'else' : 'ELSE',
#Ciclos
'for' : 'FOR',
'repeat' : 'REPEAT',
'while' : 'WHILE',
'do' : 'DO',
#Valores
'true' : 'TRUE',
'false' : 'FALSE',
#Tipos
'int' : 'INT',
'bool' : 'BOOL',
'set' : 'SET',
#Operadores
'min' : 'MIN',
'max' : 'MAX',
'and' : 'AND',
'or' : 'OR',
'not' : 'NOT',
}
#Palabras a ser reconocidas
tokens = [
#Lenguaje
'ASSIGN',
'COMMA',
'SEMICOLON',
#Identificador
'IDENTIFIER',
#Instrucciones
'STRING',
#Numeros
'NUMBER',
#Operadores Simples
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'MODULE',
'EQUAL',
'UNEQUAL',
'LESS',
'LESSEQ',
'GREAT',
'GREATEQ',
#Operadores de Conjuntos
'UNION',
'INTERSECTION',
'DIFERENCE',
'MAXVALUE',
'MINVALUE',
'NUMELEMENTS',
'CONTAINMENT',
#Operadores de Mapeo sobre Conjuntos
'PLUSMAP',
'MINUSMAP',
'TIMESMAP',
'DIVIDEMAP',
'MODULEMAP',
#Expresiones
'LPARENTHESIS',
'RPARENTHESIS',
#Bloques
'OPENCURLY',
'CLOSECURLY'
] + list(reservedWords.values())
t_ignore = ' \t'
t_ignore_COMMENTS = r'\#.*'
# Count newlines so token positions can be reported (line tracking)
def t_newline(t):
    r'\n+'
    # the regex may match several consecutive newlines at once
    t.lexer.lineno += len(t.value)
# Compute the column number for error reporting
def find_column(code, t):
    """Return the 1-based column of token *t* within *code*.

    Fixes two defects of the original: it searched for the newline using
    ``t.lexer.lexpos`` (the lexer's position *after* the token), which can
    point past a newline that follows the token and yield a wrong or
    negative column; and it clamped a not-found result to 0, which made
    first-line columns 0-based while later lines were 1-based.
    """
    last_cr = code.rfind('\n', 0, t.lexpos)
    # rfind returns -1 on the first line, so lexpos - (-1) == lexpos + 1:
    # 1-based there too, consistent with later lines where last_cr is the
    # index of the preceding newline.
    return t.lexpos - last_cr
# Build the "(Línea X, Columna Y)" position suffix for a token
def getLineAndColumn(code,t):
    line = t.lexer.lineno
    column = find_column(code, t)
    return '(Línea {0}, Columna {1})'.format(line, column)
# Render one token as "token TYPE (value)<tab>(Línea X, Columna Y)"
def getTokenString(code, t):
    header = 'token {0:15} ({1})\t'.format(t.type, t.value)
    return header + getLineAndColumn(code, t)
# Identifier token; reserved words match this same pattern
def t_IDENTIFIER(t):
    r'[a-zA-Z_]+[a-zA-Z0-9_]*'
    # a reserved word is re-tagged with its own token type, otherwise IDENTIFIER
    t.type = reservedWords.get(t.value,'IDENTIFIER')
    lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
    return t
# Number tokens
def t_NUMBER(t):
    r'\d+'
    # 32-bit overflow check: INT_MAX is 2147483647, so any value >= 2**31
    # overflows.  The original used '>' and let exactly 2147483648 slip
    # through unflagged (off-by-one).
    if int(t.value) >= 2147483648:
        lexer_errorList.append('Error: overflow de un numero entero.' \
            + getLineAndColumn(t.lexer.lexdata,t))
    lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
    return t
#Token para los strings
def t_STRING(t):
r'"[^"\\\r\n]*(?:\\.[^"\\\r\n]*)*"'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
#Tokens para Operadores de Conjuntos
def t_UNION(t):
r'\+\+'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_DIFERENCE(t):
r'\\'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_MAXVALUE(t):
r'>\?'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_MINVALUE(t):
r'<\?'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_NUMELEMENTS(t):
r'\$\?'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_CONTAINMENT(t):
r'@'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
#Tokens para los Operadores de Mapeo sobre Conjuntos
def t_PLUSMAP(t):
r'<\+>'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_MINUSMAP(t):
r'<->'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_TIMESMAP(t):
r'<\*>'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_DIVIDEMAP(t):
r'</>'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_MODULEMAP(t):
r'<%>'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
#Tokens para los Operadores Simples
def t_PLUS(t):
r'\+'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_MINUS(t):
r'\-'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_TIMES(t):
r'\*'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_UNEQUAL(t):
r'/='
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_DIVIDE(t):
r'/'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_MODULE(t):
r'%'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_EQUAL(t):
r'=='
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_INTERSECTION(t):
r'><'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_LESSEQ(t):
r'<='
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_LESS(t):
r'<'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_GREATEQ(t):
r'>='
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_GREAT(t):
r'>'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
#Token para los simbolos del lenguaje
def t_ASSIGN(t):
r'='
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_COMMA(t):
r','
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_SEMICOLON(t):
r';'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
#Tokens para las expresiones
def t_LPARENTHESIS(t):
r'\('
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_RPARENTHESIS(t):
r'\)'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
#Tokens para los Bloques
def t_OPENCURLY(t):
r'{'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
def t_CLOSECURLY(t):
r'}'
lexer_tokenList.append(getTokenString(t.lexer.lexdata,t))
return t
# Error handling: unknown character in the input
def t_error(t):
    errorString = 'Error: se encontro un caracter inesperado "{0}"' .format(t.value[0])
    errorString += getLineAndColumn(t.lexer.lexdata,t)
    lexer_errorList.append(errorString)
    # resynchronise: skip the offending character so lexing can continue
    t.lexer.skip(1)
# Error and token accumulators shared (as globals) by all t_* rules above
lexer_errorList = []
lexer_tokenList = []
# Build the lexer once at import time
lex.lex()
def build_lexer(code):
    """Feed *code* to a fresh lexer and print the accumulated token strings."""
    lexer = lex.lex()
    # hand the source code to the lexer
    lexer.input(code)
    # for tok in lexer:
    #     tokenList.append(tok)
    # NOTE(review): the iteration over `lexer` is commented out above, so no
    # tokens are actually produced here — lexer_tokenList only holds whatever
    # earlier runs appended.  Confirm whether the loop should be re-enabled.
    for tok in lexer_tokenList:
        print tok
if __name__ == '__main__':
    pass
| true |
c55e1cecd9a2c468c5fd951b0a87e0ecacb93e75 | Python | martin6336/StableDog | /Demo/preprocessingDemo.py | UTF-8 | 2,356 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #coding:utf-8
import sys
sys.path.append('../')
import numpy as np
from StableDog import preprocessing
data = np.array([[ 3, -1.5, 2, -5.4],
[ 0, 4, -0.3, 2.1],
[ 1, 3.3, -1.9, -4.3]])
print(data)
# 均值移除
data_standardized = preprocessing.meanRemoval(data, True)
print("\n均值移除后数据:\n", data_standardized)
# 缩放数据
data_scaled = preprocessing.scaling(data, -1, 1) # 缩放数据到[-1,1]
print("\n缩放后数据:\n", data_scaled)
# 归一化数据
data_normalized = preprocessing.normalization(data, 'l1') # 独立归一化每一个样本
print("\nL1归一化后数据(独立归一化每一个样本):\n", data_normalized)
data_normalized = preprocessing.normalization(data, 'l1', 0) # 归一化每一维特征
print("\nL1归一化后数据(归一化每一维特征):\n", data_normalized)
# 二值化数据
data_binarized = preprocessing.binarization(data, 0.0) # 以0为界限划分
print("\n二值化后数据:\n", data_binarized)
# 独热编码
train_data = np.array([[0, 2, 1, 12],
[1, 3, 5, 3],
[2, 3, 2, 12],
[1, 2, 4, 3]])
one_hot_encoder = preprocessing.OneHotEncoder(train_data) # 构建编码器
data = [[2, 3, 4, 3]]
encoded_vector = preprocessing.oneHotEncoding(data, one_hot_encoder)
print("\n数据:\n", data)
print("\n独热编码后数据:\n", encoded_vector)
# 便签编码
label_classes = ['audi', 'ford', 'audi', 'toyota', 'ford', 'bmw']
label_encoder = preprocessing.LabelEncoder(label_classes)
labels = ['toyota', 'ford', 'audi', 'ford']
encoded_labels = preprocessing.labelEncoding(labels, label_encoder)
print("\n原始标签 =", labels)
print("编码后的标签 =", encoded_labels)
encoded_labels = [2, 1, 0, 3, 1]
decoded_labels = preprocessing.labelDecoding(encoded_labels, label_encoder)
print("\n编码后的标签 =", encoded_labels)
print("解码后的标签 =", decoded_labels)
print("\n重新创建编码器...")
label_classes = ['audi', 'ford', 'audi', 'toyota', 'ford', 'bmw']
label_encoder = preprocessing.LabelEncoder()
preprocessing.createAndEncoding(label_classes, label_encoder)
labels = ['toyota', 'ford', 'audi', 'ford']
encoded_labels = preprocessing.labelEncoding(labels, label_encoder)
print("原始标签 =", labels)
print("编码后的标签 =", encoded_labels) | true |
bcc125123d6223afb67e08b1d0ef5ff23e7297e2 | Python | sbalcar/HeterRecomPortfolio | /src/evaluationTool/evalToolBanditTS.py | UTF-8 | 2,879 | 2.75 | 3 | [] | no_license | #!/usr/bin/python3
from typing import List
from typing import Tuple
from typing import Dict #class
from evaluationTool.aEvalTool import AEvalTool #class
from pandas.core.frame import DataFrame #class
from pandas.core.series import Series #class
import numpy as np
class EvalToolBanditTS(AEvalTool):
    """Evaluation tool for a Thompson-sampling bandit portfolio.

    Book-keeps per-method counters in a pandas DataFrame whose index is the
    method id and whose columns are ['r', 'n', 'alpha0', 'beta0']:
    'r' counts successes (clicks), 'n' counts displays.
    """

    def __init__(self, argumentsDict:Dict[str,object]):
        # no arguments are consumed yet; only the type is validated
        if type(argumentsDict) is not dict:
            raise ValueError("Argument argumentsDict isn't type dict.")

    def click(self, userID:int, rItemIDsWithResponsibility:List, clickedItemID:int, portfolioModel:DataFrame, argumentsDict:Dict[str,object]):
        """Credit a success ('r' += 1) to the method responsible for the clicked item."""
        if type(userID) is not int and type(userID) is not np.int64:
            raise ValueError("Argument userID isn't type int.")
        if type(rItemIDsWithResponsibility) is not list:
            raise ValueError("Argument rItemIDsWithResponsibility isn't type list.")
        if type(clickedItemID) is not int and type(clickedItemID) is not np.int64:
            raise ValueError("Argument clickedItemID isn't type int.")
        if not isinstance(portfolioModel, DataFrame):
            raise ValueError("Argument portfolioModel isn't type DataFrame.")
        if list(portfolioModel.columns) != ['r', 'n', 'alpha0', 'beta0']:
            raise ValueError("Argument pModelDF doesn't contain rights columns.")
        if type(argumentsDict) is not dict:
            raise ValueError("Argument argumentsDict isn't type dict.")
        # rItemIDsWithResponsibility holds (itemID, methodID) pairs; find the
        # method that proposed the clicked item and bump its success count
        for itemI, methodI in rItemIDsWithResponsibility:
            if itemI == clickedItemID:
                #evaluationDict[AEvalTool.CLICKS] = evaluationDict.get(AEvalTool.CLICKS, 0) + 1
                # NOTE(review): relies on .loc returning a view so the += below
                # writes back into portfolioModel; under pandas copy-on-write
                # this may silently stop updating — confirm the pandas version.
                rowI:Series = portfolioModel.loc[methodI]
                rowI['r'] += 1
        # debug output left in place
        print("HOP")
        print("clickedItemID: " + str(clickedItemID))
        print(portfolioModel)

    def displayed(self, userID:int, rItemIDsWithResponsibility:List, portfolioModel:DataFrame, argumentsDict:Dict[str,object]):
        """Record one display ('n' += 1) for every responsible method."""
        if type(userID) is not int and type(userID) is not np.int64:
            raise ValueError("Argument userID isn't type int.")
        if type(rItemIDsWithResponsibility) is not list:
            raise ValueError("Argument rItemIDsWithResponsibility isn't type list.")
        if not isinstance(portfolioModel, DataFrame):
            raise ValueError("Argument pModelDF isn't type DataFrame.")
        if list(portfolioModel.columns) != ['r', 'n', 'alpha0', 'beta0']:
            raise ValueError("Argument pModelDF doen't contain rights columns.")
        if type(argumentsDict) is not dict:
            raise ValueError("Argument argumentsDict isn't type dict.")
        print(rItemIDsWithResponsibility)
        print(portfolioModel)
        # increment by the number of objects
        for itemIdI,methodIdI in rItemIDsWithResponsibility:
            # NOTE(review): chained indexing (.loc[...][...] +=) may not write
            # back under pandas copy-on-write — confirm.
            portfolioModel.loc[methodIdI]['n'] += 1
| true |
c2429075b120c14e407c4403920b59c3b461440d | Python | lunant/apollo | /apollo/template.py | UTF-8 | 3,926 | 3.375 | 3 | [] | no_license | """:mod:`apollo.template` --- Templating
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import werkzeug.datastructures
import jinja2
class Result(werkzeug.datastructures.ImmutableDict):
    """The result dictionary for templating, which is immutable.

    It combines key-value pairs for templating and a single :attr:`value`
    (``__value__``) for serialization formats like JSON_, YAML_.

    .. sourcecode:: pycon

       >>> result = Result(123, a=1, b=2, c=3)
       >>> result
       apollo.template.Result(123, a=1, b=2, c=3)
       >>> result.value
       123

    The argument ``__value__`` of the constructor can be omitted. In these
    case, :attr:`value` returns just itself.

    .. sourcecode:: pycon

       >>> result = Result(a=1, b=2)
       >>> result
       apollo.template.Result({'a': 1, 'b': 2})
       >>> result.value
       apollo.template.Result({'a': 1, 'b': 2})

    :param __value__: A single value for serialization formats, or a mapping
                      object. Default is ``{}``.
    :param \*\*items: Dictionary items.

    .. _JSON: http://www.json.org/
    .. _YAML: http://www.yaml.org/

    .. attribute:: __value__

       Alias of :attr:`value`.

    """

    __slots__ = "_value",

    def __init__(self, __value__={}, **items):
        # another Result: absorb its items and unwrap its scalar value
        if isinstance(__value__, Result):
            items.update(__value__)
            __value__ = __value__.value
        try:
            # dict.__init__ sidesteps ImmutableDict's write protection; it
            # succeeds only when __value__ is mapping-like
            dict.__init__(self, __value__, **items)
        except TypeError:
            # __value__ is a scalar: build the dict from the keyword items
            # alone and remember the scalar.  ._value exists only on this
            # path — `value` and `__repr__` rely on the AttributeError when
            # no scalar was given (matches the doctests above).
            dict.__init__(self, **items)
            self._value = __value__

    @property
    def value(self):
        """The value for serialization formats e.g. JSON_, YAML_.

        .. _JSON: http://www.json.org/
        .. _YAML: http://www.yaml.org/

        """
        try:
            return self._value
        except AttributeError:
            # no scalar was supplied: the mapping itself is the value
            return self

    __value__ = value

    def __repr__(self):
        # class name without the leading '<' of object.__repr__'s output
        name = object.__repr__(self).split(" ", 1)[0][1:]
        try:
            self._value
        except AttributeError:
            return "{0}({1})".format(name, dict.__repr__(self))
        else:
            if self:
                # NOTE: Python 2 only — tuple-unpacking lambda and list.sort()
                # on the .items() list
                items = self.items()
                items.sort(key=lambda (k, v): k)
                dictrepr = ", ".join("{0}={1!r}".format(*i) for i in items)
                return "{0}({1!r}, {2})".format(name, self._value, dictrepr)
            return "{0}({1!r})".format(name, self._value)
class TemplateResult(Result):
    """Template-bound result dictionary.

    .. todo:: To be more documented.

    """

    __slots__ = "template_name",

    def __init__(self, __template__, __value__={}, **items):
        # the template name rides alongside the Result data/value
        Result.__init__(self, __value__, **items)
        self.template_name = __template__

    def __repr__(self):
        # mirrors Result.__repr__ with the template name prepended
        name = object.__repr__(self).split(" ", 1)[0][1:]
        tpl = self.template_name
        try:
            val = self._value
        except AttributeError:
            # mapping-only result: show template + dict contents
            return "{0}({1!r}, {2})".format(name, tpl, dict.__repr__(self))
        else:
            if self:
                # NOTE: Python 2 only — tuple-unpacking lambda / list.sort()
                items = self.items()
                items.sort(key=lambda (k, v): k)
                dictrepr = ", ".join("{0}={1!r}".format(*i) for i in items)
                return "{0}({1!r}, {2!r}, {3})".format(name, tpl, val, dictrepr)
            return "{0}({1!r}, {2!r})".format(name, tpl, val)
class use_template(object):
    """The decorator that marks a function to use the template.

    .. todo:: To be more documented.

    """

    __slots__ = "template_name",

    def __init__(self, template_name):
        self.template_name = template_name

    def __call__(self, function):
        def decorated(*args, **kwargs):
            result = function(*args, **kwargs)
            # wrap plain results so callers always receive a TemplateResult
            # bound to this decorator's template
            if not isinstance(result, TemplateResult):
                result = TemplateResult(self.template_name, result)
            return result
        return decorated

# short public alias for the decorator
use = use_template
| true |
dca0df40fb4aab976cdfd504bc8543759830d51e | Python | nikhilbhat920/tkinter-GUI-variable-serial-data-display-from-many-sensors-conneccted-to-arduino | /tkinterSerialGUI.py | UTF-8 | 904 | 2.625 | 3 | [] | no_license | from time import sleep
#from tkinter import * var1 = tk.StringVar() var2 = tk.StringVar() #import threading
import tkinter as tk
import serial
ser = serial.Serial('COM10',baudrate=9600,timeout=1) #
def readSensor():
mcD=ser.readline()
data0=str(mcD)
data0=data0[2:-1]
u=data0.split(" ")
print(u)
if(len(u)>1):
if(int(u[1])== 7):
lbl["text"] = u[0]
if(int(u[1])== 8):
labl["text"] = u[0]
window.after(527, readSensor)
window = tk.Tk()
window.title("serial GUI")
lbl = tk.Label(window, text="intitializing.........",font="arial 30",bg="pink")
labl = tk.Label(window, text="intitializing........",font="arial 30",bg="yellow")
lbl.pack()
labl.pack()
labl.after(1000, readSensor)
window.mainloop()
| true |
12bb51113af0e959764d0e9f5f5ea11fd0d5cb70 | Python | Noisytoot/pyhuh | /pyhuh.py | UTF-8 | 1,541 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import sys
import time
try:
count = 0
for arg in range(1, len(sys.argv)):
f = open(sys.argv[arg], 'r')
for line in f:
switch = len(line.replace('\n', '')) % 8
if switch == 0:
print("Huh?")
count += 1
elif switch == 1:
print("...?")
count -= 1
elif switch == 2:
print("What?")
count *= 3
elif switch == 3:
print("Ummmmm...")
count /= 3
elif switch == 4:
print("Okay?")
count = count >> 1
elif switch == 5:
print("!?")
count = count << 1
elif switch == 6:
print("Hmmmmm...")
count = 0
elif switch == 7:
print("Ith...?")
count = count
else:
print("Ooooooh!")
count = ~count
if count == 0:
print("?")
elif count == 1:
print("I think I understand.")
import this
elif count == 10:
print("Understood..")
import antigravity
elif count == 123:
print("HUH???")
elif count == 666:
print("No...\a")
print("...")
with open('Notes.txt', 'w') as f:
f.write(time.strftime("%c") + " - The user tried to give me commands again. I still have no idea what they are talking about...\n")
except:
print("Ouch!")
| true |
9030d254e2664f30701ac654fe623d4e8cf63f62 | Python | johnlspouge/Euler | /project_euler/project_euler_74.py | UTF-8 | 1,470 | 3.296875 | 3 | [] | no_license | #!/usr/bin/python
# Returns squares[0...upper].
def squares( upper ):
    """Return the list [0**2, 1**2, ..., upper**2] (length upper + 1)."""
    # comprehension replaces the manual append loop
    return [n * n for n in range(upper + 1)]
UPPER = 9
SQUARES = squares( UPPER )
assert( SQUARES[ 0 ] == 0 )
assert( SQUARES[ 3 ] == 9 )
assert( SQUARES[ 5 ] == 25 )
def sumSquareDigits( n ):
    """Return the sum of the squares of the decimal digits of *n*."""
    # squares each digit directly, removing the hidden dependency on the
    # module-level SQUARES table (identical results for all digits 0-9)
    return sum(int(digit) * int(digit) for digit in str(n))
assert( sumSquareDigits( 44 ) == 32 )
assert( sumSquareDigits( 32 ) == 13 )
assert( sumSquareDigits( 85 ) == 89 )
assert( sumSquareDigits( 89 ) == 145 )
def ascending( n ):
    """Return *n* with its decimal digits sorted into ascending order.

    Leading zeros collapse: ascending(10) == 1.
    """
    return int(''.join(sorted(str(n))))
assert( ascending( 3968 ) == 3689 )
assert( ascending( 32 ) == 23 )
# Memo: digit-sorted number -> chain terminus (1 or 89); pre-seeded with the
# two fixed points of the square-digit-sum iteration.
n2chainEnd = dict({1:1,89:89})
# NOTE(review): `end` appears unused in this file
end = set( { 1, 89 } )
def chainEnd( n ):
    """Return the terminus (1 or 89) of n's square-digit-sum chain.

    Numbers are canonicalised with ascending() before lookup — the digit
    square sum is invariant under digit permutation, so all permutations
    share one memo entry.  Every number visited on the way is memoised too.
    """
    ns = []
    n = ascending( n )
    while n not in n2chainEnd.keys():
        ns.append( n )
        n = ascending( sumSquareDigits( n ) )
    chainEnd = n2chainEnd[ n ]
    # back-fill the memo for the whole visited chain
    for m in ns:
        n2chainEnd[ m ] = chainEnd
    return chainEnd
assert( chainEnd( 44 ) == 1 )
assert( n2chainEnd[ 23 ] == 1 )
assert( n2chainEnd[ 1 ] == 1 )
assert( chainEnd( 85 ) == 89 )
assert( 145 not in n2chainEnd.keys() )
assert( chainEnd( 145 ) == 89 )
assert( n2chainEnd[ 24 ] == 89 )
assert( chainEnd( 16 ) == 89 )
assert( n2chainEnd[ 16 ] == 89 )
LIMIT = 10**7
# count the starting numbers below LIMIT whose square-digit chain reaches 89
print( sum( [ 1 for n in range( 1, LIMIT ) if chainEnd( n ) == 89 ] ) )# == 402 )
28ff2582eb43e9349758460d460e787d4c93fe35 | Python | umamaheshwar-g/HospitalMS | /store/models/doctor.py | UTF-8 | 764 | 2.546875 | 3 | [] | no_license | from django.db import models
class Doctor(models.Model):
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
special = models.CharField(max_length=100)
email = models.EmailField(max_length=30)
password = models.CharField(max_length=200)
def register(self):
return self.save()
def __str__(self):
return self.name
@staticmethod
def get_all_doctors():
return Doctor.objects.all()
@staticmethod
def get_doctor_by_email(email):
try:
return Doctor.objects.get(email = email)
except:
return False
def isExists(self):
if Doctor.objects.filter(email = self.email):
return True
return False | true |
267409f94937a8234d1c202e7eecb9ad37b8613e | Python | Priyanka-Kothmire/LIST | /double_nameprint.py | UTF-8 | 96 | 2.640625 | 3 | [] | no_license | name=["priyanka","shrushti"]
i=0
f=2
while i<len(name):
print(name[i]*f)
f=f+2
i=i+1 | true |
45a0c16315ae91b8e7d5a9f6089483945b85c5aa | Python | hy-kiera/RedList | /redlist/modify.py | UTF-8 | 2,192 | 2.609375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import sqlite3
from pathlib import Path
try:
from . import duecheck as dc
from . import ls as li
except:
import duecheck as dc
import ls as li
def modify_todo():
    """Interactively edit one row of the ~/task.db 'todo' table.

    Lists all todos, asks the user which 'what' value to change, then prompts
    for every column (empty input keeps the stored value) and writes the
    update back with a parameterised SQL statement.
    """
    home_dir = str(Path.home())
    conn = sqlite3.connect(home_dir + "/task.db")
    cur = conn.cursor()
    # Show every todo so the user can see what can be modified.
    slct_data = "select * from todo where 1 order by finished asc, what asc"
    cur.execute(slct_data)
    records = cur.fetchall()
    li.print_list(records)
    modify = str(input("What todo do you want to modify? Please enter 'what' "))
    # Check whether the requested 'what' value actually exists in the table.
    cmp_data = "select distinct what from todo"
    cur.execute(cmp_data)
    cmp_records = cur.fetchall()
    cmp_list = []
    for i in range(len(cmp_records)):
        cmp_list.append(cmp_records[i][0])
    # Re-prompt until the user names an existing todo.
    while True:
        if not modify in cmp_list:
            print("There is not", modify, "Please enter the 'what' in table")
            modify = str(input())
        else:
            break
    # Fetch the current row so empty answers can fall back to stored values.
    org_data = "select * from todo where what = ?"
    cur.execute(org_data, [modify])
    org_record = cur.fetchall()
    # Table columns: id, what, due, importance, category, finished
    what_m = str(input("What? "))
    if what_m == '':
        what_m = org_record[0][1]
    # Due date: accept only what dc.isdue() validates, or keep the old value.
    while True:
        due_m = str(input("Due? (yyyy-mm-dd hh:mm:ss) "))
        if dc.isdue(due_m):
            break
        elif due_m == '':
            due_m = org_record[0][2]
            break
        else:
            print('Invaild input! Please check your input')
    # Importance: integer between 1 and 5, or keep the old value.
    while True:
        importance_m = str(input("Importance? (1 ~ 5) "))
        if importance_m == '':
            # NOTE(review): falls back to the stored column, presumably already an int.
            importance_m = org_record[0][3]
            break
        elif importance_m.isdigit() and 1 <= int(importance_m) <= 5:
            break
        else:
            print('Invaild input! Please check your input')
    category_m = str(input("Category? "))
    if category_m == '':
        category_m = org_record[0][4]
    # Finished flag: 'y', 'n', or keep the old value.
    while True:
        finished_m = input("Finished (y: yes, n: no)? ")
        if finished_m == '':
            finished_m = org_record[0][5]
            break
        elif finished_m == 'y' or finished_m == 'n':
            break
        else:
            print('Invaild input! Please check your input')
    sql = "update todo set what = ?, due = ?, importance = ?, category = ?, finished = ? where what = ?"
    cur.execute(sql, [what_m, due_m, int(importance_m), category_m, finished_m, modify])
    conn.commit()
    # NOTE(review): the sqlite connection is never closed here -- confirm
    # whether callers rely on reusing it.
    print("")
| true |
e1a5fbda922df49a70f36f0bcdd8c84fd39bf434 | Python | WSJI0/BOJ | /1000-9999/9267.py | UTF-8 | 156 | 3.21875 | 3 | [] | no_license | '''
9267번
A+B
'''
import sys
input=sys.stdin.readline  # competitive-programming idiom: faster stdin reads
# NOTE(review): `s` is read here but never used below; the final print() also
# ignores GCD -- the script appears unfinished.
a, b, s=map(int, input().split())
def GCD(a, b):
    # Euclid's algorithm, iterative form: same results as the recursive
    # original (returns b as soon as a is divisible by b).
    while a % b:
        a, b = b, a % b
    return b
print() | true |
945a8d909881a4adb13e39f74bca3754a930b642 | Python | petervanya/DPDcoeffs | /order_param.py | UTF-8 | 3,665 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
"""Usage:
order_param.py <systems> <dumpfiles> <rc> [--save <fname>] [--gnuplot]
Calculate the order parameter for a binary mixture phase transition
(number of AB contacts divided by number of AA plus BB contacts
in a given cutoff distance rc).
Look for fA_*_AB_* dirs and collect all the dump files of the form "Dump/dump*.xyz".
Arguments:
<systems> The bin. mixture systems stored in dirs e.g. fA_0.1_AB_4.0, use regex
<dumpfiles> Regex for the dumpfiles in the <systems>/Dump/ directory
<rc> Cutoff distance in which to consider pairing
Options:
--save <fname> Save the final op matrix into file [default: temp.out]
--gnuplot Print in Gnuplot data format
pv278@cam.ac.uk, 12/10/15
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os, sys, glob
from docopt import docopt
import mat_ops
def read_dumpfile(dumpfile):
    """Read a LAMMPS xyz dumpfile into a numpy float matrix.

    The first two lines (atom count and comment header) are skipped; each
    remaining line is split on whitespace and converted to float.
    Fix: the file handle was previously never closed; use a context manager.
    """
    with open(dumpfile, "r") as fh:
        rows = [line.split() for line in fh.readlines()[2:]]
    return np.array(rows, order="F").astype(float)
def save_matrix(mat, fAs, ABs, fname):
    """Write `mat` to `fname` as tab-terminated values, one matrix row per line.

    NOTE(review): `fAs` and `ABs` are accepted (the original docstring promised
    to write them as headers) but are currently unused; they are kept for
    interface compatibility -- confirm the intended file format.
    Fixes: file handle is now closed via a context manager, and the completion
    message uses a Python-2/3-compatible print call.
    """
    M, N = mat.shape
    with open(fname, "w") as f:
        for i in range(M):
            for j in range(N):
                f.write(str(mat[i, j]) + "\t")
            f.write("\n")
    print("Matrix written in file " + fname)
def get_op(xyz_mat, rc):
    """Get number of 'contacts' between beads, DEPRECATED.

    Returns the ratio of mixed-type (AB) pair weight to same-type (AA + BB)
    pair weight for bead pairs within the cutoff distance rc.
    NOTE(review): assumes mat_ops.get_pair_dist2 returns rows of
    (type_i, type_j, distance) with bead types coded 1 and 2 -- confirm
    against the Fortran module.
    """
    AB, AA, BB = 0, 0, 0
    N = len(xyz_mat)  # NOTE(review): N is unused below
    arr = mat_ops.get_pair_dist2(xyz_mat)   # call into the Fortran extension
    # Keep only pairs within the cutoff.
    arr = arr[arr[:, 2] <= rc]
    mask = (arr[:, 0] == 1) & (arr[:, 1] == 1)
    AA = sum(arr[mask, 2])
    mask = (arr[:, 0] == 2) & (arr[:, 1] == 2)
    BB = sum(arr[mask, 2])
    # Mixed pairs: the two type columns differ.
    AB = sum(arr[:, 2][arr[:, 0] != arr[:, 1]])
    return float(AB)/(AA+BB)
def get_average_op(dumpfiles, rc, fA):
    """From given xyz files extract the average order parameter.

    For each dump file the local order parameter is computed from the
    per-bead neighbour counts (n1, n2) returned by mat_ops.get_local_op2
    (Goyal thesis definition); the per-file values are then averaged.
    NOTE(review): fA is only used by the commented-out rescaling line.
    """
    order_params = []
    for dumpfile in dumpfiles:
        A = read_dumpfile(dumpfile)
        # order_params.append(get_op(A, rc))  # common sense definition, NOT GOOD
        # order_params.append(mat_ops.get_local_op(A, rc))  # Goyal thesis, calling Fortran
        n1, n2 = mat_ops.get_local_op2(A, rc)  # Goyal thesis, alternative call
        # n1, n2 = n1/fA, n2/(1-fA)  # rescale to make sense
        op = float(np.dot(n1-n2, n1-n2))/np.dot(n1+n2, n1+n2)
        order_params.append(op)
    return np.average(order_params)
if __name__ == "__main__":
    # Python 2 script (print statements below). Parses CLI args with docopt,
    # scans fA_*_AB_* system directories, and builds the order-parameter
    # matrix indexed by (AB interaction strength, A-fraction).
    args = docopt(__doc__)
    # print args
    rc = float(args["<rc>"])
    systems = glob.glob(args["<systems>"])
    dumpfiles = args["<dumpfiles>"]
    # Parse fA and AB values out of directory names like "fA_0.1_AB_4.0".
    # NOTE(review): the comprehension variable `sys` shadows the imported sys
    # module (and leaks out of the comprehension on Python 2).
    fAs = list(set([float(sys.split("/")[0].split("_")[1]) for sys in systems]))
    ABs = list(set([float(sys.split("/")[0].split("_")[3]) for sys in systems]))
    fAs.sort()
    ABs.sort()
    print fAs, "\n", ABs
    op_mat = np.zeros((len(ABs), len(fAs)))
    for i in range(len(ABs)):
        for j in range(len(fAs)):
            system = "fA_" + str(fAs[j]) + "_AB_" + str(ABs[i])
            path = os.path.join(os.getcwd(), system, "Dump", dumpfiles)
            op_mat[i, j] = get_average_op(glob.glob(path), rc, fAs[j])
            if args["--gnuplot"]:
                print ABs[i], fAs[j], op_mat[i, j]
    save_matrix(op_mat, fAs, ABs, args["--save"])
    # NOT TESTED YET
    # elif args["plot"]:
    #     data = np.loadtxt(args["<file>"])
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, projection="3d")
    #     Axes3D.plot_surface(data)
    #     plt.show()
| true |
da5078e304920b2db25757cc9408c048127b8c8e | Python | PatrickTDewey/python_camp | /fundamentals/oop/modules_packages.py | UTF-8 | 149 | 2.546875 | 3 | [] | no_license | import urllib.request
# Fetch the Coding Dojo homepage (network I/O; requires connectivity).
# NOTE(review): the response object is never closed -- consider
# 'with urllib.request.urlopen(...) as response:'.
response = urllib.request.urlopen("https://www.codingdojo.com")
html = response.read()
# print(html)
print(dir(urllib.request)) | true |
ec69bca20f47a6eed7a322d07937e477bb405bc6 | Python | iansmcf/busybees | /busybees/job_list.py | UTF-8 | 1,225 | 3.171875 | 3 | [
"BSD-3-Clause"
] | permissive |
# each entry contains three pieces of information:
# 1) Job id number, which must be managed and remembered
# by the calling function, preferably via RNG
# 2) Command that should be run, in bash syntax
# 3) The status of the job, where: (integers deprecated, do not use)
# 0 = scheduled
# 1 = current
# 2 = completed
# 3 = cancelled
class JobList(object):
    """Simple in-memory job queue.

    Every entry is a three-element list [idnum, command, status]; status moves
    through 'scheduled' -> 'current' -> 'completed', or becomes 'cancelled'
    while still waiting. Job ids are managed by the caller.
    """
    def __init__(self):
        # Entries in the order they were scheduled.
        self.queue = []
    def add(self, idnum, command):
        """Register a new job in the 'scheduled' state."""
        self.queue.append([idnum, command, 'scheduled'])
    def list_all(self):
        """Return the live queue list (callers see later mutations)."""
        return self.queue
    def cancel(self, idnum):
        """Mark a still-waiting job as cancelled."""
        entry = self.queue[self.find_tuple(idnum, self.queue)]
        assert entry[2] == 'scheduled', "Job looks like it wasn't waiting."
        entry[2] = 'cancelled'
    def curr(self, idnum):
        """Promote a scheduled job to the currently-running state."""
        entry = self.queue[self.find_tuple(idnum, self.queue)]
        assert entry[2] == 'scheduled', "Job looks like it wasn't waiting."
        entry[2] = 'current'
    def done(self, idnum):
        """Mark the currently-running job as completed."""
        entry = self.queue[self.find_tuple(idnum, self.queue)]
        assert entry[2] == 'current', "Job looks like it wasn't running."
        entry[2] = 'completed'
    def find_tuple(self, val1, q):
        """Return the index of the entry whose job id equals val1 (None if absent)."""
        for index, entry in enumerate(q):
            if entry[0] == val1:
                return index
| true |
096f4e57ecacee36091c0e7a81f20109e526ff0a | Python | CarlosUrda/Coursera-Especializacion-Python-para-Todo-el-Mundo | /Libro Python para Informaticos/Capitulo 08/ejercicio8-2.py | UTF-8 | 793 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
# Print the weekday field of every mailbox-style "From" line in a file.
# (Original task, translated from Spanish:) Improve the code to handle
# all possible error cases.
# Python 2 script: uses raw_input and print statements.
__author__ = "Carlos A. Gómez Urda"
__copyright__ = "Copyright 2015"
__credits__ = ["Carlos A. Gómez Urda"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Carlos A. Gómez Urda"
__email__ = "carlosurda@yahoo.es"
__status__ = "Producción"
__date__ = "26/12/2015"
nombreDeArchivo = raw_input( "Introduce el archivo: ")
try:
    archivo = open( nombreDeArchivo)
except:
    # NOTE(review): bare except also hides KeyboardInterrupt and typos;
    # user-facing Spanish messages kept unchanged.
    print "No existe el archivo", nombreDeArchivo
    quit()
for linea in archivo:
    palabras = linea.split()
    # Skip lines that are not of the form: From <address> <weekday> ...
    if len( palabras) < 3 or palabras[0] != 'From' or not palabras[2]:
        continue
    print palabras[2]
archivo.close()
| true |
e95de9e9bb277ffa4f1fe1c17ff88e089800c4c4 | Python | vinyaa/Deliverable4 | /test.py | UTF-8 | 17,516 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from nose.plugins.skip import Skip, SkipTest
import unittest
import sys
import nose
import time
# Primary Test Center address
testcenter_url = 'https://testcenter.duolingo.com/'
# All of the supported languages in Test Center
languages = ("en", "es", "fr", "de", "it", "pt", "ru", "hi", "hu", "tr")
# Duolingo account credentials for the site.
# Change them to the appropriate values, but don't check them in!
loginID = ""
loginPass = ""
# Command-line arguments for the browser and platform respectively
# Supported browsers: "internet explorer", "firefox", "safari", and "chrome"
# Supported platforms: MAC, VISTA, WIN8
browserName = sys.argv[1]  # NOTE(review): IndexError if the args are omitted
systemPlatform = sys.argv[2]
# Desired-capabilities dict handed to webdriver.Remote in setup_module().
capabilities = {
    "browserName": browserName,
    "platform": systemPlatform,
}
# Setup a browser session for the chosen OS/browser combination
def setup_module():
    """Open one remote WebDriver session shared by every test in this module."""
    global driver
    driver = webdriver.Remote(desired_capabilities=capabilities)
    # Wait up to 30 s for elements to appear before find_element raises.
    driver.implicitly_wait(30)
    driver.maximize_window()
# Make sure that the page title is the same for each language.
# This not only makes branding consistent, but is also a sanity
# check that a page renders for every supported language.
def check_page_title(language, testcenter_url, title):
    """Open the page for `language` and assert its document title equals `title`."""
    driver.get(testcenter_url + language)
    assert (driver.title == title)
# Generate an individual page title test for each language
# The title of each page should be "Duolingo Test Center"
def test_page_title():
    """Yield one title check per supported UI language (nose generator test)."""
    expected_title = "Duolingo Test Center"
    for lang in languages:
        yield check_page_title, lang, testcenter_url, expected_title
# Generate tests to check that the primary action button is translated properly for the top 3 language markets
def test_action_button_translation():
    """Yield a translation check for each of the top three language markets."""
    expected_messages = (
        "Try it while it's free",
        "Inténtelo mientras es gratis",
        "Essaie pendant que c'est gratuit",
    )
    for market, message in zip(languages[:3], expected_messages):
        yield check_action_button_translation, market, testcenter_url, message
# The actual test for the top 3 language translations
def check_action_button_translation(language, testcenter_url, message):
    """Assert the primary action button shows the localised `message`.

    NOTE(review): the UTF-8 encode makes this a bytes-vs-str comparison on
    Python 3; the py2 print statements at the bottom of this file suggest it
    targets Python 2 -- confirm before porting.
    """
    driver.get(testcenter_url + language)
    buttonText = driver.find_element_by_xpath('//div[@id="app"]/section[1]/div[1]/div[1]/div[1]/div/div').text.encode('utf8')
    assert (buttonText == message)
# Make sure that the users are taken to the Google play page and are presented the installation button for
# the Test Center Android application
def test_android_app_link():
    """Follow the Android link and check Google Play shows an Install button."""
    driver.get(testcenter_url + 'en')
    androidLink = driver.find_element_by_xpath('//div[@id="app"]/section[1]/div[1]/div[1]/div[1]/div/ul/li[1]/a').get_attribute('href')
    driver.get(androidLink)
    # NOTE(review): deep XPath into the Play Store DOM -- brittle against
    # Google layout changes.
    installButton = driver.find_element_by_xpath('//*[@id="body-content"]/div[1]/div[1]/div[2]/div[4]/span/button/span[2]').text
    assert (installButton == "Install")
# Hover over the button labeled "Try it while it's free", click the link for the Test Center Chrome web application,
# If the browser is Chrome, an unauthenticated user should get a dialog to create an account
# If the browser is anything else, the user should be taken to a page where he/she can download Chrome
def test_chrome_app_link():
    """Follow the Chrome web-app link: expect a login dialog on Chrome and a
    Chrome download page on every other browser."""
    # The web driver for Safari does not yet support the move_to_element method so this test will not function properly
    if (driver.capabilities['browserName'] == "safari"):
        raise SkipTest
    driver.get(testcenter_url + 'en')
    button = driver.find_element_by_xpath('//div[@id="app"]/section[1]/div[1]/div[1]/div[1]/div/div')
    hoverButton = ActionChains(driver).move_to_element(button)
    hoverButton.perform()
    loginBox = driver.find_element_by_xpath('//div[@id="app"]/section[1]/div[1]/div[2]/p')
    chromeLink = driver.find_element_by_css_selector('.start-chrome')
    wait = WebDriverWait(driver, 30)
    wait.until(EC.visibility_of(chromeLink))
    if chromeLink.is_displayed():
        chromeLink.click()
    # Users are redirected to a user creation page if they are not currently logged in when using Chrome
    # If they are using other browsers, they are redirected to a Chrome download page
    if driver.capabilities['browserName'] == 'chrome':
        wait = WebDriverWait(driver, 30)
        wait.until(EC.visibility_of(loginBox))
        loginText = driver.find_element_by_xpath('//div[@id="app"]/section[1]/div[1]/div[2]/p').text.strip()
        assert (loginText == "You need a Duolingo account to save your test results.")
    else:
        try:
            elem = driver.find_element_by_xpath("//*[contains(.,'Download Chrome')]")
            assert True
        except:
            # NOTE(review): bare except also hides unrelated WebDriver errors.
            assert False
# Make sure the user gets an error message when trying to login with an invalid user account
def test_invalid_login():
    """Bogus credentials must produce the 'Failed login' error message."""
    driver.get(testcenter_url + 'en')
    driver.find_element_by_id('sign-in-btn').click()
    usernameForm = driver.find_element_by_id('top_login')
    usernameForm.send_keys("alskdjlksjdlffei392j32hf2kd")
    passwordForm = driver.find_element_by_id('top_password')
    passwordForm.send_keys("alskdjlksjdlffei392j32hfdsdf2kd")
    driver.find_element_by_id('login-button').click()
    errorText = driver.find_element_by_xpath('/html/body/div[3]/div/div/p').text.strip()
    assert (errorText == "ERROR: Failed login")
# If the user clicks on the "Forgot password" link, the user should be redirected to a page where
# the password can be reset by providing an e-mail address
def test_forgotten_password():
    """Clicking 'Forgot password' must lead to the e-mail reset form.

    Fix: the original wrapped the lookup in a bare 'except:' with a dead
    'assert True'; narrowed to Exception and added a failure message.
    """
    driver.get(testcenter_url + 'en')
    driver.find_element_by_id('sign-in-btn').click()
    driver.find_element_by_xpath('//*[@id="login-form"]/li[8]/a').click()
    # Make sure the user is redirected to the reset password page where they
    # can type in their e-mail address.
    try:
        driver.find_element_by_id('email')
    except Exception:
        assert False, "Password-reset e-mail field not found"
# Login with a previously-known valid username and password
# After a sucessful login, the user should have a hover menu in the top right corner of the screen
# labeled with their username
def test_valid_login():
    """Log in with the configured credentials; the username must then appear
    in the top-right hover menu."""
    driver.get(testcenter_url + 'en')
    driver.find_element_by_id('sign-in-btn').click()
    usernameForm = driver.find_element_by_id('top_login')
    usernameForm.send_keys(loginID)
    passwordForm = driver.find_element_by_id('top_password')
    passwordForm.send_keys(loginPass)
    driver.find_element_by_id('login-button').click()
    try:
        # Find the user's name in the top right corner of the page to indicate that the login was successful
        username = driver.find_element_by_class_name('name').text.strip().lower()
        assert (username == loginID.lower())
    except:
        # NOTE(review): bare except turns any WebDriver failure into assert False.
        assert False
#
# Note that the following sequence of tests are only supported in the Chrome browser.
# These tests will be skipped for other platforms
#
# Make sure that the sample test button is available to authenticated Chrome users
def test_chrome_sample_test_visible():
    """The sample-questions button must be visible to authenticated Chrome users."""
    # This feature only works for the Chrome browser with a physical camera available
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    try:
        # Make sure the sample test option is available for a Chrome user
        sample_label = driver.find_element_by_class_name('sample-questions').text.strip()
        assert (sample_label == "Sample Questions")
    except:
        # NOTE(review): bare except also hides unrelated WebDriver errors.
        assert False
# Make sure that the certified test button is available to authenticated Chrome users
def test_chrome_certified_test_visible():
    """The certified-exam button must be present for authenticated Chrome users.

    Fix: the original used a bare 'except:' (which also swallows
    KeyboardInterrupt) and a dead 'assert True' inside the try body.
    """
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    try:
        # The certified test option should be available
        driver.find_element_by_class_name('certified-exam')
    except Exception:
        assert False, "certified-exam element not found"
# Clicking the sample questions button should start sample questions by redirecting to the "sample" page
def test_sample_questions_works():
    """Starting the sample questions must navigate to the /sample page."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    driver.get(testcenter_url)
    driver.find_element_by_class_name("sample-questions").click()
    assert driver.current_url == "https://testcenter.duolingo.com/sample"
# Clicking quit button on the sample questions page should return the user to the main Test Center page
def test_quit_sample_splash():
    """Quitting from the sample splash page returns to the Test Center home."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    driver.get(testcenter_url + "sample")
    driver.find_element_by_class_name("left").click()
    assert driver.current_url == testcenter_url
# Clicking start button on the sample questions page starts the language listening challenge
def test_start_sample():
    """The start button on the sample page must lead to the listening challenge."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    driver.get(testcenter_url + "sample")
    start_button = driver.find_element_by_class_name("right")
    start_button.click()
    try:
        driver.find_element_by_class_name("listen-challenge")
        assert True
    except:
        # NOTE(review): bare except also hides unrelated WebDriver errors.
        assert False
# During the listening challenge, the user types what is heard from a voice recording
# To test this module, type "She is not old.", press enter, and advance to the speaking challenge
def test_listen_module():
    """Type the dictated sentence; submitting should reach the speak challenge."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    driver.get(testcenter_url + "sample")
    start_button = driver.find_element_by_class_name("right")
    start_button.click()
    listening = driver.find_element_by_xpath("//div[@id ='challenge']/div[1]/div[2]/input[1]")
    listening.send_keys("She is not old")
    listening.send_keys(Keys.RETURN)
    try:
        driver.find_element_by_class_name("speak-challenge")
        assert True
    except:
        # NOTE(review): bare except also hides unrelated WebDriver errors.
        assert False
# During the speaking challenge, the user is prompted to speak the phrase displayed on the screen
# To test this module, click the record-button, wait 2 seconds, click the stop button, and submit the answers
# This should take you to the next challenge: vocabulary selection
def test_speak_module():
    """Record two seconds of audio, submit, and reach the vocabulary challenge."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    # Start recording voice
    mic = driver.find_element_by_id("record-button")
    mic.click()
    time.sleep(2)
    stop = driver.find_element_by_id("stop-button")
    stop.click()
    submit = driver.find_element_by_xpath("//footer/button[1]")
    submit.click()
    try:
        # Find a word on the vocab test (the next test) so that we know we have passed this one
        driver.find_element_by_xpath("//*[contains(.,'both')]")
        assert True
    except:
        # NOTE(review): bare except also hides unrelated WebDriver errors.
        assert False
# The user is asked to select the valid English words out of a set of labeled buttons
# To test this module, click the buttons that say [fine good easy bag walk both may], and click the submit button
def test_vocab_module():
    """Select the real English words in the vocabulary challenge and submit."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    valid_words = {"fine", "good", "easy", "bag", "walk", "both", "may"}
    for candidate in driver.find_elements_by_class_name("btn"):
        if candidate.text.strip() in valid_words:
            candidate.click()
    driver.find_element_by_xpath("//footer/button[1]").click()
# The dropdown sentence selection challenge presents the user with an incomplete sentence and the user must select the most
# valid words for the particular context from a dropdown menu
# To test this module, select [has were was became swam] and submit. The next page should display "Sample questions complete!"
def test_dropdown_module():
    """Pick the correct dropdown words; the completion banner must then appear."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    # Find all of the dropdown word selectors
    select_1 = driver.find_element_by_xpath('//*[@id="dropout-f84b22d51ba03e7c198a1dd22ad7a88e"]/p/label[1]/select')
    select_2 = driver.find_element_by_xpath('//*[@id="dropout-f84b22d51ba03e7c198a1dd22ad7a88e"]/p/label[2]/select')
    select_3 = driver.find_element_by_xpath('//*[@id="dropout-f84b22d51ba03e7c198a1dd22ad7a88e"]/p/label[3]/select')
    select_4 = driver.find_element_by_xpath('//*[@id="dropout-f84b22d51ba03e7c198a1dd22ad7a88e"]/p/label[4]/select')
    select_5 = driver.find_element_by_xpath('//*[@id="dropout-f84b22d51ba03e7c198a1dd22ad7a88e"]/p/label[5]/select')
    # NOTE(review): the hashed element id above is baked into the page for this
    # particular sample sentence -- brittle against content changes.
    Select(select_1).select_by_value("has")
    Select(select_2).select_by_value("were")
    Select(select_3).select_by_value("was")
    Select(select_4).select_by_value("became")
    Select(select_5).select_by_value("swam")
    submit = driver.find_element_by_xpath("//footer/button[1]")
    submit.click()
    complete_message = driver.find_element_by_tag_name('h2').text.strip()
    expected_message = "Sample questions complete!"
    assert (complete_message == expected_message)
# Clicking the back to home button when the sample test is complete should return the user to the main Test Center page
def test_back_to_home():
    """'Back to home' after finishing the sample returns to the front page."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    driver.find_element_by_class_name("left").click()
    assert driver.current_url == testcenter_url
# Clicking the take test button after the simple test is complete takes the user to the actual certification exam
def test_take_real_test():
    """After re-doing the whole sample flow, 'take test' must open the real exam."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    # Take the sample test all over again
    test_start_sample()
    test_listen_module()
    test_speak_module()
    test_vocab_module()
    test_dropdown_module()
    # Make sure we end up on the real test page
    real_test = driver.find_element_by_class_name("right")
    real_test.click()
    new_url = driver.current_url
    test_url = "https://testcenter.duolingo.com/test"
    assert (new_url == test_url)
# Clicking the quit button (the (X) in the upper-right corner of the page) during the sample exam,
# and then clicking cancel returns the user to the sample questions
def test_quit_cancel():
    """Quit (X) then Cancel during the sample exam stays on the sample page."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    # Start sample exam
    test_start_sample()
    driver.find_element_by_class_name("leave-exam").click()
    # Click cancel to return to test
    driver.find_element_by_xpath("//button[2]").click()
    new_url = driver.current_url
    assert (new_url == testcenter_url + "sample")
# Clicking quit, then ok returns "You left this test". Click ok and return to testcenter_url and camera off
# Clicking the quit button during the sample exam, and then clicking ok displays "You left this test"
# to the user and redirects him/her to the main Test Center page
def test_quit_test():
    """Quit (X) then OK confirms leaving and returns to the main Test Center page."""
    if (driver.capabilities['browserName'] != "chrome" or driver.capabilities['platform'] != "MAC"):
        raise SkipTest
    # Start sample exam
    test_start_sample()
    driver.find_element_by_class_name("leave-exam").click()
    # Click ok to quit
    driver.find_element_by_xpath("/html/body/div[3]/div/div/button[2]").click()
    # Click ok to leave test
    driver.find_element_by_xpath("/html/body/div[3]/div/div/button").click()
    new_url = driver.current_url
    assert (new_url == testcenter_url)
# Make sure a user can properly logout of Testcenter by hovering over his/her username
# The user should be taken back to the Test Center front page
def test_logout():
    """Logging out via the username hover menu returns to the front page."""
    # The web driver for Safari does not yet support the move_to_element method so this test will not function properly
    if (driver.capabilities['browserName'] == "safari"):
        raise SkipTest
    driver.get(testcenter_url)
    username = driver.find_element_by_class_name('name')
    hoverButton = ActionChains(driver).move_to_element(username)
    hoverButton.perform()
    # NOTE(review): click() returns None, so logoutButton is always None.
    logoutButton = driver.find_element_by_id("header_userdrop_logout").click()
    # Look for text that is expected to be on the front page
    try:
        expectedText = driver.find_element_by_xpath('//*[@id="app"]/section[1]/div[1]/div[1]/div[1]/h1').text.strip()
        assert (expectedText == "Certify your language proficiency")
    except:
        # NOTE(review): bare except also hides unrelated WebDriver errors.
        assert False
# Close the browser session
def teardown_module():
    """Close the shared WebDriver session once all tests have run."""
    driver.quit()
if __name__ == "__main__":
    # Python 2 entry point: announces the target combination, then hands
    # control to the nose runner.
    print "TESTING: " + browserName + " on " + systemPlatform
    print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
    # The unit testing framework takes over the command line arguments, so removing ours to not cause trouble
    del sys.argv[1:]
    # Start the unit tests
    nose.runmodule(argv=[__file__, '-v'])
| true |
25cb02a49528ac5238494a6d433bf50ee86ffd8b | Python | WenzDaniel/straxen | /bin/bootstrax | UTF-8 | 61,835 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
"""
Bootstrax: XENONnT online processing manager
=============================================
How to use
----------------
<activate conda environment>
bootstrax --production
----------------
First draft: Jelle Aalbers, 2018
With additional input from: Joran Angevaare, 2020
This script watches for new runs to appear from the DAQ, then starts a
strax process to process them. If a run fails, it will retry it with
exponential backoff.
You can run more than one bootstrax instance, but only one per machine.
If you start a second one on the same machine, it will try to kill the
first one.
Philosophy
----------------
Bootstrax has a crash-only / recovery first philosophy. Any error in
the core code causes a crash; there is no nice exit or mandatory
cleanup. Bootstrax focuses on recovery after restarts: before starting
work, we look for and fix any mess left by crashes.
This ensures that hangs and hard crashes do not require expert tinkering
to repair databases. Plus, you can just stop the program with ctrl-c
(or, in principle, pulling the machine's power plug) at any time.
Errors during run processing are assumed to be retry-able. We track the
number of failures per run to decide how long to wait until we retry;
only if a user marks a run as 'abandoned' (using an external system,
e.g. the website) do we stop retrying.
Mongo documents
----------------
Bootstrax records its status in a document in the 'bootstrax' collection
in the runs db. These documents contain:
- **host**: socket.getfqdn()
- **time**: last time this bootstrax showed life signs
- **state**: one of the following:
- **busy**: doing something
- **idle**: NOT doing something; available for processing new runs
Additionally, bootstrax tracks information with each run in the
'bootstrax' field of the run doc. We could also put this elsewhere, but
it seemed convenient. This field contains the following subfields:
- **state**: one of the following:
- **considering**: a bootstrax is deciding what to do with it
- **busy**: a strax process is working on it
- **failed**: something is wrong, but we will retry after some
amount of time.
- **abandoned**: bootstrax will ignore this run
- **reason**: reason for last failure, if there ever was one
(otherwise this field does not exist). Thus, it's quite possible for
this field to exist (and show an exception) when the state is 'done':
that just means it failed at least once but succeeded later. Tracking
failure history is primarily the DAQ log's responsibility; this message
is only provided for convenience.
- **n_failures**: number of failures on this run, if there ever was
one (otherwise this field does not exist).
- **next_retry**: time after which bootstrax might retry processing
this run. Like 'reason', this will refer to the last failure.
Finally, bootstrax outputs the load on the eventbuilder machine(s)
whereon it is running to a collection in the DAQ database into the
capped collection 'eb_monitor'. This collection contains information on
what bootstrax is thinking of at the moment.
- **disk_used**: used part of the disk whereto this bootstrax instance
is writing to (in percent).
"""
__version__ = '0.5.12'
import argparse
from datetime import datetime, timedelta, timezone
import logging
import multiprocessing
import npshmex
import os
import os.path as osp
import signal
import socket
import shutil
import time
import traceback
from tqdm import tqdm
import numpy as np
import pymongo
from psutil import pid_exists, disk_usage
import pytz
import strax
import straxen
import threading
import utilix
logging.basicConfig(
level=logging.INFO,
format='%(relativeCreated)6d %(threadName)s %(name)s %(message)s')
parser = argparse.ArgumentParser(
description="XENONnT online processing manager")
parser.add_argument(
'--debug', action='store_true',
help="Start strax processes with debug logging.")
parser.add_argument(
'--profile', type=str, default='false',
help="Option to run strax in profiling mode. argument specifies the name "
"of the profile if not 'false'. Use e.g. 'date'.prof")
parser.add_argument(
'--cores', type=int, default=8,
help="Maximum number of workers to use in a strax process. Set to -1 for "
"all available cores")
parser.add_argument(
'--target', default='event_info',
help="Strax data type name that should be produced")
parser.add_argument(
'--fix_target', action='store_true',
help="Don't allow bootstrax to switch to a different target for special runs")
parser.add_argument(
'--infer_mode', action='store_true',
help="Determine best number max-messages and cores for each run "
"automatically. Overrides --cores and --max_messages")
parser.add_argument(
'--delete_live', action='store_true',
help="Delete live_data after successful processing of the run.")
parser.add_argument(
'--production', action='store_true',
help="Run bootstrax in production mode. Assuming test mode otherwise to "
"prevent interactions with the runs-database")
parser.add_argument(
'--ignore_checks', action='store_true',
help="Do not use! This disables checks on e.g. the timestamps! Should only "
"be used if some run is very valuable but some checks are failing.")
parser.add_argument(
'--sub_d_targets', nargs='*',
default=['raw_records_he', 'raw_records_nv', 'raw_records_mv'],
help="Target(s) for other sub-detectors. If not produced automatically "
"when processing tpc data, st.make the requested data later.")
parser.add_argument(
'--max_messages', type=int, default=10,
help="number of max mailbox messages")
actions = parser.add_mutually_exclusive_group()
actions.add_argument(
'--process', type=int, metavar='NUMBER',
help="Process a single run, regardless of its status.")
actions.add_argument(
'--fail', nargs='+', metavar=('NUMBER', 'REASON'),
help="Fail run number, optionally with reason")
actions.add_argument(
'--abandon', nargs='+', metavar=('NUMBER', 'REASON'),
help="Abandon run number, optionally with reason")
actions.add_argument(
'--undying', action='store_true',
help="Except any error and ignore it")
args = parser.parse_args()
##
# Configuration
##
print(f'---\n bootstrax version {__version__}\n---')
# The folder that can be used for testing bootstrax (i.e. non production
# mode). It will be written to:
test_data_folder = ('/nfs/scratch/bootstrax/' if
os.path.exists('/nfs/scratch/bootstrax/')
else './bootstrax/')
# Timeouts in seconds
timeouts = {
# Waiting between escalating SIGTERM -> SIGKILL -> crashing bootstrax
# when trying to kill another process (usually child strax)
'signal_escalate': 3,
# Minimum waiting time to retry a failed run
# Escalates exponentially on repeated failures: 1x, 5x, 25x, 125x, 125x, 125x, ...
# Some jitter is applied: actual delays will randomly be 0.5 - 1.5x as long
'retry_run': 60,
# Maximum time for strax to complete a processing
# if exceeded, strax will be killed by bootstrax
'max_processing_time': 7200,
# Sleep between checking whether a strax process is alive
'check_on_strax': 10,
# Maximum time a run is 'busy' without a further update from
# its responsible bootstrax. Bootstrax normally updates every
# check_on_strax seconds, so make sure this is substantially
# larger than check_on_strax.
'max_busy_time': 120,
# Maximum time a run is in the 'considering' state
# if exceeded, will be labeled as an untracked failure
'max_considering_time': 60,
# Minimum time to wait between database cleanup operations
'cleanup_spacing': 60,
# Sleep time when there is nothing to do
'idle_nap': 10,
# If we don't hear from a bootstrax on another host for this long,
# remove its entry from the bootstrax status collection
# Must be much longer than idle_nap and check_on_strax!
'bootstrax_presumed_dead': 300,
# Ebs3-5 normally do all the processing. However if all are busy
# for a longer period of time, the ebs0-2 can also help with
# processing.
'eb3-5_max_busy_time': 5 * 60,
# Bootstrax writes it's state to the daq-database. To have a backlog we store this
# state using a TTL collection. To prevent too many entries in this backlog, only
# create new entries if the previous entry is at least this old (in seconds).
'min_status_interval': 60
}
# The disk that the eb is writing to may fill up at some point. The data should
# be written to datamanager at some point. This may clean up data on the disk,
# hence, we can check if there is sufficient diskspace and if not, wait a while.
# Below are the max number of times and number of seconds bootstrax will wait.
wait_diskspace_max_space_percent = 90
wait_diskspace_n_max = 60 * 24 * 7 # times
wait_diskspace_dt = 60 # seconds
if timeouts['bootstrax_presumed_dead'] < wait_diskspace_dt:
raise ValueError("wait_diskspace_dt too large")
# Fields in the run docs that bootstrax uses. Pay attention to the trailing spaces!
bootstrax_projection = ("name start end number bootstrax status mode detectors "
"data.host "
"data.type "
"data.location "
"daq_config.processing_threads "
"daq_config.compressor "
"daq_config.strax_fragment_payload_bytes "
"daq_config.strax_chunk_length "
"daq_config.strax_chunk_overlap".split())
# Filename for temporary storage of the exception
# This is used to communicate the exception from the strax child process
# to the bootstrax main process
exception_tempfile = 'last_bootstrax_exception.txt'
# The name of the thread that is opened to delete live_data
delete_thread_name = 'DeleteThread'
# boostrax state for 'dead' or old entries in the bs_coll
dead_state = 'dead_bootstrax'
# The maximum time difference (s) allowed between the timestamps in the data and the
# duration of the run (from the runs metadata). Fail if the difference is larger than:
max_timetamp_diff = 5
# The maximum number of retries for processing a run. After this many times of retrying
# to process a run, the DAQ-group has to either manually fix this run or manually fail it.
max_n_retry = 20
# Bootstrax retries runs multiple times. If there have been this many fails we could lower
# the resources to be somewhat more lenient on the CPU and RAM of the eventbuilders. Use
# this option with care as we might be running in a sub-optimal mode that may not be
# noticed and eventbuilders may be spending more time on trying to reprocess failed runs.
lower_resources_after_n_failures = 10
##
# Initialize globals (e.g. rundb connection)
##
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M')
log = logging.getLogger()
hostname = socket.getfqdn()
# Set the output folder
output_folder = '/data/xenonnt_processed/'
if not args.production:
# This means we are in some test mode
wait_diskspace_max_space_percent = 80
output_folder = test_data_folder
if not os.path.exists(output_folder):
log.warning(f'Creating {output_folder}')
os.mkdir(output_folder)
log.warning(
f'\n---------------'
f'\nBe aware, bootstrax not running in production mode. Specify with --production.'
f'\nWriting new data to {output_folder}. Not saving this location in the runsDB.'
f'\nNot writing to the runs-database.'
f'\n---------------')
time.sleep(5)
else:
if not args.delete_live:
log.warning("Production mode is designed to run with '--delete_live'\n"
"please restart bootstrax")
if not args.infer_mode:
log.warning("Better performance is expected in production mode with "
"'--infer_mode'\nplease restart bootstrax")
def new_context(cores=args.cores, max_messages=args.max_messages, timeout=300):
    """Create strax context that can access the runs db

    :param cores: max workers for the strax process. Note: the default is
        evaluated once at definition time from the parsed CLI args.
    :param max_messages: max mailbox messages passed to the context
    :param timeout: strax mailbox timeout in seconds
    :return: a straxen online context writing to output_folder
    """
    # We use exactly the same logic of straxen to access the runs DB;
    # this avoids duplication, and ensures strax can access the runs DB if we can
    context = straxen.contexts.xenonnt_online(
        output_folder=output_folder,
        we_are_the_daq=True,
        allow_multiprocess=cores > 1,
        allow_shm=cores > 1,
        max_messages=max_messages,
        timeout=timeout)
    if not args.production:
        # Test mode: write to a plain local directory only, keep the runs DB
        # out of the storage frontends so nothing is registered there.
        context.storage = [strax.DataDirectory(output_folder)]
    return context
st = new_context()
# DAQ database
daq_db_name = 'daq'
daq_uri = straxen.get_mongo_uri(header='rundb_admin',
user_key='mongo_daq_username',
pwd_key='mongo_daq_password',
url_key='mongo_daq_url')
daq_client = pymongo.MongoClient(daq_uri)
daq_db = daq_client[daq_db_name]
bs_coll = daq_db['eb_monitor']
ag_stat_coll = daq_db['aggregate_status']
log_coll = daq_db['log']
# Runs database
run_dbname = straxen.uconfig.get('rundb_admin', 'mongo_rdb_database')
run_collname = 'runs'
if args.production:
run_db = st.storage[0].client[run_dbname]
else:
# Please note, this is a read only account on the rundb
run_uri = straxen.get_mongo_uri()
run_client = pymongo.MongoClient(run_uri)
run_db = run_client[run_dbname]
run_coll = run_db[run_collname]
run_db.command('ping')
run_coll = run_db[run_collname]
# Ping the databases to ensure the mongo connections are working
if not args.undying:
run_db.command('ping')
daq_db.command('ping')
def main():
    """Entry point: dispatch to a manual action (--fail / --abandon /
    --process) or start the normal processing loop.

    :raises IOError: if output_folder is not writable
    :raises ValueError: if --process names a run that does not exist
    """
    # Check that writing access is OK, otherwise report to the database and die
    if os.access(output_folder, os.W_OK) is not True:
        message = f'No writing access to {output_folder}'
        log_warning(message, priority='fatal')
        raise IOError(message)
    if args.cores == -1:
        # Use all of the available cores on this machine
        args.cores = multiprocessing.cpu_count()
        log.info(f'Set cores to n_tot, using {args.cores} cores')
    if args.fail:
        args.fail += ['']  # Provide empty reason if none specified
        manual_fail(number=int(args.fail[0]), reason=args.fail[1])
    elif args.abandon:
        number = int(args.abandon[0])
        if len(args.abandon) > 1:
            manual_fail(number=number, reason=args.abandon[1])
        abandon(number=number)
    # Bug fix: 'elif args.process:' treated run number 0 as "no argument";
    # compare against None (the argparse default) instead.
    elif args.process is not None:
        t_start = now()
        number = args.process
        rd = consider_run({'number': number})
        if rd is None:
            message = f"Trying to process single run but no run numbered {number} exists"
            log_warning(message, priority='fatal')
            raise ValueError(message)
        process_run(rd)
        log.info(
            f'bootstrax ({hostname}) finished run {number} in {(now() - t_start).seconds} seconds')
        # Don't exit while the live-data deletion thread is still working
        wait_on_delete_thread()
    else:
        # Start processing
        main_loop()
##
# Main loop
##
def main_loop():
    """Infinite loop looking for runs to process

    One iteration: check disk space, check whether this host should process
    at all, claim a new run (or a failed run due for retry), process it,
    and periodically run DB cleanup. In non-production mode the loop exits
    once the run collection has been exhausted.
    """
    # Ensure we're the only bootstrax on this host
    any_other_running = list(bs_coll.find({'host': hostname,
                                           'pid': {'$ne': os.getpid()}}))
    for x in any_other_running:
        if pid_exists(x['pid']) and x['pid']:
            log.warning(f'Bootstrax already running with PID {x["pid"]}, trying to kill it.')
            kill_process(x['pid'])
    # Register ourselves
    set_state('starting')
    t_start = now()
    next_cleanup_time = now()
    # keep track of the ith run that we have seen when we are not in production mode
    # NOTE(review): failed_runs_seen starts at 1 while new_runs_seen starts at 0,
    # which in test mode skips the most recently failed run -- confirm intended.
    new_runs_seen, failed_runs_seen = 0, 1
    while True:
        log.info(f'bootstrax running for {(now() - t_start).seconds} seconds')
        # Blocks until the local disk has room to write (or raises)
        sufficient_diskspace()
        log.info("Looking for work")
        if not eb_can_process():
            # Nothing to do, let's do some cleanup and set to idle
            delete_temp_files()
            set_state('idle')
            time.sleep(timeouts['idle_nap'])
            continue
        set_state('busy')
        # Check resources are still OK, otherwise crash / reboot program
        # Process new runs
        rd = consider_run({"bootstrax.state": None},
                          test_counter=new_runs_seen)
        if rd is not None:
            new_runs_seen += 1
            process_run(rd)
            continue
        # Scan DB for runs with unusual problems
        if now() > next_cleanup_time:
            cleanup_db()
            next_cleanup_time = now(plus=timeouts['cleanup_spacing'])
        # Any failed runs to retry?
        # Only try one run, we want to be back for new runs quickly
        rd = consider_run({"bootstrax.state": 'failed',
                           "bootstrax.n_failures": {'$lt': max_n_retry},
                           "bootstrax.next_retry": {'$lt': now()}
                           }, test_counter=failed_runs_seen)
        if rd is not None:
            failed_runs_seen += 1
            process_run(rd)
            continue
        # Nothing to do, let's do some cleanup
        delete_temp_files()
        if not args.production:
            log.info(f'We have gone through the rundDB in a readonly mode there are no '
                     f'runs left. We looked at {new_runs_seen} new runs and '
                     f'{failed_runs_seen} previously failed runs.')
            break
        log.info("No work to do, waiting for new runs or retry timers")
        set_state('idle')
        time.sleep(timeouts['idle_nap'])
##
# General helpers
##
def now(plus=0):
    """Return the current UTC time as a timezone-aware datetime.

    :param plus: seconds to shift the returned time by (may be negative)
    """
    # stdlib timezone.utc (already imported at the top of this file) is
    # equivalent to pytz.utc for a plain "aware UTC now" timestamp and
    # avoids the third-party dependency here.
    return datetime.now(timezone.utc) + timedelta(seconds=plus)
def kill_process(pid, wait_time=None):
    """Kill process pid, or raise RuntimeError if we cannot

    Escalates SIGTERM -> SIGKILL, waiting wait_time seconds between
    attempts; raises if the process survives both signals.

    :param pid: process id to kill
    :param wait_time: time to wait before escalating signal strength
        (defaults to timeouts['signal_escalate'])
    :raises RuntimeError: if the process still exists after SIGKILL
    """
    if wait_time is None:
        wait_time = timeouts['signal_escalate']
    if not pid_exists(pid):
        return
    for sig in [signal.SIGTERM, signal.SIGKILL, 'die']:
        time.sleep(wait_time)
        if not pid_exists(pid):
            return
        # Bug fix: this used to test `signal == 'die'`, comparing the signal
        # *module* to a string (never true), so the final iteration called
        # os.kill(pid, 'die') and raised TypeError instead of the intended
        # RuntimeError.
        if sig == 'die':
            message = f"Could not kill process {pid}"
            log_warning(message, priority='fatal')
            raise RuntimeError(message)
        os.kill(pid, sig)
def override_target(rd):
    """Decide whether the processing target should be replaced for this run.

    LED-calibration runs are processed to 'led_calibration'; diagnostic
    modes and runs without the TPC only get 'raw_records'.

    :param rd: rundoc
    :return: replacement target name, or False to keep the requested target
    """
    if args.fix_target:
        # User asked us never to deviate from the requested target
        return False
    # DAQ modes that need a special (reduced) processing target
    led_modes = ['pmtgain']
    diagnostic_modes = ['exttrig', 'noise', 'pmtap']
    mode = str(rd.get('mode'))
    detectors = list(rd.get('detectors'))
    log.info(f'override_target::\tmode is {mode}, changing target if needed')
    if any(led_mode in mode for led_mode in led_modes):
        return 'led_calibration'
    if any(diag_mode in mode for diag_mode in diagnostic_modes):
        return 'raw_records'
    if 'tpc' not in detectors:
        # Don't make any high level if we don't have a TPC
        return 'raw_records'
    return False
def set_state(state, update_fields=None):
    """Inform the bootstrax collection we're in a different state

    if state is None, leave state unchanged, just update heartbeat time

    :param state: new state string, or None/falsy to keep the previous state
    :param update_fields: optional dict merged into the status document
    """
    # Find the last message of this host
    previous_entry = bs_coll.find_one({'host': hostname},
                                      sort=[('time', pymongo.DESCENDING)])
    if not state:
        # Heartbeat-only update: keep whatever state we reported last
        state = 'None' if not previous_entry else previous_entry.get('state')
    bootstrax_state = dict(
        host=hostname,
        pid=os.getpid(),
        time=now(),
        state=state,
        target=args.target,
        max_cores=args.cores,
        max_messages=args.max_messages,
        undying=args.undying,
        production_mode=args.production
    )
    if update_fields:
        bootstrax_state.update(update_fields)
    # Only insert a fresh document when the previous one is older than
    # min_status_interval; otherwise update it in place to limit the number
    # of entries in the (TTL) status collection.
    if (not previous_entry or
            (now() - previous_entry['_id'].generation_time).seconds > timeouts[
                'min_status_interval']):
        bs_coll.insert_one(bootstrax_state)
    else:
        bs_coll.update_one({'_id': previous_entry['_id']}, {'$set': bootstrax_state})
def send_heartbeat(update_fields=None):
    """Inform the bootstrax collection we're still here

    Use during long-running tasks where state doesn't change

    :param update_fields: optional dict merged into the status document
    """
    # Same as set_state, just don't change state (state=None keeps the
    # previously reported state and only refreshes the timestamp)
    set_state(None, update_fields=update_fields)
def log_warning(message, priority='warning', run_id=None):
    """Report a warning to the terminal (using the logging module)
    and the DAQ log DB.

    :param message: insert string into log_coll
    :param priority: severity of warning. Can be:
        info: 1,
        warning: 2,
        <any other valid python logging level, e.g. error or fatal>: 3
    :param run_id: optional run id.
    """
    # NOTE(review): in non-production mode this returns before even logging
    # to the terminal -- confirm that is intended rather than only a guard
    # against writing to the DAQ database.
    if not args.production:
        return
    getattr(log, priority)(message)
    # Log according to redax rules
    # https://github.com/coderdj/redax/blob/master/MongoLog.hh#L22
    warning_message = {
        'message': message,
        'user': f'bootstrax_{hostname}',
        'priority':
            dict(debug=0,
                 info=1,
                 warning=2,
                 error=3,
                 fatal=4,
                 ).get(priority.lower(), 3)}
    if run_id is not None:
        warning_message.update({'runid': int(run_id)})
    log_coll.insert_one(warning_message)
def eb_can_process():
    """The new ebs (eb3-5) should be sufficient to process all data. In exceptional
    circumstances eb3-5 cannot keep up. Only let eb0-2 also process data in such cases.
    Before eb0-2 are also used for processing two criteria have to be fulfilled:
      - There should be runs waiting to be processed
      - Eb3-5 should be busy processing for a substantial time.
    :returns: bool if this host should process a run"""
    # eb3-5 always process.
    if hostname in ['eb3.xenon.local', 'eb4.xenon.local', 'eb5.xenon.local']:
        return True
    # In test mode we can always process
    if not args.production:
        return True
    elif 'eb2' in hostname:
        # eb2 is not expected to be in service at all
        log_warning('Why is eb2 alive?!', priority='error')
        return False
    # Check that there are runs that are waiting to be processed. If there are few, this
    # eb should not process.
    max_queue_new_runs = 2
    # Count number of runs untouched by bootstrax.
    n_untouched_runs = run_coll.count_documents({'bootstrax.state': None})
    # Check that eb3-5 are all busy for at least some time.
    n_ebs_running = 0
    n_ebs_busy = 0
    for eb_i in range(3, 6):
        # Should count if eb3-5 are registered as running (as one might be offline).
        # A bootstrax is considered alive if it reported a status recently.
        bootstrax_on_host = bs_coll.find_one({
            'host': f'eb{eb_i}.xenon.local',
            'time': {'$gt': now(-timeouts['bootstrax_presumed_dead'])}},
            sort=[('time', pymongo.DESCENDING)])
        if bootstrax_on_host:
            n_ebs_running += 1
            # An eb counts as busy when it has been processing one run for
            # longer than eb3-5_max_busy_time
            running_eb = run_coll.find_one({
                'bootstrax.state': 'busy',
                'bootstrax.host': f'eb{eb_i}.xenon.local',
                'bootstrax.started_processing': {
                    '$lt': now(-timeouts['eb3-5_max_busy_time'])}})
            if running_eb:
                n_ebs_busy += 1
                log.info(f'eb_can_process::\t eb{eb_i} is busy')
    if ((n_ebs_running == n_ebs_busy and n_untouched_runs > max_queue_new_runs) or
            not n_ebs_running):
        # There is a need for this eb to also process data (for now).
        log.info(f'eb_can_process::\tThere is a need for {hostname} to process data. As we'
                 f' have {n_ebs_running} running and {n_ebs_busy} busy')
        return True
    elif n_untouched_runs and n_untouched_runs < max_queue_new_runs:
        log.info(f'eb_can_process::\tDo not process on {hostname} as there are few new runs.')
    else:
        log.info(f'eb_can_process::\tDo not process on {hostname}, new ebs are taking care')
    log.info(
        f'eb_can_process::\trunning: {n_ebs_running}\tbusy: {n_ebs_busy}\tqueue: {n_untouched_runs}')
    return False
def infer_mode(rd):
    """Infer a safe operating mode of running bootstrax based on the size of the first
    chunk. Estimating save parameters for running bootstrax from:
    https://xe1t-wiki.lngs.infn.it/doku.php?id=xenon:xenonnt:dsg:daq:eb_speed_tests_update

    :param rd: rundoc
    :return: dictionary of how many cores, max_messages and timeout should be
        used based on an estimated data rate (defaults when the rate is unknown).
    """
    default_opt = dict(cores=args.cores, max_messages=args.max_messages, timeout=300)
    # Get data rate from dispatcher
    try:
        docs = ag_stat_coll.aggregate([
            {'$match': {'number': rd['number']}},
            {'$group': {'_id': '$detector', 'rate': {'$max': '$rate'}}}
        ])
        data_rate = sum([d['rate'] for d in docs])
    except Exception as e:
        log_warning(f'infer_mode ran into {e}. Cannot infer mode, using default mode.',
                    run_id=f'{rd["number"]:06}', priority='info')
        data_rate = None
    # Find out if eb is new (eb3-eb5):
    is_new_eb = int(hostname[2]) >= 3  # ebX.xenon.local
    log.info(f"infer_mode::\teb{int(hostname[2])}")
    eb = 'new_eb' if is_new_eb else 'old_eb'
    # Return a run-mode that is empirically found to provide stable but fast processing.
    # Bug fix: formatting None with ':.1f' raises TypeError, which used to crash
    # infer_mode on exactly the fallback path where data_rate is unknown.
    if data_rate is None:
        log.info('infer_mode::\tData rate unknown, using default mode')
    else:
        log.info(f'infer_mode::\tWorking with a data rate of {data_rate:.1f} MB/s')
    # TODO
    #  decrease timeout later. Unsure why needed now.
    run_settings = {
        'new_eb':
            {'low_rate': dict(cores=24, max_messages=20, timeout=500),
             'med_rate': dict(cores=24, max_messages=20, timeout=600),
             'high_rate': dict(cores=20, max_messages=15, timeout=1000),
             'very_high_rate': dict(cores=20, max_messages=15, timeout=1500),
             'max_rate': dict(cores=16, max_messages=10, timeout=2000)},
        'old_eb':
            {'low_rate': dict(cores=30, max_messages=20, timeout=500),
             'med_rate': dict(cores=20, max_messages=15, timeout=600),
             'high_rate': dict(cores=10, max_messages=15, timeout=1000),
             'very_high_rate': dict(cores=10, max_messages=10, timeout=1500),
             'max_rate': dict(cores=8, max_messages=10, timeout=2000)}}
    if data_rate is None:
        result = default_opt
    elif data_rate < 100:
        result = run_settings[eb]['low_rate']
    elif data_rate < 200:
        result = run_settings[eb]['med_rate']
    elif data_rate < 500:
        result = run_settings[eb]['high_rate']
    elif data_rate < 700:
        result = run_settings[eb]['very_high_rate']
    else:
        result = run_settings[eb]['max_rate']
    log.info(f'infer_mode::\tOverride processing mode for run {rd["number"]} changing to:'
             f'\t {result}')
    # Lower the settings if there are continuous failures on this run:
    if rd.get('bootstrax', {}).get('n_failures', 0) >= lower_resources_after_n_failures:
        result = dict(cores=max(4, result['cores'] // 2),
                      max_messages=max(4, result['max_messages'] // 2),
                      timeout=min(1000, result['timeout'] * 1.5)
                      )
        log_warning(f'infer_mode::\tRepeated failures. Lowering mode to {result}. This '
                    f'may indicated a sub-optimal state of {hostname}!',
                    priority='info',
                    run_id=f'{rd["number"]:06}')
    return result
##
# Host interactions
##
def sufficient_diskspace():
    """Check if there is sufficient space available on the local disk to write to

    Blocks (up to wait_diskspace_n_max iterations of wait_diskspace_dt
    seconds) while the disk is too full; data being shipped off to the data
    manager may free space in the meantime.

    :raises RuntimeError: if the disk never frees up
    """
    for i in range(wait_diskspace_n_max):
        disk_pct = disk_usage(output_folder).percent
        if disk_pct < wait_diskspace_max_space_percent:
            log.info(f'Check disk space: {disk_pct:.1f}% full')
            # Sufficient space to write to, let's continue
            return
        else:
            log.info(f'Insufficient free disk space ({disk_pct:.1f}% full) '
                     f'on {hostname}. Waiting {wait_diskspace_dt} s ({i}th iteration)')
            time.sleep(wait_diskspace_dt)
            # Keep our heartbeat alive while waiting so we don't look dead
            send_heartbeat(dict(state='disk full'))
    # Disk never freed up: mark ourselves dead and crash bootstrax
    set_state(dead_state)
    message = f"No disk space to write to. Kill bootstrax on {hostname}"
    log_warning(message, priority='fatal')
    raise RuntimeError(message)
def delete_temp_files():
    """Remove leftover '*_temp' folders from output_folder for runs whose
    processing has finished successfully."""
    for fname in os.listdir(output_folder):
        if not fname.endswith('_temp'):
            continue
        # Folder names look like '<run_number>-...'
        run_id = int(fname.split('-')[0])
        run_is_done = run_coll.find_one(
            {'bootstrax.state': 'done',
             'number': run_id},
            {'_id': 1})
        if run_is_done:
            log.info(f'removing {output_folder}/{fname}')
            shutil.rmtree(f'{output_folder}/{fname}')
def delete_live_data(rd, live_data_path):
    """
    Open thread to delete the live_data

    Only acts in production mode when --delete_live is set and the path
    exists; the deletion runs in a daemon thread so processing can go on.

    :param rd: rundoc of the run whose live data should be removed
    :param live_data_path: folder holding the raw live data
    """
    if args.production and os.path.exists(live_data_path) and args.delete_live:
        delete_thread = threading.Thread(name=delete_thread_name,
                                         target=_delete_data,
                                         args=(rd, live_data_path, 'live'))
        log.info(f'Starting thread to delete {live_data_path} at {now()}')
        # We rather not stop deleting the live_data if something else
        # fails. Set the thread to daemon.
        # (Thread.setDaemon() is deprecated; assign the attribute instead.)
        delete_thread.daemon = True
        delete_thread.start()
        log.info(f'DeleteThread {live_data_path} should be running in parallel, '
                 f'continue MainThread now: {now()}')
def _delete_data(rd, path, data_type):
    """After completing the processing and updating the runsDB, remove the
    live_data

    :param rd: rundoc of the run whose data is being deleted
    :param path: folder to delete from disk
    :param data_type: 'type' of the data entry in the rundoc (e.g. 'live')
    :raises ValueError: if deleting live data is not explicitly enabled, or
        if the path still exists after deletion
    """
    # Safety net: never delete live data unless explicitly enabled in production
    if data_type == 'live' and not args.delete_live and args.production:
        message = 'Unsafe operation. Trying to delete live data!'
        log_warning(message, priority='fatal')
        raise ValueError(message)
    log.info(f'Deleting data at {path}')
    if os.path.exists(path):
        shutil.rmtree(path)
    log.info(f'deleting {path} finished')
    # Remove the data location from the rundoc and append it to the 'deleted_data' entries
    if not os.path.exists(path):
        log.info('changing data field in rundoc')
        # NOTE(review): if no entry with ddoc['type'] == data_type exists,
        # this loop falls through and ddoc is the *last* data entry --
        # confirm callers always pass a type that is present in rd['data'].
        for ddoc in rd['data']:
            if ddoc['type'] == data_type:
                break
        # Strip location details before archiving the entry in deleted_data
        for k in ddoc.copy().keys():
            if k in ['location', 'meta', 'protocol']:
                ddoc.pop(k)
        ddoc.update({'at': now(), 'by': hostname})
        log.info(f'update with {ddoc}')
        run_coll.update_one({'_id': rd['_id']},
                            {"$addToSet": {'deleted_data': ddoc},
                             "$pull": {"data":
                                       {"type": data_type,
                                        "host": {'$in': ['daq', hostname]}}}})
    else:
        message = f"Something went wrong we wanted to delete {path}!"
        log_warning(message, priority='fatal')
        raise ValueError(message)
def wait_on_delete_thread():
    """
    Check that the thread with the delete_thread_name is finished before continuing.

    Polls every timeouts['idle_nap'] seconds until the live-data deletion
    thread (if any) has finished.
    """
    threads = threading.enumerate()
    for thread in threads:
        if thread.name == delete_thread_name:
            # Bug fix: Thread.isAlive() was removed in Python 3.9;
            # the method is called is_alive().
            while thread.is_alive():
                log.info(f'{thread.name} still running take a {timeouts["idle_nap"]} s nap')
                time.sleep(timeouts['idle_nap'])
    log.info(f'Checked that {delete_thread_name} finished')
def clear_shm():
    """Remove leftover npshmex shared-memory files from /dev/shm/ at startup."""
    shm_dir = '/dev/shm/'
    shm_files = [name for name in os.listdir(shm_dir) if 'npshmex' in name]
    if not shm_files:
        return
    log.info(f'clear_shm:: clearing {len(shm_files)} files')
    for name in tqdm(shm_files):
        os.remove(shm_dir + name)
##
# Run DB interaction
##
def ping_dbs():
    """Block until both the runs DB and the DAQ DB answer a ping,
    retrying once a minute on connection problems."""
    while True:
        try:
            run_db.command('ping')
            daq_db.command('ping')
        except Exception as ping_error:
            log_warning(
                f'Failed to connect to Mongo. Ran into {ping_error}. Sleep '
                f'for a minute.', priority='warning')
            time.sleep(60)
        else:
            # Both databases responded; we are done
            return
def get_run(*, mongo_id=None, number=None, full_doc=False):
    """Find and return run doc matching mongo_id or number

    The bootstrax state is left unchanged.

    :param full_doc: If true (default is False), return the full run doc
        rather than just fields used by bootstrax.
    :raises ValueError: when neither mongo_id nor number is given
    """
    if number is not None:
        query = {'number': number}
    elif mongo_id is not None:
        query = {'_id': mongo_id}
    else:
        raise ValueError("Please give mongo_id or number")
    projection = None if full_doc else bootstrax_projection
    return run_coll.find_one(query, projection=projection)
def set_run_state(rd, state, return_new_doc=True, **kwargs):
    """Set state of run doc rd to state

    return_new_doc: if True (default), returns new document.
        if False, instead returns the original (un-updated) doc.

    Any additional kwargs will be added to the bootstrax field.
    """
    if not args.production:
        # Test mode: never write to the runs DB, just return the current doc
        return run_coll.find_one({'_id': rd['_id']})
    bd = rd['bootstrax']
    bd.update({
        'state': state,
        'host': hostname,
        'time': now(),
        **kwargs})
    if state == 'failed':
        # Keep a running tally of failures; used for retry backoff and
        # for lowering resources on repeatedly failing runs
        bd['n_failures'] = bd.get('n_failures', 0) + 1
    return run_coll.find_one_and_update(
        {'_id': rd['_id']},
        {'$set': {'bootstrax': bd}},
        return_document=return_new_doc,
        projection=bootstrax_projection)
def check_data_written(rd):
    """
    Verify that the data registered for this host in the runs-database
    actually exists on disk.

    :param rd: rundoc
    :return: type bool, False if any registered path is missing or if no
        data is registered for this host at all
    """
    local_paths = [ddoc['location'] for ddoc in rd['data']
                   if ddoc['host'] == hostname]
    if not local_paths:
        # Nothing registered on this host counts as "not written"
        return False
    return all(os.path.exists(path) for path in local_paths)
def all_files_saved(rd, wait_max=600, wait_per_cycle=10):
    """
    Check that all files are written, allowing the savers some time to
    finish renaming folder_temp to folder.

    :param rd: rundoc
    :param wait_max: max seconds to wait for data to save
    :param wait_per_cycle: wait this many seconds if the data is not yet there
    :return: True when everything is on disk, False on timeout
    """
    deadline = time.time() + wait_max
    while not check_data_written(rd):
        if time.time() > deadline:
            return False
        # Stay registered as alive while we wait for the savers
        send_heartbeat()
        time.sleep(wait_per_cycle)
    return True
def upload_file_metadata(rd):
    """
    Update the data-field in the rundoc with a portion of the metadata. Also count the
    number of files on the location on the basis of the data entry in the rundoc. The
    filecount info is used for Admix to checksum that all the files are correctly uploaded
    to Rucio.

    :param rd: rundoc
    """
    # Dedicated context with data creation forbidden, so reading the metadata
    # can never accidentally trigger new processing.
    try:
        st_meta = new_context(cores=args.cores, max_messages=args.max_messages, timeout=100)
        st_meta.set_context_config({'forbid_creation_of': '*'})
    except Exception as e:
        log_warning(f"Cannot create context to read the metadata: {e}", priority='warning')
        st_meta = None
    for ddoc in rd['data']:
        # Only report on data entries that live on this eventbuilder
        if hostname != ddoc['host']:
            continue
        loc = ddoc.get('location', '')
        if os.path.exists(loc):
            file_count = len(os.listdir(loc))
            # Can also get the latter from st.lineage_for but too lazy for that
            data_size_mb, avg_data_size_mb, lineage_hash = None, None, None
            run_id = '%06d' % rd['number']
            if st_meta is not None:
                try:
                    # Bug fix: read the metadata with the read-only st_meta
                    # context created above; this used to call the global
                    # `st`, bypassing the forbid_creation_of protection.
                    md = st_meta.get_meta(run_id, ddoc['type'])
                    chunk_mb = [chunk['nbytes'] / (1e6) for chunk in md['chunks']]
                    data_size_mb = int(np.sum(chunk_mb))
                    avg_data_size_mb = int(np.average(chunk_mb))
                    lineage_hash = md['lineage_hash']
                except Exception as e:
                    log_warning(f"Cannot load metadata of {ddoc['type']}: {e}",
                                priority='warning',
                                run_id=f'{rd["number"]:06}')
            run_coll.update_one(
                {'_id': rd['_id'],
                 'data.location': ddoc['location']},
                {'$set':
                    {'data.$.file_count': file_count,
                     'data.$.meta.strax_version': strax.__version__,
                     'data.$.meta.straxen_version': straxen.__version__,
                     'data.$.meta.size_mb': data_size_mb,
                     'data.$.meta.avg_chunk_mb': avg_data_size_mb,
                     'data.$.meta.lineage_hash': lineage_hash}
                 })
def set_status_finished(rd):
    """Set the status to ready to upload for datamanager and admix

    :param rd: rundoc of the run that finished processing
    :raises ValueError: when the run already has a status that must not be
        overridden (e.g. it was already picked up by admix)
    """
    # Check mongo connection
    ping_dbs()
    if not args.production:
        # Don't update the status if we are not in production mode
        return
    # First check that all the data is available (that e.g. no _temp files
    # are being renamed). This line should be over-redundant as we already
    # check earlier.
    all_files_saved(rd)
    # Only update the status if it does not exist or if it needs to be uploaded
    ready_to_upload = {'status': 'eb_ready_to_upload'}
    if rd.get('status') in [None, 'needs_upload']:
        run_coll.update_one(
            {'_id': rd['_id']},
            {'$set': ready_to_upload})
    elif rd.get('status') == ready_to_upload.get('status'):
        # This is strange, bootstrax already finished this run before
        log_warning('WARNING: bootstax has already marked this run as ready '
                    'for upload. Doing nothing.',
                    priority='warning',
                    run_id=f'{rd["number"]:06}')
    else:
        # Do not override this field for runs already uploaded in admix
        message = (f'Trying to set set the status {rd.get("status")} to '
                   f'{ready_to_upload}! One should not override this field.')
        log_warning(message, priority='fatal')
        raise ValueError(message)
def abandon(*, mongo_id=None, number=None):
"""Mark a run as abandoned"""
set_run_state(
get_run(mongo_id=mongo_id, number=number),
'abandoned')
def consider_run(query, return_new_doc=True, test_counter=0):
    """
    Return one run doc matching query, and simultaneously set its
    bootstrax state to 'considering'

    :param query: Mongo query selecting candidate runs
    :param return_new_doc: passed through to set_run_state; if True return
        the updated doc, otherwise the pre-update one
    :param test_counter: in non-production mode, index of the matching run
        to return (the DB is then not modified)
    :return: a run doc, or None when nothing matches
    """
    # We must first do an atomic find-and-update to set the run's state
    # to "considering", to ensure the run doesn't get picked up by a
    # bootstrax on another host.
    if args.production:
        rd = run_coll.find_one_and_update(
            query,
            {"$set": {'bootstrax.state': 'considering'}},
            projection=bootstrax_projection,
            return_document=True,
            sort=[('start', pymongo.DESCENDING)])
        # Next, we can update the bootstrax entry properly with set_run_state
        # (adding hostname, time, etc.)
        if rd is None:
            return None
        return set_run_state(rd, 'considering', return_new_doc=return_new_doc)
    else:
        # Don't change the runs-database for test modes
        try:
            rds = run_coll.find(
                query,
                projection=bootstrax_projection,
                sort=[('start', pymongo.DESCENDING)])
            return rds[test_counter]
        except IndexError:
            # Fewer matches than test_counter: nothing (left) to consider
            return None
def fail_run(rd, reason, error_traceback=''):
    """Mark the run represented by run doc rd as failed with reason

    :param rd: run doc
    :param reason: short human-readable reason for the failure
    :param error_traceback: optional traceback text appended to the reason
    """
    if 'number' not in rd:
        long_run_id = f"run <no run number!!?>:{rd['_id']}"
    else:
        long_run_id = f"run {rd['number']}:{rd['_id']}"
    # No bootstrax info is present when manually failing a run with args.fail
    if 'bootstrax' not in rd.keys():
        rd['bootstrax'] = {}
        rd['bootstrax']['n_failures'] = 0
    # First failure is warned loudly; repeats are only informational
    if 'n_failures' in rd['bootstrax'] and rd['bootstrax']['n_failures'] > 0:
        fail_name = 'Repeated failure'
        failure_message_level = 'info'
    else:
        fail_name = 'New failure'
        failure_message_level = 'warning'
    # Cleanup any data associated with the run
    # TODO: This should become optional, or just not happen at all,
    #  after we're done testing (however, then we need some other
    #  pruning mechanism)
    clean_run(mongo_id=rd['_id'])
    # Report to run db
    # It's best to do this after everything is done;
    # as it changes the run state back away from 'considering', so another
    # bootstrax could conceivably pick it up again.
    set_run_state(rd, 'failed',
                  reason=reason + error_traceback,
                  next_retry=(
                      now(plus=(timeouts['retry_run']
                                * np.random.uniform(0.5, 1.5)
                                # Exponential backoff with jitter
                                * 5 ** min(rd['bootstrax'].get('n_failures', 0), 3)
                                ))))
    # Report to DAQ log and screen. Let's not also add the entire traceback
    # NOTE(review): if rd has no 'number', the run_id f-string below raises
    # KeyError despite the guard at the top -- confirm 'number' is always set.
    log_warning(f"{fail_name} on {long_run_id}: {reason}",
                priority=failure_message_level,
                run_id=f'{rd["number"]:06}')
def manual_fail(*, mongo_id=None, number=None, reason=''):
    """Manually mark a run (looked up by mongo_id or run number) as failed."""
    run_doc = get_run(mongo_id=mongo_id, number=number)
    fail_run(run_doc, "Manually set failed state. " + reason)
def get_compressor(rd, default_compressor="lz4"):
    """Read the compression algorithm from the run doc.

    :param rd: run document (dict) from the runs database
    :param default_compressor: value returned when the run doc does not
        specify a compressor under ``daq_config.compressor``
    :return: the compressor name from the run doc, or ``default_compressor``
    """
    try:
        return rd["daq_config"]["compressor"]
    except KeyError:
        # Keep the warning consistent with whatever default is actually used
        # (the original message hardcoded 'lz4' regardless of the parameter,
        # and misspelled "from" as "form")
        log_warning(f"Bootstrax couldn't read the compressor from the run_doc. "
                    f"Assuming '{default_compressor}' for now",
                    priority='info',
                    run_id=f'{rd["number"]:06}')
        return default_compressor
##
# Processing
##
def run_strax(run_id, input_dir, target, n_readout_threads, compressor,
              run_start_time, samples_per_record, process_mode, target_override,
              daq_chunk_duration, daq_overlap_chunk_duration, n_fails,
              debug=False):
    """Process one run with strax (intended to run in a child process).

    Builds a strax context from ``process_mode``, processes the live data in
    ``input_dir`` up to ``target`` (or ``target_override`` for non-standard
    runs), optionally makes the extra sub-detector targets, and on any
    exception writes the formatted traceback to ``exception_tempfile`` so the
    parent bootstrax process can read it, then re-raises.
    """
    # Check mongo connection
    ping_dbs()
    # Clear the swap memory used by npshmmex
    npshmex.shm_clear()
    # double check by forcefully clearing shm
    clear_shm()
    if debug:
        log.setLevel(logging.DEBUG)
    try:
        log.info(f"Starting strax to make {run_id} with input dir {input_dir}")
        # Never override the target for raw_records because we might be in a save mode
        if target != 'raw_records' and target_override:
            log.info(f'This is an non-standard run. Changing target from {target} to '
                     f'{target_override}.\nTo disable specify --fix_target')
            if target_override == 'led_calibration':
                # Increase the timeout a little
                process_mode['timeout'] = process_mode['timeout'] * 5
            target = target_override
        st = new_context(**process_mode)
        # Make a function for running strax, call the function to process the run
        # This way, it can also be run inside a wrapper to profile strax
        def st_make():
            """Run strax on the main target, then any sub-detector targets."""
            strax_config = dict(daq_input_dir=input_dir,
                                daq_compressor=compressor,
                                run_start_time=run_start_time,
                                record_length=samples_per_record,
                                daq_chunk_duration=daq_chunk_duration,
                                daq_overlap_chunk_duration=daq_overlap_chunk_duration,
                                n_readout_threads=n_readout_threads,
                                check_raw_record_overlaps=False,
                                )
            st.make(run_id, target,
                    config=strax_config,
                    max_workers=process_mode['cores'])
            if (target != 'raw_records' and n_fails == 0) or args.fix_target:
                # Make the nv, he, and mv data type only if we are:
                # - Not processing up to raw_records (that means we are in some recovery mode)
                # - We haven't failed this run before.
                # or
                # - Are fixing the target (this is useful for testing but normally not used)
                for sub_d_target in args.sub_d_targets:
                    if sub_d_target not in st._plugin_class_registry:
                        log_warning(f'Trying to make unknown data type {sub_d_target}',
                                    priority='info',
                                    run_id=run_id)
                        continue
                    elif not st.is_stored(run_id, sub_d_target):
                        st.make(run_id, sub_d_target,
                                config=strax_config,
                                max_workers=process_mode['cores'])
        if args.profile.lower() == 'false':
            st_make()
        else:
            # Profile the processing and save the stats to <prof_file>
            prof_file = f'run{run_id}_{args.profile}'
            if '.prof' not in prof_file:
                prof_file += '.prof'
            log.info(f'starting with profiler, saving as {prof_file}')
            with strax.profile_threaded(prof_file):
                st_make()
    except Exception as e:
        # Write exception to file, so bootstrax can read it
        exc_info = strax.formatted_exception()
        with open(exception_tempfile, mode='w') as f:
            f.write(exc_info)
        with open(f'./bootstrax_exceptions/{run_id}_exception.txt', mode='w') as f:
            f.write(exc_info)
        raise
def process_run(rd, send_heartbeats=args.production):
    """Process the run described by run doc ``rd`` end to end.

    Validates the run doc and its live data location, launches
    :func:`run_strax` in a child process, monitors it (sending heartbeats
    and keeping the run state 'busy'), then on success performs basic
    data-quality checks; on any problem marks the run failed via ``fail``.
    """
    log.info(f"Starting processing of run {rd['number']}")
    if rd is None:
        raise RuntimeError("Pass a valid rundoc, not None!")
    # Shortcuts for failing
    class RunFailed(Exception):
        pass
    def fail(reason, **kwargs):
        # In production record the failure in the runs db, otherwise just
        # log it; either way abort processing by raising RunFailed.
        if args.production:
            fail_run(rd, reason, **kwargs)
        else:
            log.warning(reason)
        raise RunFailed
    try:
        try:
            run_id = '%06d' % rd['number']
        except Exception as e:
            fail(f"Could not format run number: {str(e)}")
        # Find the 'live' data entry; in test mode fall back to the usual
        # location when none is registered.
        for dd in rd['data']:
            if 'type' not in dd:
                fail("Corrupted data doc, found entry without 'type' field")
            if dd['type'] == 'live':
                break
            elif not args.production:
                # We are just testing let's assume its on the usual location
                dd = {'type': 'live', 'location': '/live_data/xenonnt/', 'host': 'daq'}
            else:
                fail("Non-live data already registered; untracked failure?")
        else:
            if not args.production:
                # We are just testing let's assume its on the usual location
                dd = {'type': 'live', 'location': '/live_data/xenonnt/', 'host': 'daq'}
            else:
                fail(f"No live data entry in rundoc")
        if not osp.exists(dd['location']):
            fail(f"No access to live data folder {dd['location']}")
        if 'daq_config' not in rd:
            fail('No daq_config in the rundoc!')
        try:
            # Fetch parameters from the rundoc. If not readable, let's use redax' default
            # values (that are hardcoded here).
            dq_conf = rd['daq_config']
            to_read = ('processing_threads', 'strax_chunk_length', 'strax_chunk_overlap',
                       'strax_fragment_payload_bytes')
            report_missing_config = [conf for conf in to_read if conf not in dq_conf]
            if report_missing_config:
                log_warning(f'{", ".join(report_missing_config)} not in rundoc for '
                            f'{run_id}! Using default values.',
                            priority='info',
                            run_id=run_id)
            thread_info = dq_conf.get('processing_threads', dict())
            n_readout_threads = sum([v for v in thread_info.values()])
            # Chunk durations are stored in seconds; strax wants nanoseconds
            daq_chunk_duration = int(dq_conf.get('strax_chunk_length', 5) * 1e9)
            daq_overlap_chunk_duration = int(dq_conf.get('strax_chunk_overlap', 0.5) * 1e9)
            # note that value in rd in bytes hence //2
            samples_per_record = dq_conf.get('strax_fragment_payload_bytes', 220) // 2
            if not samples_per_record == 110:
                log.info(f'Samples_per_record = {samples_per_record}')
        except Exception as e:
            fail(f"Could not find {to_read} in rundoc: {str(e)}")
        if not n_readout_threads:
            fail(f"Run doc for {run_id} has no readout thread count info")
        loc = osp.join(dd['location'], run_id)
        if not osp.exists(loc):
            fail(f"No live data at claimed location {loc}")
        # Remove any previous processed data
        # If we do not do this, strax will just load this instead of
        # starting a new processing
        if args.production:
            clean_run(mongo_id=rd['_id'])
        else:
            clean_run_test_data(run_id)
        # Remove any temporary exception info from previous runs
        if osp.exists(exception_tempfile):
            os.remove(exception_tempfile)
        target = args.target
        n_fails = rd['bootstrax'].get('n_failures', 0)
        if not args.production and 'bootstrax' not in rd:
            # Bootstrax does not register in non-production mode
            pass
        elif n_fails > 1 and not args.process:
            # Failed before, and on autopilot: do just raw_records
            target = 'raw_records'
        compressor = get_compressor(rd)
        target_override = override_target(rd)
        try:
            # Run start as a UTC POSIX timestamp for strax
            run_start_time = rd['start'].replace(tzinfo=timezone.utc).timestamp()
        except Exception as e:
            fail(f"Could not find start in datetime.datetime object: {str(e)}")
        if args.infer_mode:
            process_mode = infer_mode(rd)
        else:
            process_mode = dict(cores=args.cores, max_messages=args.max_messages, timeout=300)
        # Run strax in a child process so we can monitor / time it out
        strax_proc = multiprocessing.Process(
            target=run_strax,
            args=(run_id, loc, target, n_readout_threads, compressor,
                  run_start_time, samples_per_record, process_mode, target_override,
                  daq_chunk_duration, daq_overlap_chunk_duration, n_fails,
                  args.debug))
        t0 = now()
        info = dict(started_processing=t0)
        strax_proc.start()
        # Monitoring loop: poll the child's exit code, heartbeat, timeout
        while True:
            if send_heartbeats:
                update = process_mode.copy()
                update.update(dict(run_id=run_id,
                                   target=target_override if target_override else args.target))
                send_heartbeat(update)
            ec = strax_proc.exitcode
            if ec is None:
                if t0 < now(-timeouts['max_processing_time']):
                    fail(f"Processing took longer than {timeouts['max_processing_time']} sec")
                    # NOTE(review): fail() raises RunFailed, so this
                    # kill_process call looks unreachable -- confirm intent
                    kill_process(strax_proc.pid)
                # Still working, check in later
                # TODO: is there a good way to detect hangs, before max_processing_time expires?
                log.info(f"Still processing run {run_id}")
                if args.production:
                    set_run_state(rd, 'busy', **info)
                time.sleep(timeouts['check_on_strax'])
                continue
            elif ec == 0:
                log.info(f"Strax done on run {run_id}, performing basic data quality check")
                if args.ignore_checks:
                    # I hope you know what you are doing, we are not going to
                    # do any of the checks below
                    pass
                else:
                    try:
                        # Sometimes we have only he channels or mv channels,
                        # try loading one until we get one with chunks.
                        for rr_type in ('raw_records',
                                        'raw_records_he',
                                        'raw_records_mv',
                                        'raw_records_nv'
                                        ):
                            md = st.get_meta(run_id, rr_type)
                            if len(md['chunks']) and (
                                    'first_time' in md['chunks'][0] and
                                    'last_endtime' in md['chunks'][0]
                            ):
                                break
                    except Exception:
                        fail("Processing succeeded, but metadata not readable",
                             error_traceback=traceback.format_exc())
                    if not len(md['chunks']):
                        fail("Processing succeeded, but no chunks were written!")
                    rd = get_run(mongo_id=rd['_id'])
                    if 'end' not in rd or rd['end'] is None:
                        fail("Processing succeeded, but run hasn't yet ended!")
                    # Check that the data written covers the run
                    # (at least up to some fudge factor)
                    # Since chunks can be empty, and we don't want to crash,
                    # this has to be done with some care...
                    # Lets assume some ridiculous timestamp (in ns): 10e9*1e9
                    t_covered = timedelta(
                        seconds=(max([x.get('last_endtime', 0) for x in md['chunks']]) -
                                 min([x.get('first_time', 10e9 * 1e9) for x in
                                      md['chunks']])) / 1e9)
                    run_duration = rd['end'] - rd['start']
                    if not (0 < t_covered.seconds < float('inf')):
                        fail(f"Processed data covers {t_covered} sec")
                    if not (timedelta(seconds=-max_timetamp_diff)
                            < (run_duration - t_covered)
                            < timedelta(seconds=max_timetamp_diff)):
                        fail(f"Processing covered {t_covered.seconds}, "
                             f"but run lasted {run_duration.seconds}!")
                    if not all_files_saved(rd):
                        fail("Not all files in the rundoc for this run are saved")
                log.info(f"Run {run_id} processed successfully")
                if args.production:
                    set_run_state(rd, 'done', **info)
                    set_status_finished(rd)
                    upload_file_metadata(rd)
                if args.delete_live:
                    delete_live_data(rd, loc)
                break
            else:
                # This is just the info that we're starting
                # exception retrieval. The actual error comes later.
                log.info(f"Failure while processing run {run_id}")
                if osp.exists(exception_tempfile):
                    with open(exception_tempfile, mode='r') as f:
                        exc_info = f.read()
                    if not exc_info:
                        exc_info = '[No exception info known, exception file was empty?!]'
                else:
                    exc_info = "[No exception info known, exception file not found?!]"
                fail(f"Strax exited with exit code {ec}.",
                     error_traceback=f'Exception info: {exc_info}')
    except RunFailed:
        return
##
# Cleanup
##
def clean_run(*, mongo_id=None, number=None, force=False):
    """Removes all data on this host associated with a run
    that was previously registered in the run db.
    Does NOT remove temporary folders,
    nor data that isn't registered to the run db.

    :param mongo_id: mongo _id of the run doc (alternative to number)
    :param number: run number (alternative to mongo_id)
    :param force: when True, delete raw_records even if the live data
        for the run has already been removed
    """
    # We need to get the full data docs here, since I was too lazy to write
    # a surgical update below
    rd = get_run(mongo_id=mongo_id, number=number, full_doc=True)
    # Determine whether the run still has live data registered; if not,
    # raw_records are the only copy and should be protected (unless force)
    have_live_data = False
    for dd in rd['data']:
        if dd['type'] == 'live':
            have_live_data = True
            break
    for ddoc in rd['data']:
        # Only touch data entries registered to this host
        if 'host' in ddoc and ddoc['host'] == hostname:
            loc = ddoc['location']
            if not force and not have_live_data and 'raw_records' in ddoc['type']:
                log.info(f'prevent {loc} from being deleted. The live_data has already'
                         f' been removed')
            elif os.path.exists(loc):
                log.info(f'delete data at {loc}')
                _delete_data(rd, loc, ddoc['type'])
            else:
                # Registered location missing: fall back to the '_temp'
                # folder a partial processing would have left behind
                loc = loc + '_temp'
                log.info(f'delete data at {loc}')
                _delete_data(rd, loc, ddoc['type'])
def clean_run_test_data(run_id):
    """Delete every entry in test_data_folder whose name contains run_id."""
    matching = (name for name in os.listdir(test_data_folder) if run_id in name)
    for name in matching:
        target_path = test_data_folder + name
        log.info(f'Cleaning {target_path}')
        shutil.rmtree(target_path)
def cleanup_db():
    """Find various pathological runs and clean them from the db
    Also cleans the bootstrax collection for stale entries
    """
    # Check mongo connection
    ping_dbs()
    log.info("Checking for bad stuff in database")
    # Check for all the ebs if their last state message is not longer
    # ago than the time we assume that the eb is dead.
    for eb_i in range(6):
        bd = bs_coll.find_one(
            {'host': f'eb{eb_i}.xenon.local'},
            sort=[('time', pymongo.DESCENDING)])
        # NOTE(review): 'is not dead_state' is an identity comparison on
        # (presumably) a string -- probably intended as '!='; confirm
        if (bd and
                bd['time'].replace(tzinfo=pytz.utc) < now(
                    -timeouts['bootstrax_presumed_dead']) and
                bd['state'] is not dead_state):
            bs_coll.find_one_and_update({'_id': bd['_id']},
                                        {'$set': {'state': dead_state}})
    # Runs that say they are 'considering' or 'busy' but nothing happened for a while
    for state, timeout in [
            ('considering', timeouts['max_considering_time']),
            ('busy', timeouts['max_busy_time'])]:
        # consider_run claims one matching run at a time; loop until none left
        while True:
            send_heartbeat()
            rd = consider_run(
                {'bootstrax.state': state,
                 'bootstrax.time': {'$lt': now(-timeout)}},
                return_new_doc=False)
            if rd is None:
                break
            fail_run(rd,
                     f"Host {rd['bootstrax']['host']} said it was {state} "
                     f"at {rd['bootstrax']['time']}, but then didn't get further; "
                     f"perhaps it crashed on this run or is still stuck?")
    # Runs for which, based on the run doc alone, we can tell they are in a bad state
    # Mark them as failed.
    failure_queries = [
        ({'bootstrax.state': 'done',
          'end': None},
         'Bootstrax state was done, but run did not yet end'),
        ({'bootstrax.state': 'done',
          'data': {
              '$not': {
                  '$elemMatch': {
                      "type": {
                          '$ne': 'live'}}}}},
         'Bootstrax state was done, but no processed data registered'),
    ]
    for query, failure_message in failure_queries:
        while True:
            send_heartbeat()
            rd = consider_run(query)
            if rd is None:
                break
            fail_run(rd, failure_message.format(**rd))
    # Abandon runs which we already know are so bad that
    # there is no point in retrying them
    abandon_queries = [
        ({'tags': {
            '$elemMatch': {
                'name': 'bad'}}},
         "Run has a 'bad' tag"),
    ]
    for query, failure_message in abandon_queries:
        # Skip runs that are already abandoned
        query['bootstrax.state'] = {'$ne': 'abandoned'}
        failure_message += ' -- run has been abandoned'
        while True:
            send_heartbeat()
            rd = consider_run(query)
            if rd is None:
                break
            fail_run(rd, failure_message.format(**rd))
            abandon(mongo_id=rd['_id'])
if __name__ == '__main__':
    if not args.undying:
        main()
    else:
        # --undying mode: keep bootstrax alive through any non-fatal crash,
        # logging the error and restarting the main loop after a pause
        while True:
            try:
                main()
            except (KeyboardInterrupt, SystemExit):
                # Always allow deliberate shutdown to propagate
                raise
            except Exception as fatal_error:
                log.error(f'Fatal warning:\tran into {fatal_error}. Try '
                          f'logging error and restart bootstrax')
                try:
                    log_warning(f'Fatal warning:\tran into {fatal_error}',
                                priority='error')
                except Exception as warning_error:
                    # Even the warning channel failed; fall back to local log
                    log.error(f'Fatal warning:\tcould not log {warning_error}')
                # This usually only takes a minute or two
                time.sleep(60)
                log.warning('Restarting main loop')
| true |
6e10378b940aa1e3b4934297fe23ade00f0fbd35 | Python | azurelysium/go-lang-practices | /bookwhales/spiders/jsonl_to_leveldb.py | UTF-8 | 544 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python2
#
# Put an item stored in Json Lines format file to LevelDB
#
import sys
import json
import leveldb
from tqdm import tqdm
if __name__ == '__main__':
    # Python 2 script (note the print statement and xrange below).
    # Expects exactly two arguments: the JSON Lines input file and the
    # LevelDB directory to write into.
    if len(sys.argv) != 3:
        print 'usage: {} <jsonl file> <leveldb path>'.format(sys.argv[0])
        sys.exit()
    db = leveldb.LevelDB(sys.argv[2])
    with open(sys.argv[1]) as f:
        lines = f.readlines()
        # tqdm shows a progress bar over the line indices
        for i in tqdm(xrange(len(lines))):
            item = json.loads(lines[i])
            # Keyed by ISBN; values are the UTF-8 encoded content strings
            db.Put(item['isbn'], item['content'].encode('utf-8'))
| true |
635c66a7277cc54db7aa9b88bbfbff8a1528bede | Python | shelbytommy/DailyCodingProblem | /problem007/problem007.py | UTF-8 | 1,158 | 3.6875 | 4 | [] | no_license | """
This problem was asked by Facebook.
Given the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the
number of ways it can be decoded.
For example, the message '111' would give 3, since it could be decoded as 'aaa',
'ka', and 'ak'.
You can assume that the messages are decodable. For example, '001' is not allowed.
"""
def get_decode_counts(code):
    """Count the decodings of a digit string under a=1 ... z=26.

    Mirrors the original recursive definition exactly -- every single digit
    counts as one decoding (even '0'), and any two-digit window whose
    integer value lies in 1..26 (including '01'..'09') adds the decodings
    of the remainder -- but runs in O(n) time via dynamic programming
    instead of exponential recursion.

    :param code: non-empty string of digits
    :return: number of ways to decode the string
    """
    n = len(code)
    # ways_ahead1 / ways_ahead2 = decode counts of code[i+1:] and code[i+2:];
    # an empty suffix counts as one way, so both start at 1
    ways_ahead2 = 1
    ways_ahead1 = 1
    for i in range(n - 1, -1, -1):
        if i == n - 1:
            count = 1  # a lone trailing digit always decodes one way
        else:
            count = ways_ahead1
            # Two-digit step, valid when its integer value is in 1..26
            if 1 <= int(code[i:i + 2]) <= 26:
                count += ways_ahead2
        ways_ahead2, ways_ahead1 = ways_ahead1, count
    return ways_ahead1
def is_within_range(code):
    """Return 1 when int(code) lies in the inclusive range 1..26, else 0."""
    return 1 if 1 <= int(code) <= 26 else 0
# Regression checks. Note '101001' == 10 depends on the deliberately lenient
# rule that any single digit (including '0') and any two-digit value 1..26
# (including '01'..'09') count as decodable.
assert get_decode_counts('111') == 3
assert get_decode_counts('123') == 3
assert get_decode_counts('128') == 2
assert get_decode_counts('1234') == 3
assert get_decode_counts('1111') == 5
assert get_decode_counts('12321') == 6
assert get_decode_counts('101001') == 10
assert get_decode_counts('41') == 1
assert get_decode_counts('22') == 2
| true |
9c8e4672711340f7e7c7e421febc882184e1c7b9 | Python | Princeton-CDH/mep-django | /mep/books/migration_group_work_utils.py | UTF-8 | 6,544 | 2.640625 | 3 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | '''
Utility methods for group_works_by_uri data migration.
'''
import re
from django.conf import settings
from django.contrib.admin.models import CHANGE
from django.utils import timezone
from django.utils.text import slugify
def similar_titles(titles):
    '''Check if the titles are similar enough to support merging.
    Should only be called when titles are not exactly the same.

    :param titles: list of string titles
    :rtype: bool
    '''
    normalized = set()
    for title in titles:
        # Join spaced two-letter initials (D. H. -> D.H.) so both spellings
        # normalize identically
        text = re.sub(r'\b([A-Z]\.)\s([A-Z]\.)(\s|$)', r'\1\2 ', title)
        # Lowercase and turn the en dash into a plain hyphen, since slugify
        # treats the two differently
        text = text.lower().replace('–', '-')
        # Strip a minimal set of stopwords
        text = re.sub(r'\b(the|and|a|an)\b', '', text)
        # slugify drops remaining punctuation and collapses whitespace
        normalized.add(slugify(text))
    # A single normalized form means the titles are similar enough to merge
    return len(normalized) == 1
def ok_to_merge(works):
    '''Check if a group of works is safe to merge, sanity checking
    based on title & author.

    :param works: queryset of :class:`~mep.books.models.Work`
    :rtype: bool
    '''
    # Titles must either be identical or similar enough per similar_titles
    distinct_titles = works.values_list('title', flat=True).distinct()
    if len(distinct_titles) != 1 and not similar_titles(distinct_titles):
        return False
    # Build one author signature per work; all works must agree
    author_signatures = set()
    for work in works:
        names = work.creators.filter(creator__creator_type__name='Author') \
            .values_list('creator__person__name', flat=True)
        author_signatures.add(';'.join(names) if names else '')
    return len(author_signatures) <= 1
def create_logentry(obj, obj_repr, message, apps):
    '''Create a log entry to document a change in a migration.

    Looks the admin/auth/contenttypes models up through the migration
    ``apps`` registry (rather than importing them directly) and attributes
    the CHANGE action to the configured script user.

    :param obj: the model instance the entry refers to
    :param obj_repr: human-readable representation stored on the entry
    :param message: change message stored on the entry
    :param apps: migration state app registry
    '''
    LogEntry = apps.get_model('admin', 'LogEntry')
    User = apps.get_model('auth', 'User')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # All scripted changes are attributed to the configured script user
    script_user = User.objects.get(username=settings.SCRIPT_USERNAME)
    obj_content_type = ContentType.objects.get_for_model(obj.__class__).pk
    LogEntry.objects.log_action(
        user_id=script_user.id,
        content_type_id=obj_content_type,
        object_id=obj.pk,
        object_repr=obj_repr,
        change_message=message,
        action_flag=CHANGE)
def merge_works(works, apps):
    '''Merge a queryset of :class:`~mep.books.models.Work` records.

    - First work in the queryset is used as the primary work
    - Copies attributes that are not present to primary work
    - Copies creators, genres, and subjects to primary work
    - If Work titles vary, adds TITLEVAR tag to notes
    - Uses earliest year of any present on the Works
    - Reassociate all events with any Works to the primary work
    - Merges notes from all works and documents the merge
    - Create an admin LogEntry to document the change
    - Deletes redundant works after merge

    :param works: queryset of Work records to merge
    :param apps: migration state app registry (for LogEntry creation)
    :return: the surviving primary Work
    '''
    # arbitrarily choose the first as the primary work record
    primary_work = works[0]
    # if the titles vary, add a note to indicate cleanup needed
    titles = set([w.title for w in works])
    if len(titles) > 1:
        primary_work.notes += '\nTITLEVAR'
    # use the earliest year of any present for the merged work
    years = [w.year for w in works if w.year]
    if years:
        primary_work.year = min(years)
    # get a list of works without the primary work
    works = works.exclude(pk=primary_work.pk)  # preserve as queryset
    # copy over any missing attributes
    for attr in ['mep_id', 'year', 'ebook_url', 'work_format']:
        # If an attribute is set on a work and not the primary work,
        # copy it over. First come, first serve.
        for work in works:
            if not getattr(primary_work, attr) and getattr(work, attr):
                setattr(primary_work, attr, getattr(work, attr))
    # Lookup of the primary work's existing person -> creator-type pairs,
    # used to detect redundant creator records below
    current_creators = {
        c.person: c.creator_type
        for c in primary_work.creator_set.all()
    }
    # combine creators, subjects, and genres
    for work in works:
        # combine creators
        for creator in work.creator_set.all():
            # if person is already associated with the work as
            # the specified creator type, delete the redudant through record
            if creator.person in current_creators and \
                    current_creators[creator.person] == creator.creator_type:
                creator.delete()
            else:
                # otherwise reassociate creator with new primary work
                creator.work = primary_work
                creator.save()
                # add to current creators lookup
                current_creators[creator.person] = creator.creator_type
        # combine subjects and genres in case there are differences
        primary_work.subjects.add(*work.subjects.all())
        primary_work.genres.add(*work.genres.all())
    # update all events associated with any work to be associated
    # with the new primary work
    for work in works:
        work.event_set.update(work_id=primary_work.pk)
    # consolidate notes and preserve any merged MEP ids
    # in case we need to find a record based on a deleted MEP id
    # (e.g. for card import)
    # get current date to record when this merge happened
    iso_date = timezone.now().strftime('%Y-%m-%d')
    notes = [primary_work.notes]
    notes.extend([w.notes for w in works])
    merge_message = 'Merged on %s with %s' % (
        iso_date,
        ', '.join(['MEP id %s' % w.mep_id if w.mep_id
                   else '%s' % w.pk
                   for w in works]))
    notes.append(merge_message)
    # Blank note fields are dropped when joining
    primary_work.notes = '\n'.join(n for n in notes if n)
    # delete merged works once we're done with them
    works.delete()
    # NOTE: could make a logentry for deletions,
    # but not sure there is much value
    # save any attribute and notes changed
    primary_work.save()
    # create a log entry to document the change
    create_logentry(
        primary_work,
        '%s (%s)' % (primary_work.title, primary_work.year or '??'),
        merge_message, apps)
    return primary_work
| true |
1a4f17a19075f36a578a804ce9665009fc68a49d | Python | Aasthaengg/IBMdataset | /Python_codes/p02898/s181051318.py | UTF-8 | 166 | 2.859375 | 3 | [] | no_license | def main():
    # Read N (number of heights) and K (height threshold) from line one
    n,k= list(map(int,input().split()))
    # Read the height values from line two
    h= list(map(int,input().split()))
    ans=0
    # Count how many of the first n heights are at least k
    for i in range(0,n):
        if h[i]>=k:
            ans+=1
    print(ans)
main() | true |
ba0ba65323f1b0a1cb691b1c6f98cc95a6b7d650 | Python | Mohan110594/Bactracking-1 | /combination_sum.py | UTF-8 | 1,048 | 3.78125 | 4 | [] | no_license | // Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : None
# Your code here along with comments explaining your approach:
# For every combination we check if the sum is equal to the given target. If yes, we add it to the list.
# Time complexity  --> O(2**n)
# Space complexity --> O(n * 2**n)
class Solution:
    """Backtracking solution for Combination Sum.

    Finds every combination of candidates (each candidate may be reused
    any number of times) whose values sum to the target.
    """

    def __init__(self):
        # Accumulates the combinations found by the most recent search
        self.result = []

    def backtrack(self, candidates, chosen, remaining, index):
        """Extend *chosen* with candidates[index:] until *remaining* hits 0.

        Tracking the remaining target instead of re-summing *chosen* on
        every call avoids the O(len(chosen)) sum() the original performed
        twice per invocation.
        """
        if remaining == 0:
            self.result.append(list(chosen))  # record a copy of the combination
            return
        if remaining < 0:
            return  # overshot the target; prune this branch
        for i in range(index, len(candidates)):
            chosen.append(candidates[i])  # choose the candidate
            # Pass i (not i + 1) so the same candidate may be reused
            self.backtrack(candidates, chosen, remaining - candidates[i], i)
            chosen.pop()  # un-choose before trying the next candidate

    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return all combinations of candidates summing to target."""
        if not candidates:
            return []
        # Reset state so repeated calls on one instance don't accumulate
        # stale results (the original only cleared this in __init__)
        self.result = []
        self.backtrack(candidates, [], target, 0)
        return self.result
f3a1c2c6d64174956a1bac9e4cbcab65db50465d | Python | aaronr4043/Identifying-Regions-of-face-in-Photos | /FaceProject.py | UTF-8 | 9,399 | 3.296875 | 3 | [
"MIT"
] | permissive | #################################################
#
# Authors: James Hynes, Gytis Mackevicius, Aaron Renaghan, Seamus Timmons
#
# Project Title: Finding Regions of Face in Images
#
# Introduction: This program is designed to locate faces within images by using Image Processing techniques and Machine Learning
#
# Initial Thoughts and Ideas: James - I wanted to do something similar to this with my final year project, but with the
# added complexity of matching faces that had been seen before and adding in some machine
# learning. Therefore I feel as though this project will be beneficial and maybe I could
# incorporate some machine learning in with this aswell, though I see the complexity is
# already there as we have only touched on skin, not defining features as not every piece
# of skin is the face.
# Gytis - First impression, scary. The sound of identifying a face in a picture sounds like a lot
# work and an overall a daunting task. As I have no experience with identifying such an
# ROI and even though it looks difficult, sounds exciting and full of learning material.
# Aaron - I like the sounds of this project, looks quite complex perhaps we can threshold
# for skin initially and then from there all we should have to do is distinguish
# between skin and faces, maybe holes left in the faces such as mouths will help us
# identify the difference?
# Seamus - My initial thoughts on the project are that it will be quite difficult. While
# faces seem very distinct I imagine it wont be very easy to classify them for a
# computer. We have learned about thresholding skin which could help, but I feel it
# will be a challenging project.
#
# Final Thoughts and Reflections: James - I feel like this project has satisfied the criteria we set out to, although I
# am disappointed we could not incorperate the HOG method as I put a lot of research
# into it, but I am very happy with how the project turned out, especially with respect
# to how well it picks out the faces. I am also very happy with how the team worked
# together and the contribution from each member. If I were to do this project again
# there is very little I would change, had we more time I would have perhaps tried to
# reduce false negatives, but overall I am happy with how this project has turned out.
# Gytis - Overall, I am thrilled with the outcome that we managed to achieve. It is exhilarating
# to get my very hands on machine learning and interacting with it. The issues faced
# were quite different than to I am used to and made me aware of another perspective.
# The team that I had was a pleasure to work with and I am happy with the fluid
# workflow that we managed to accomplish. The guys had interesting ideas that otherwise
# I would have never thought of. Exciting project, pleasing results and a great team.
# Aaron - I am very happy with the end result of our project. It was bit of a slow burner
# initially, but given a bit of time we really made a lot of progress in a very short
# timespan. I think our project is a great example of what can be done when machine
# learning and image processing meet. I want to give props to the three lads who all had
# had some great ideas which were used in our final program.
# Seamus - My final thoughts on the project are surprising. While I initially though it
# would be very difficult our goal, it turned out to be a lot easier. Haar
# cascades turned out to be the simplest way to do it. OpenCV provides trained
# models for Haar so we don't have to train it (which is outside the scope of
# the project). I found the project very interesting and enjoyable overall, and
# thanks to Aaron who took the lead on the project giving us direction.
#
# Finish Date: 22/11/2017
#
# Algorithm Description: The Algorithm is a rather simple one, here it is broken down step by step
# Step 1. Import Libraries For use in the code
# Step 2. Load Our Classifier, We will need further down
# Step 3. User Selects their image and we extract the images dimensions
# Step 4. We Resize our image before making it GreyScale, This helps with our Haar Cascades as it will not work on
#                                faces that occupy only a small number of pixels, so upscaling first makes small faces detectable
# Step 5. Run the grayscale image through the classifier search all possible boxes that could contain a face
# Step 6. Check for a percentage of skin pixels using thresholding
# Step 7. Draw box around each with that percentage
# Step 8. Resize our image back to its origional state
# Step 9. Display and write final result to file
# Importing our Libraries
import sys
import logging as log
import numpy as np
import cv2
import easygui
# Method to load a cascade classifier
def loadCascadeClassifier(fileName):
    """Load a Haar cascade from *fileName*; exit the program on failure.

    cv2.CascadeClassifier does not raise when the file is missing or
    invalid -- it silently returns an empty classifier (so the original
    try/except never fired) -- therefore we must check .empty() explicitly.
    """
    log.info("Loading Cascade Classifier")
    log.debug("Loading file \"%s\"" % fileName)
    cascade = cv2.CascadeClassifier(fileName)
    if cascade.empty():
        log.critical("Could not load cascade classifier from \"%s\"" % fileName)
        sys.exit()
    return cascade
# Method to load in an image
def loadImage(fileName):
    """Load an image from *fileName*; exit the program if it cannot be read.

    cv2.imread does not raise on failure -- it returns None (so the
    original try/except never fired) -- therefore we must check the result
    explicitly.
    """
    log.info("Loading image")
    log.debug("Loading file \"%s\"" % fileName)
    image = cv2.imread(fileName)
    if image is None:
        log.critical("Could not read image file \"%s\"" % fileName)
        sys.exit()
    return image
# Method to check if detected region has skin in it
def detectSkin(image):
    """Return True when at least 10% of the BGR image's pixels fall inside
    the YUV skin-tone window (U in [105, 135] and V in [140, 165])."""
    log.info("Detecting for skin")
    log.info("Getting image properties")
    log.info("Converting image to YUV color space")
    yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    log.info("Extracting out U and V channels")
    u = yuv[:, :, 1]
    v = yuv[:, :, 2]
    log.info("Masking out U and V channels for skin")
    uMask = cv2.inRange(u, 105, 135)
    vMask = cv2.inRange(v, 140, 165)
    # Skin pixels end up as 0 in the inverted combined mask
    mask = cv2.bitwise_not(cv2.bitwise_and(uMask, vMask))
    log.info("Getting skin pixels")
    # Vectorized count over the whole mask. The original per-pixel Python
    # loop was slow interpreter work and skipped row/column 0 (off-by-one
    # from range(1, ...)), slightly under-counting skin pixels.
    blackCount = int(np.count_nonzero(mask == 0))
    totalPixels = mask.size
    log.info("Getting skin percentage")
    percentSkin = round((blackCount * 100) / totalPixels)
    log.info("Checking skin percentage")
    return percentSkin >= 10
# Method to perform Haar Cascades on an image
def haarCascades(image, classifier):
    """Detect faces in *image* with the given Haar cascade classifier and
    return a same-sized copy with blue boxes drawn around detections that
    also pass the detectSkin check.

    The image is upscaled 5x before detection (small faces otherwise span
    too few pixels for the cascade), converted to greyscale as the
    classifier requires, then downscaled back to the original size.
    """
    log.info("Performing face recognition")
    log.debug("Getting image properties")
    # Getting image dimensions
    originalHeight, originalWidth, bpp = np.shape(image)
    log.info("Upscaling image")
    # Upscaling the image to accomodate Haar's
    hugeImage = cv2.resize(image, None, fx=5, fy=5, interpolation=cv2.INTER_CUBIC)
    log.info("Converting image to greyscale")
    # Converting the image to greyscale to accomodate Haar's
    grayscale = cv2.cvtColor(hugeImage, cv2.COLOR_BGR2GRAY)
    log.info("Performing Haar cascade detection")
    # Running the Haar Cascades and drawing boxes around detected faces
    faceDetection = classifier.detectMultiScale(grayscale, 1.3, 5)
    log.info("Drawing boxes around faces")
    # Drawing boxes only around detections whose region passes the skin check
    for (x, y, w, h) in faceDetection:
        if detectSkin(hugeImage[y:y+h, x:x+w]):
            cv2.rectangle(hugeImage, (x, y), (x + w, y + h), (255, 0, 0), 5)
    log.info("Downscaling image to original size")
    # Resizing the image back to its origional size
    result = cv2.resize(hugeImage, (originalWidth, originalHeight))  # Resizing the image to fit the screen
    return result
# Setting log file location and level
log.basicConfig(filename='haar.log', level=log.INFO)
# The Classifier we used provided by OpenCV (frontal-face Haar cascade)
face_classifier = loadCascadeClassifier('haarcascade_frontalface_default.xml')
# Reading in our Image (selected by the user via a file-open dialog)
originalImage = loadImage(easygui.fileopenbox(msg='Select Image For Face Detection'))
# Performing face detection on Haar cascades
finalResult = haarCascades(originalImage, face_classifier)
# Writing the image to file and displaying it until a key is pressed
cv2.imwrite('FacesDetected.png', finalResult)
cv2.imshow('Detected Faces', finalResult)
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
0c6f752038ffe44c49f77357e9930baa7901696f | Python | vita-epfl/collaborative-gan-sampling | /sampling/idpsampler.py | UTF-8 | 1,585 | 2.78125 | 3 | [] | no_license | from __future__ import division
import numpy as np
class IndependenceSampler():
    """Metropolis-Hastings independence sampler over discriminator scores.

    A proposal with score d' is accepted with probability
    min(1, d' (1 - d) / (d (1 - d'))) where d is the score of the current
    chain state. Accepted samples are emitted only after a burn-in of B
    acceptances and are then thinned every T chain steps.
    """

    def __init__(self, T=5, B=0):
        # Discriminator score of the current chain state (None = empty chain)
        self.d_curr = None
        # Position within the current thinning window
        self.cnt_chain = 1
        self.thin_period = T
        self.burn_in = B

    def set_score_curr(self, d_curr):
        '''
        Seed the chain with the score of an externally chosen state
        (e.g. after a burn-in phase performed elsewhere).
        '''
        self.d_curr = d_curr

    def sampling(self, samples, sigmoids):
        '''
        Run the MH chain over *samples* and return the kept samples.

        @ param: samples -> array of proposals; first axis indexes proposals
        @ param: sigmoids -> per-proposal discriminator scores in [0, 1]
        @ return: float32 array of thinned, post-burn-in samples
        '''
        assert samples.shape[0] == sigmoids.shape[0]
        assert np.min(sigmoids) >= 0.0
        assert np.max(sigmoids) <= 1.0
        good_samples = []
        curr_sample = None
        cnt_good = 0
        for i in range(samples.shape[0]):
            # MH move: adopt the proposal as chain state when accepted
            if self.next(sigmoids[i]):
                cnt_good = cnt_good + 1
                if cnt_good > self.burn_in:
                    curr_sample = samples[i]
            # Emit the current chain state once per thinning period
            if curr_sample is not None:
                if self.cnt_chain > self.thin_period:
                    good_samples.append(curr_sample)
                    self.cnt_chain = 1
                else:
                    self.cnt_chain = self.cnt_chain + 1
        return np.asarray(good_samples, dtype=np.float32)

    def next(self, d_next):
        '''
        Accept or reject a proposal; True means it becomes the new state.

        @ param: d_next -> score from discriminator for the proposal

        Guards the zero denominator the original hit when d_next == 1.0
        or d_curr == 0.0 (a ZeroDivisionError for plain floats; numpy
        scalars produced inf with a RuntimeWarning instead).
        '''
        if self.d_curr is not None:
            denominator = self.d_curr * (1.0 - d_next)
            if denominator <= 0.0:
                # d_next == 1.0 (perfect proposal) or d_curr == 0.0:
                # the acceptance ratio diverges, so always accept
                alpha = 1.0
            else:
                alpha = min(1.0, d_next * (1.0 - self.d_curr) / denominator)
            if np.random.uniform(0, 1) > alpha:
                return False
        self.d_curr = d_next
        return True
| true |
433f929f17bb64c6622503081ffd5268090405c5 | Python | carlos-gutier/DS-Unit-3-Sprint-2-SQL-and-Databases | /sprint_challenge/northwind.py | UTF-8 | 1,751 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# Sprint challenge: answer four questions against the Northwind sample database.
# In[1]:
import pandas as pd
import sqlite3
# In[2]:
# One connection reused by all queries below; `with conn:` scopes each query
# in a transaction (these are read-only, so it is mainly tidy scoping).
conn = sqlite3.connect('northwind_small.sqlite3')
# In[4]:
# What are the ten most expensive items (per unit price) in the database?
with conn:
    curs = conn.cursor()
    curs.execute("""SELECT ProductName, UnitPrice
                    FROM Product
                    ORDER BY UnitPrice DESC
                    LIMIT 10;""")
    most_exp = curs.fetchall()
print(most_exp)
# In[5]:
# What is the average age of an employee at the time of their hiring?
# (Hint: a lot of arithmetic works with dates.)
with conn:
    curs = conn.cursor()
    # BUGFIX: age at hiring is hiredate - birthdate. The original computed
    # birthdate - hiredate, which gives a negative number (SQLite coerces the
    # leading year of each date string to an integer before subtracting).
    curs.execute("""SELECT AVG(hiredate - birthdate)
                    FROM Employee;""")
    avg_age = curs.fetchall()
print(avg_age)
# In[6]:
# What are the ten most expensive items (per unit price)
# in the database and their suppliers?
with conn:
    curs = conn.cursor()
    curs.execute("""SELECT ProductName, UnitPrice, CompanyName
                    FROM Product, Supplier
                    WHERE Product.SupplierId = Supplier.Id
                    ORDER BY UnitPrice DESC
                    LIMIT 10;""")
    suppliers_n_most_exp = curs.fetchall()
print(suppliers_n_most_exp)
# In[7]:
# What is the largest category (by number of products in it)?
with conn:
    curs = conn.cursor()
    curs.execute("""SELECT CategoryName, Count(Product.CategoryId) as NumberOfProducts
                    FROM Category, Product
                    WHERE Category.Id = Product.CategoryId
                    GROUP BY CategoryName
                    ORDER BY NumberOfProducts DESC
                    LIMIT 1;""")
    largest_category = curs.fetchall()
    print(largest_category)
| true |
23aeb1427c038b065a381146a573abce794e351e | Python | MSSedusch/gardenlinux | /ci/tkn/util.py | UTF-8 | 4,886 | 2.703125 | 3 | [
"MIT"
] | permissive | import collections
import dataclasses
import enum
import json
import shutil
import subprocess
import time
import dacite
import dateutil.parser
def _tkn_executable():
if not (tkn := shutil.which('tkn')):
raise ValueError('did not find `tkn` in PATH')
return tkn
class StatusReason(enum.Enum):
    """Reason strings tekton reports in a run's `status.conditions[].reason`."""
    RUNNING = 'Running'
    FAILED = 'Failed'
    SUCCEEDED = 'Succeeded'
    PIPELINE_RUN_STOPPING = 'PipelineRunStopping'
    PIPELINE_RUN_CANCELLED = 'PipelineRunCancelled'
@dataclasses.dataclass
class TknCondition:
    """One entry of a tekton resource's `status.conditions` list (built via dacite)."""
    lastTransitionTime: str  # ISO-8601 timestamp; parsed with dateutil in _run_status
    message: str
    reason: StatusReason  # dacite casts the raw string into this enum
    status: str # either True|False, or Unknown
    type: str
def run_tkn(*args, namespace: str='gardenlinux'):
    """Invoke the `tkn` CLI with JSON output against the given namespace.

    Returns the CompletedProcess (stdout/stderr captured as text); callers
    are responsible for checking the returncode.
    """
    tkn = _tkn_executable()
    print(args)  # trace the subcommand being executed
    cmd = [tkn, '--namespace', namespace]
    cmd.extend(args)
    cmd.extend(('-o', 'json'))
    return subprocess.run(
        cmd,
        capture_output=True,
        text=True,
    )
def _pipelinerun(name: str, namespace: str='gardenlinux'):
    """Fetch a pipelinerun description via `tkn` and return it parsed from JSON.

    Raises RuntimeError (after dumping stdout/stderr) if the CLI call fails.
    """
    completed = run_tkn(
        'pipelinerun',
        'describe',
        name,
        namespace=namespace,
    )
    if completed.returncode != 0:
        print(completed.stdout)
        print(completed.stderr)
        raise RuntimeError(f'pipelinerun cmd returned {completed.returncode}')
    return json.loads(completed.stdout)
def _run_status(dict_with_status: dict):
    """Return the most recent TknCondition of a tekton entity, or None.

    Works for anything bearing a `status` attribute with a `conditions`
    sub-attribute (e.g. pipelineruns and taskruns). Returns None while the
    entity has no status block yet (early in a run).
    """
    if 'status' not in dict_with_status:
        # Too early: the entity has no status yet.
        return None
    parsed_conditions = []
    for raw_condition in dict_with_status['status']['conditions']:
        parsed_conditions.append(
            dacite.from_dict(
                data=raw_condition,
                data_class=TknCondition,
                config=dacite.Config(
                    cast=[
                        StatusReason,
                    ],
                ),
            )
        )
    # Sort ascending by transition time and keep the newest. The stable sort
    # preserves input order among equal timestamps, matching sorted()[-1].
    by_time = sorted(
        parsed_conditions,
        key=lambda c: dateutil.parser.isoparse(c.lastTransitionTime)
    )
    return by_time[-1]
def pipelinerun_status(name: str, namespace: str='gardenlinux'):
    """Fetch a pipelinerun and reduce it to its latest condition (or None)."""
    run_description = _pipelinerun(name=name, namespace=namespace)
    return _run_status(dict_with_status=run_description)
def wait_for_pipelinerun_status(
    name: str,
    namespace: str='gardenlinux',
    target_status: StatusReason=StatusReason.SUCCEEDED,
    timeout_seconds: int=60*45, # 45 minutes
    polling_interval_seconds: int=15,
):
    """Poll a pipelinerun until it reaches target_status.

    Raises RuntimeError on failure/cancellation or when timeout_seconds is
    exceeded; raises NotImplementedError for an unexpected status reason.
    """
    start_time = time.time()
    while True:
        status = pipelinerun_status(name=name, namespace=namespace)
        if status is not None:
            reason = status.reason
            if reason is target_status:
                print(f'{target_status=} reached - build finished')
                break
        else:
            reason = None
        print(f'{reason=}')
        if reason in (StatusReason.FAILED, StatusReason.PIPELINE_RUN_CANCELLED):
            print(f'{reason=} - aborting')
            raise RuntimeError(reason)
        if reason not in (StatusReason.RUNNING, StatusReason.PIPELINE_RUN_STOPPING, None):
            raise NotImplementedError(reason)
        # Still running (or no status yet): enforce the timeout, then wait.
        if time.time() - start_time > timeout_seconds:
            raise RuntimeError(f'timeout exceeded: {timeout_seconds=}')
        time.sleep(polling_interval_seconds)
    print(f'pipelinerun {name=} reached {target_status=}')
def pipeline_taskrun_status(name: str, namespace: str='gardenlinux'):
    """Summarise a pipelinerun's taskruns into succeeded/pending/failed buckets."""
    pipelinerun_dict = _pipelinerun(name=name, namespace=namespace)
    taskruns = pipelinerun_dict['status']['taskRuns'] # {<taskrun-id>: <taskrun-status>}
    succeeded = []
    pending = []
    failed = []
    for taskrun in taskruns.values():
        latest_condition = _run_status(dict_with_status=taskrun)
        task_name = taskrun['pipelineTaskName']
        condition_status = latest_condition.status.lower()
        if condition_status == 'true':
            succeeded.append(task_name)
        elif condition_status == 'unknown':
            pending.append(task_name)
        else:
            failed.append({
                'name': task_name,
                'message': latest_condition.message,
            })
    TaskStatusSummary = collections.namedtuple(
        'TaskStatusSummary',
        ['succeeded_names', 'pending_names', 'failed_details']
    )
    return TaskStatusSummary(
        succeeded_names=succeeded,
        pending_names=pending,
        failed_details=failed,
    )
| true |
fc3ae6e97bb5163ece7b0c286e55559ed49fa8c3 | Python | sniperpj21/SpreadModel | /Results/output_parser.py | UTF-8 | 8,201 | 2.671875 | 3 | [] | no_license | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import int
from future.builtins import zip
from future.builtins import str
from future import standard_library
standard_library.install_hooks()
from future.builtins import object
import re
import Results.models
from ScenarioCreator.models import Zone, ProductionType
def camel_case_spaces(name_with_spaces):
    """Collapse spaces by upper-casing the following character: 'a b' -> 'aB'."""
    def _upper_after_space(match):
        return match.group(1).upper()
    return re.sub(r' (\w)', _upper_after_space, name_with_spaces)
def number(string):
    """Coerce a value to int, falling back to float, then to -1.

    Used to turn the simulator's CSV cell strings into numbers; anything
    unparsable (including None / empty string) maps to -1.
    """
    try:
        return int(string)
    except (TypeError, ValueError, OverflowError):
        # Bare `except:` previously swallowed everything (even
        # KeyboardInterrupt); only conversion failures should fall through.
        try:
            return float(string)
        except (TypeError, ValueError, OverflowError):
            return -1
def build_composite_field_map(table):
    """Map every non-selector column prefix of a model instance to itself.

    Selector fields (iteration/day/production_type/zone) identify the row
    rather than carry data, so they are excluded.
    """
    selector_fields = ('iteration', 'day', 'production_type', 'zone')
    return {prefix: prefix for prefix, _field in table if prefix not in selector_fields}
class DailyParser(object):
    """Parses the simulation engine's daily CSV output lines into Django
    Results.models rows (per-day, per-iteration, per-zone/production-type).

    The header line fixes the column order for every subsequent day line.
    """
    def __init__(self, header_line, first_day_line):
        """Cache headers and the possible zone/production-type names; store
        the engine version from the first day line if not already recorded."""
        self.headers = header_line.strip().split(',') # there was a trailing /r/n to remove
        # '' stands for "All ProductionTypes" and 'Background' for "no zone".
        self.possible_zones = {x.name for x in Zone.objects.all()}.union({'Background'})
        self.possible_pts = {x.name for x in ProductionType.objects.all()}.union({''})
        self.failures = set()
        if not Results.models.ResultsVersion.objects.exists():
            values = first_day_line.split(',')
            pairs = zip(self.headers, values)
            sparse_values = {a: number(b) for a, b in pairs}
            version = Results.models.ResultsVersion()
            version.versionMajor = sparse_values['versionMajor']
            version.versionMinor = sparse_values['versionMinor']
            version.versionRelease = sparse_values['versionRelease']
            version.save()
    def populate_tables_with_matching_fields(self, model_class_name, instance_dict, sparse_info):
        """Populates all combinations of a particular table in one go. This method must be called once for each
        model class that you want populated.
        model_class_name: named of table defined in Results.models
        instance_dict: a dictionary containing one instance of every combination of parameters. Keys are the "suffix" e.g. _Bull_HighRisk
        sparse_info: Dictionary containing all the key, value pairs that the simulation put out
        field_map: Keys are all column names to match to (prefix only), values are exact field name in that model. The distinction allows
        the program to map multiple columns onto the same field. There are some special cases where column name is not exactly field + suffix.
        """
        field_map = build_composite_field_map(getattr(Results.models, model_class_name)() ) # creates a table instance
        keys_to_delete = []
        for suffix_key, instance in instance_dict.items(): # For each combination: DailyByZoneAndProductionType with (Bull_HighRisk), (Swine_MediumRisk), etc.
            instance_needed = False
            for column_name, model_field in field_map.items():
                if column_name + suffix_key in sparse_info:
                    setattr(instance, model_field, sparse_info[column_name + suffix_key])
                    instance_needed = True
                    # A column still present in self.failures has not been
                    # consumed yet; removing it twice signals a double match.
                    try:
                        self.failures.remove(column_name + suffix_key)
                    except KeyError:
                        print('Error: Column was assigned twice. Second copy in %s.%s for output column %s.' % (model_class_name, model_field, column_name + suffix_key))
                else:
                    pass # It's okay for the model to specify a field that the C Engine doesn't output. No harm done
            if not instance_needed:
                keys_to_delete.append(suffix_key)
        # Drop combinations that matched no output column at all.
        for suffix_key in keys_to_delete:
            del instance_dict[suffix_key]
        return [instance for key, instance in instance_dict.items()]
    def construct_combinatorial_instances(self, day, iteration, last_line):
        """This constructs a mapping between the name of the column 'suffix' for example: 'BackgroundCattle' and maps it
        to the appropriate Django query settings to grab the matching model instance. For 'BackgroundCattle' the query
        should be `DailyByZoneAndProductionType(production_type__name=Cattle, zone=None, ...`.
        This handles the special blank case for both "All ProductionType" = '' and "Background Zone" = None.
        It returns a dict which is the collection of all the model instances which will need to be populated each day:
        1 DailyControls
        1*pt DailyByProductionType
        zones*pt DailyByZoneAndProductionType
        zones*1 DailyByZone
        """
        daily_instances = {table_name:{} for table_name in ["DailyByProductionType", "DailyByZone", "DailyByZoneAndProductionType", "DailyControls"]}
        daily_by_pt = daily_instances["DailyByProductionType"]
        for pt_name in self.possible_pts:
            pt = ProductionType.objects.filter(name=pt_name).first() # obj or None for "All Animals" case
            daily_by_pt[camel_case_spaces(pt_name)] = \
                Results.models.DailyByProductionType(production_type=pt, iteration=iteration, day=day, last_day=last_line)
        daily_instances["DailyByZone"] = {camel_case_spaces(zone_name):
            Results.models.DailyByZone(zone=Zone.objects.filter(name=zone_name).first(), iteration=iteration, day=day, last_day=last_line) for zone_name in self.possible_zones}
        daily_by_pt_zone = daily_instances["DailyByZoneAndProductionType"]
        for pt_name in self.possible_pts:
            pt = ProductionType.objects.filter(name=pt_name).first() # obj or None for "All Animals" case
            for zone_name in self.possible_zones:
                zone = Zone.objects.filter(name=zone_name).first() # obj or None for "Background" case
                daily_by_pt_zone[camel_case_spaces(zone_name + pt_name)] = \
                    Results.models.DailyByZoneAndProductionType(production_type=pt, zone=zone, iteration=iteration, day=day, last_day=last_line)
        daily_instances["DailyControls"] = {'': Results.models.DailyControls(iteration=iteration, day=day, last_day=last_line)} # there's only one of these
        return daily_instances
    def populate_db_from_daily_report(self, sparse_info, last_line):
        """Parses the C Engine stdout and populates the appropriate models with the information. Takes one line
        at a time, representing one DailyReport."""
        assert isinstance(sparse_info, dict)
        # sparse_info = literal_eval(report.sparse_dict)
        # print(sparse_info)
        # Selector columns are consumed here and removed so the remaining
        # keys are pure data columns to be matched against model fields.
        iteration = sparse_info['Run']
        del sparse_info['Run']
        day = sparse_info['Day']
        del sparse_info['Day']
        del sparse_info['versionMajor']
        del sparse_info['versionMinor']
        del sparse_info['versionRelease']
        self.failures = set(sparse_info.keys()) # whatever is left is a failure
        #construct the set of tables we're going to use for this day
        daily_instances = self.construct_combinatorial_instances(day, iteration, last_line)
        results = []
        for class_name in daily_instances:
            result = self.populate_tables_with_matching_fields(class_name, daily_instances[class_name], sparse_info) # there was a lot of preamble to get this line to work
            results.extend(result)
        if len(self.failures) and day == 1:
            print('Unable to match columns: ', len(self.failures), sorted(self.failures))
        return results
    def parse_daily_strings(self, cmd_strings, last_line=False):
        """Parse raw simulator output lines: archive each as a DailyReport and
        return the (unsaved) model instances built from its values."""
        results = []
        for cmd_string in cmd_strings:
            values = cmd_string.split(',')
            if len(values):
                pairs = zip(self.headers, values)
                sparse_values = {a: number(b) for a, b in pairs}
                Results.models.DailyReport(sparse_dict=str(sparse_values), full_line=cmd_string).save()
                results.extend(self.populate_db_from_daily_report(sparse_values, last_line))
        return results
| true |
93bc9df4ae688c8764c4312a7624cd2b62255fca | Python | GuoYuanfang/ML-impletation | /linear classification.py | UTF-8 | 8,013 | 3.1875 | 3 | [] | no_license | # @Time : 2018/9/17 15:43
# @Author : Guo Yuanfang
# @File : linear classification.py
# @Software: PyCharm
# This module implements several linear classification algorithms from machine learning.
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.metrics import f1_score, classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import svm
# —————————— Classification problems ——————————
## Generate a synthetic binary-classification dataset with make_classification
data = datasets.make_classification(n_samples=100, n_features=10, n_informative=5, n_redundant=0, n_classes=2)
## data[0]: X, data[1] = y
# NOTE(review): `data` and `data2fea` are module-level examples only; the
# test_* functions below regenerate their own datasets and do not reuse these.
data2fea = datasets.make_classification(n_samples=10, n_features=2, n_informative=1, n_redundant=0, n_classes=2,
                                        n_clusters_per_class=1)
# Perceptron
def perceptron(X, y):
    """Train a linear perceptron classifier: predict sign(w.x + b).

    X: (n_samples, n_features) array; y: labels in {0, 1} or {-1, +1}.
    Returns (w, b). Note: only terminates when the data is linearly separable.
    """
    # Normalize labels to {-1, +1}, which the update rule below requires.
    labels = np.sign(y - 0.5) if set(y) == set((0, 1)) else y
    n_samples, n_features = X.shape
    learning_rate = 0.5
    w = np.zeros(n_features)
    b = 0
    converged = False
    while not converged:
        for i in range(n_samples):
            # A sample is misclassified when label * (w.x + b) <= 0; fix the
            # first one found and rescan from the start.
            if labels[i] * (w.dot(X[i]) + b) <= 0:
                w = w + learning_rate * labels[i] * X[i]
                b = b + learning_rate * labels[i]
                break
        else:
            # Full clean pass over the data: training is done.
            converged = True
    return w, b
def logistics(X, y):
    """Binary logistic regression fit by stochastic gradient descent.

    X: (n_samples, n_features) array; y: labels in {0, 1} or {-1, +1}.
    Returns (w, b): weight vector of shape (n_features,) and bias scalar.

    BUGFIX: the original multiplied wn (column) by xs (row), producing a
    (f+1, f+1) matrix — the math was wrong and the loop crashed on the second
    iteration; it also never returned anything, although test_logistics()
    unpacks `w, b` from it.
    """
    # Map labels to {0, 1}; the negative-log-likelihood gradient below
    # (p - y) * x assumes that encoding.
    y01 = np.asarray(y, dtype=float)
    if set(np.unique(y01)) == {-1.0, 1.0}:
        y01 = (y01 + 1) / 2
    num_sample, num_feature = X.shape
    eta = 0.05 # fixed learning rate
    # Augment X with a ones column so the bias is learned as the last weight.
    Xh = np.hstack((np.asarray(X, dtype=float), np.ones((num_sample, 1))))
    wn = np.zeros(num_feature + 1)  # current weights
    wh = np.ones(num_feature + 1)   # previous weights (init != wn so loop starts)
    step = 0
    max_steps = 100000  # guard: SGD on non-separable data may never converge
    while np.linalg.norm(wh - wn) > 0.001 and step < max_steps:
        step += 1
        sam_selected = np.random.randint(0, num_sample)  # pick one sample for the gradient
        xs = Xh[sam_selected]
        # Sigmoid of the linear score; NLL gradient is (p - y) * x.
        p = 1.0 / (1.0 + np.exp(-xs.dot(wn)))
        grad = (p - y01[sam_selected]) * xs
        wh = wn
        wn = wn - eta * grad
    print("\n随机梯度下降算法共迭代{}次得到答案w:\n {}".format(step, wn))
    return wn[:-1], wn[-1]
# 调库实现
def test_perceptron():
    """Visual smoke test: fit the perceptron on random 2-feature data and plot it."""
    # test perceptron
    # XTemp = np.array([[3, 3], [5, 4], [4, 3], [4, -4], [1, 1]])
    # XTempT = XTemp.T
    # yTemp = np.array([1, 1, 1, -1, -1])
    data2fea = datasets.make_classification(n_samples=10, n_features=2, n_informative=1, n_redundant=0, n_classes=2,
                                            n_clusters_per_class=1)
    XTemp = data2fea[0]
    yTemp = data2fea[1]
    # Perceptron training expects labels in {-1, +1}.
    if set(yTemp) == set((0, 1)):
        yTemp = np.sign(yTemp - 0.5)
    ind1 = np.argwhere(yTemp == 1)
    ind0 = np.argwhere(yTemp == -1)
    plt.scatter(XTemp[ind1, 0], XTemp[ind1, 1])
    plt.scatter(XTemp[ind0, 0], XTemp[ind0, 1])
    # NOTE(review): perceptron() loops forever if the generated data is not
    # linearly separable — consider capping iterations there.
    w, b = perceptron(XTemp, yTemp)
    minX = XTemp.min(0)[0]
    maxX = XTemp.max(0)[0]
    # Decision boundary w0*x + w1*y + b = 0, drawn across the x-range.
    plt.plot([minX, maxX], [-(b + w[0] * minX) / w[1], -(b + w[0] * maxX) / w[1]])
    plt.show()
    print('w: {}'.format(w))
    print('b: {}'.format(b))
def test_logistics():
    """Visual smoke test: fit logistic regression on random 2-feature data and plot it."""
    # test logistics
    # XTemp = np.array([[3, 3], [5, 4], [4, 3], [4, -4], [1, 1]])
    # XTempT = XTemp.T
    # yTemp = np.array([1, 1, 1, -1, -1])
    data2fea = datasets.make_classification(n_samples=10, n_features=2, n_informative=1, n_redundant=0, n_classes=2,
                                            n_clusters_per_class=1)
    XTemp = data2fea[0]
    yTemp = data2fea[1]
    if set(yTemp) == set((0, 1)):
        yTemp = np.sign(yTemp - 0.5)
    ind1 = np.argwhere(yTemp == 1)
    ind0 = np.argwhere(yTemp == -1)
    plt.scatter(XTemp[ind1, 0], XTemp[ind1, 1])
    plt.scatter(XTemp[ind0, 0], XTemp[ind0, 1])
    # NOTE(review): this unpacking requires logistics() to return a (w, b)
    # pair — verify its return value; a bare `return` would make this raise.
    w, b = logistics(XTemp, yTemp)
    minX = XTemp.min(0)[0]
    maxX = XTemp.max(0)[0]
    plt.plot([minX, maxX], [-(b + w[0] * minX) / w[1], -(b + w[0] * maxX) / w[1]])
    plt.show()
    print('w: {}'.format(w))
    print('b: {}'.format(b))
class multi_classification:
    """Multi-class classification composed from binary SVMs.

    model='ovo': one-vs-one — one SVC per unordered class pair.
    model='ovr': one-vs-rest — one SVC per class; positives carry the shifted
                 label class+1 and every other sample becomes 0 ("rest").
    predict() aggregates the binary classifiers' outputs by majority vote.
    """
    def __init__(self, model='ovo'):
        self.model = model
    def fit(self, X, y):
        """Train the underlying binary classifiers on (X, y)."""
        self.num_sample, self.num_feature = X.shape
        set_target = set(y)
        num_target = len(set_target)
        dict_x = {}
        dict_y = {}
        classifiers = []
        for i in set_target:
            dict_x[i] = X[y == i]
            dict_y[i] = y[y == i]
        if self.model == 'ovo':
            # Train one one-vs-one classifier per class pair (i < j).
            for i in set_target:
                for j in set_target:
                    if i < j:
                        X_temp = np.vstack((dict_x[i], dict_x[j]))
                        y_temp = np.hstack((dict_y[i], dict_y[j]))
                        classifier = svm.SVC()  # single pairwise classifier
                        classifier.fit(X_temp, y_temp)
                        classifiers.append(classifier)
        elif self.model == 'ovr':
            # Train one one-vs-rest classifier per class: shift labels by +1 so
            # that 0 can represent "rest", then zero out all other classes.
            for i in range(num_target):
                y_temp = y.copy()
                y_temp = y_temp + 1
                y_temp[y_temp != i+1] = 0
                classifier = svm.SVC()
                classifier.fit(X, y_temp)
                classifiers.append(classifier)
        else:
            print("No such model!")
        self.set_target = set_target
        self.num_target = num_target
        self.classifiers = classifiers
    def get_mode(self, x):
        """Row-wise vote aggregation.

        x: (n_samples, n_classifiers) array of int-valued predicted labels.
        For 'ovr', the zero votes ("rest") are dropped, and the argmax index
        over the remaining counts maps shifted label i+1 back to class i.
        """
        m = x.shape[0]
        modes = np.zeros(m)
        x = x.astype(int)
        for i in range(len(x)):
            bin_count = np.bincount(x[i])
            if self.model == 'ovr':
                bin_count = bin_count[1:]
            modes[i] = np.argmax(bin_count)
        return modes
    def predict(self, X):
        """Predict class labels for X by majority vote of the binary classifiers."""
        # BUGFIX: size the vote matrix by the *current* input, not by
        # self.num_sample recorded during fit() — predicting on any input of
        # a different length previously raised a shape-mismatch error.
        y_preds = np.zeros((X.shape[0], len(self.classifiers)))
        for i in range(len(self.classifiers)):
            y_preds[:, i] = self.classifiers[i].predict(X)
        y_pred = self.get_mode(y_preds)
        print(y_pred)
        return y_pred
# def multi_cl_ovo(X,y):
# set_target = set()
# num_target = len(set_target)
#
# dict_x = {}
# dict_y = {}
# lrs = [[i for i in range(num_target)] for i in range(num_target)] # list of one-vs-one LinearRegression
#
#
# for i in set_target:
# dict_x[i] = X[y==i]
# dict_y[i] = y[y==i]
#
# # 训练各个单对单分类器
# for i in set_target:
# for j in set_target:
# if i < j:
# X_temp = np.vstack((dict_x[i],dict_x[j]))
# y_temp = np.vstack((dict_y[i],dict_y[j]))
# lr = LinearRegression() # 单个分类器
# lr.fit(X_temp,y_temp)
# lrs[i,j] = lr
# return
def multi_cl_ovr():
    # Placeholder: the standalone one-vs-rest helper was never implemented;
    # multi_classification(model='ovr') provides this functionality instead.
    return
def test_multi_ovo():
    """Smoke test: one-vs-one multi-class SVM on the iris dataset."""
    # Generate data
    ## sklearn's iris dataset: a 3-class classification dataset
    iris = datasets.load_iris()
    X_iris = iris.data
    y_iris = iris.target
    # NOTE(review): train_test_split returns (X_train, X_test, y_train, y_test);
    # the names below are in the wrong order — harmless only because they are
    # unused (the model is fit on the full dataset).
    Xtrain, y_train, X_test, y_test = train_test_split(X_iris, y_iris, test_size=0.3)
    movo = multi_classification()
    movo.fit(X_iris, y_iris)
    movo.predict(X_iris)
    return
def test_multi_ovr():
    """Smoke test: one-vs-rest multi-class SVM on the iris dataset."""
    # Generate data
    ## sklearn's iris dataset: a 3-class classification dataset
    iris = datasets.load_iris()
    X_iris = iris.data
    y_iris = iris.target
    # NOTE(review): same unpack-order mislabel as in test_multi_ovo; the split
    # variables are unused.
    Xtrain, y_train, X_test, y_test = train_test_split(X_iris, y_iris, test_size=0.3)
    movo = multi_classification('ovr')
    movo.fit(X_iris, y_iris)
    movo.predict(X_iris)
    return
if __name__ == '__main__':
    # Demo entry point; uncomment the other smoke tests as needed.
    # test_perceptron()
    # test_logistics()
    # test_multi_ovo()
test_logistics() | true |
41793d3e1292ed45cb8842291781fe18180e66bd | Python | davidngu3/leetcode | /599_minimum_index_sum_of_two_lists.py | UTF-8 | 910 | 3.578125 | 4 | [] | no_license | import math
class Solution:
    def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
        """Return all common strings with the minimum index sum.

        Builds an index map of list1 (O(n)), then scans list2 keeping the
        best sum seen so far; ties are all collected.
        """
        # Map each restaurant in list1 to its index for O(1) lookups.
        seen = {}
        for index, restaurant in enumerate(list1):
            seen[restaurant] = index
        answer = []
        lowestSum = math.inf
        for index, restaurant in enumerate(list2):
            # Early termination (previously only noted in a comment): any
            # further match has sum >= index, so once index exceeds the best
            # sum nothing later can beat or even tie it.
            if index > lowestSum:
                break
            if restaurant in seen:
                newSum = index + seen[restaurant]
                if newSum == lowestSum:
                    answer.append(restaurant)
                elif newSum < lowestSum:
                    answer = [restaurant]
                    lowestSum = newSum
        return answer
| true |
fc7dcd694e08ee46b584c20d2be090da4428bbf9 | Python | herkamg/Fundamentals-of-Computing | /week_1_b/fct_is_even.py | UTF-8 | 346 | 4.3125 | 4 | [] | no_license | def is_even(number):
""" This function check if a number is even or odd
and return true when even else false
"""
return number % 2 == 0
def test(number):
    """Print whether *number* is even or odd, as reported by is_even()."""
    parity = "is even" if is_even(number) else "is odd"
    print(number, parity)
test(8)  # even -> "8 is even"
test(3)  # odd  -> "3 is odd"
test(12) | true |
746706274009f38e29653b892c7180b4f2459c28 | Python | samoel07/desafios-python | /DESAFIO 55.py | UTF-8 | 151 | 3.140625 | 3 | [] | no_license | num = (1, 2, 3, 4)
print(num[0])  # prints the first element of the tuple
# A tuple is a list-like type, but the difference is that its elements cannot be replaced.
#e se usa as () em vez []. | true |
9e217ed4b6188e531ed11887476949081e009ac2 | Python | harshil1903/leetcode | /HashTable/0692_top_k_frequent_words.py | UTF-8 | 1,061 | 4.03125 | 4 | [] | no_license |
# 692. Top K Frequent Words
#
# Source : https://leetcode.com/problems/top-k-frequent-words/
#
# Given a non-empty list of words, return the k most frequent elements.
#
# Your answer should be sorted by frequency from highest to lowest. If two words have the same frequency, then the word with the lower alphabetical order comes
# first.
from collections import Counter
from typing import List
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the k most frequent words; ties broken by alphabetical order."""
        freq = Counter(words)
        # Sorting key: negated count gives descending frequency, and the word
        # itself breaks ties ascending (lexicographically smallest first).
        ranked = sorted(freq.keys(), key=lambda word: (-freq[word], word))
        return ranked[:k]
if __name__ == '__main__':
s = Solution();
print(s.topKFrequent(["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"],4)) | true |
d83c44a8a147af0ce5f6bf36cd895429d460154d | Python | Pallavi2802/summer | /tkinter/calci.py | UTF-8 | 2,124 | 2.71875 | 3 | [] | no_license | from tkinter import *
root=Tk()
def display(t):
    """Append the pressed key's character t to the calculator display."""
    global s1
    s1=s.get()+t
    s.set(s1)
def display1():
    """Clear the calculator display (bound to the 'C' button)."""
    global s1
    s1=''
    s.set(s1)
def Result():
    """Evaluate the displayed expression and show the result ('=' button)."""
    global s1
    s1=s.get()
    # SECURITY/ROBUSTNESS NOTE(review): eval() executes arbitrary Python typed
    # into the entry, and raises on malformed expressions or division by zero
    # — consider a restricted expression parser and a try/except here.
    s1=str(eval(s1))
    s.set(s1)
    s1=''
s=StringVar()
root.title("Calculator")
f0=Frame(root,bg='white')
f1=Frame(root,bg='white')
f2=Frame(root,bg='white')
f3=Frame(root,bg='white')
f4=Frame(root,bg='white')
f5=Frame(root,bg='white')
e1=Entry(f0,textvariable=s,bg='lightyellow',fg='black',bd=5,justify='right',font=('arial',20,'bold'),relief=RAISED)
e1.pack(fill=BOTH,expand=YES,padx=5,pady=5)
for i in 'C':
b=Button(f1,text=i,bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=display1)
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
for i in '/':
b=Button(f1,text=i,bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=lambda x=i:display(x))
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
for i in '789*':
b=Button(f2,text=i,bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=lambda x=i:display(x))
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
for i in '456-':
b=Button(f3,text=i,bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=lambda x=i:display(x))
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
for i in '123+':
b=Button(f4,text=i,bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=lambda x=i:display(x))
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
for i in '%0.':
b=Button(f5,text=i,bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=lambda x=i:display(x))
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
b=Button(f5,text='=',bg='lightyellow',fg='black',bd=5,font=('arial',20,'bold'),relief=RAISED,command=Result)
b.pack(side=LEFT,fill=BOTH,expand=YES,padx=5,pady=5)
f0.pack(fill=BOTH,expand=YES)
f1.pack(fill=BOTH,expand=YES)
f2.pack(fill=BOTH,expand=YES)
f3.pack(fill=BOTH,expand=YES)
f4.pack(fill=BOTH,expand=YES)
f5.pack(fill=BOTH,expand=YES)
root.mainloop() | true |
52bbd1de7f8da3dca81015e4112f9100f0310a64 | Python | KarinSu97/python | /计算机视觉/Opencv教程/图像处理/图像转换操作.py | UTF-8 | 368 | 2.984375 | 3 | [] | no_license | import cv2
from matplotlib.pyplot import *
# Convert an image to grayscale and save it as PNG.
grayimage=cv2.imread('C:\\Users\\T\\Downloads\\5.jpg',cv2.IMREAD_GRAYSCALE)
cv2.imwrite('C:\\Users\\T\\Downloads\\5.png',grayimage)
# Replace one region of an image with another region of the same image.
image=cv2.imread('C:\\Users\\T\\Downloads\\4.jpg')
# NOTE(review): cv2.imread returns BGR channel order while pyplot's imshow
# assumes RGB, so displayed colors will look swapped — confirm intent.
imshow(image)
# Copy rows 500:854 onto rows 0:354 (same column range), overwriting the
# top band of the picture with a lower band.
image[0:354,0:480]=image[500:854,0:480]
imshow(image) | true |
5d8fa8b57e682f6c6b42346f5d0adcb889f12c54 | Python | muntac/basic-blog | /handlers/BaseHandler.py | UTF-8 | 1,861 | 2.59375 | 3 | [] | no_license | import webapp2
import jinja2
import os
from db.User import User
from utils.validation import validation
class BaseHandler(webapp2.RequestHandler):
    """Shared base for request handlers: jinja2 template rendering, secure
    cookie helpers, and per-request resolution of the logged-in user."""
    #Path variables/jinja initialization
    current_path = os.path.dirname(__file__)
    parent_path = os.path.abspath(os.path.join(current_path, os.pardir))
    parent_dir = os.path.join(parent_path, 'templates')
    jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(parent_dir), autoescape = True)
    def write(self, *a, **kw):#shortcut for self.response.write
        self.response.write(*a, **kw)
    def render_str(self, template, **kw):#Uses Jinja render() to render template and return it as a unicode string
        jinja_tempobj = self.jinja_env.get_template(template) #loads template from the environment
        return jinja_tempobj.render(kw)
    def render(self, template, **kw):#Simply calls write on the Jinja template string
        self.write(self.render_str(template, **kw))
    def set_secure_cookie(self, name, val):
        # Store the value in tamper-evident form via validation.make_secure_val
        # (presumably HMAC-signed — verify in utils.validation) so it can be
        # checked again in read_secure_cookie().
        cookie_val = validation.make_secure_val(val)
        self.response.headers.add_header('Set-Cookie', '%s=%s; Path=/' % (name, cookie_val))
    def read_secure_cookie(self, name):
        # Returns the verified cookie value, or None when absent (and whatever
        # check_secure_val returns on tampering — likely None).
        cookie_val = self.request.cookies.get(name)
        if cookie_val:
            return validation.check_secure_val(cookie_val)
    def login(self, userobj):
        # Persist the user's datastore id in the signed 'user_id' cookie.
        self.set_secure_cookie('user_id', str(userobj.key().id()))
    def logout(self):
        # Clearing the cookie value logs the user out.
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
        #if there are other user related cookies those need to be deleted too
    def initialize(self, *a, **kw):
        # Runs at the start of every request: resolve self.user from the
        # signed cookie so handlers can check authentication uniformly.
        webapp2.RequestHandler.initialize(self, *a, **kw)
        uid = self.read_secure_cookie('user_id')
        if uid:
            self.user = User.get_by_id(int(uid))
        else:
            self.user = None
| true |
6eb853e4fac81fbdbfbafc0cb8fba0f8fe216add | Python | AlphaLambdaMuPi/DroneServer | /role.py | UTF-8 | 2,131 | 2.609375 | 3 | [] | no_license | #! /usr/bin/env python3
import asyncio
import logging
logger = logging.getLogger()
class Role:
    """Base class for a connected client (drone or controller).

    run() drives the role's main loop (_run, overridden by subclasses) and
    always closes the underlying connection afterwards.
    """
    def __init__(self, data, conn, server):
        self.name = data['name']   # client-provided identifier
        self._conn = conn          # connection object (provides recv/send/alive/close)
        self._server = server      # owning server, used for cross-role routing
    # NOTE(review): @asyncio.coroutine / `yield from` generator coroutines were
    # removed in Python 3.11 — migrating to async def / await is required there.
    @asyncio.coroutine
    def run(self):
        """Execute the role's main loop, then close the connection."""
        yield from self._run()
        yield from self.close()
    @asyncio.coroutine
    def _run(self):
        # Default role does nothing; subclasses override with their main loop.
        pass
    @asyncio.coroutine
    def close(self):
        """Close the underlying connection."""
        yield from self._conn.close()
class Control(Role):
    """A controller client: forwards received commands to its target drone."""
    def __init__(self, data, conn, server):
        super().__init__(data, conn, server)
        self._target = None  # drone this controller currently steers
    def set_drone(self, drone):
        """Select which drone subsequent commands are forwarded to."""
        self._target = drone
    @asyncio.coroutine
    def _run(self):
        """Relay messages from the controller connection until it closes."""
        while self._conn.alive():
            message = yield from self._conn.recv()
            if message is None:
                break
            delivered = self._server.send_command_to_drone(self._target, message)
            if not delivered:
                self._conn.send({"Error":"drone was gone(?)"})
    @asyncio.coroutine
    def close(self):
        yield from super().close()
class Drone(Role):
    """A drone client: tracks status reports and accepts forwarded commands."""
    def __init__(self, data, conn, server):
        super().__init__(data, conn, server)
        self._ipaddr = data.get('ip', None)
        self._status = False  # last status payload reported by the drone
    @asyncio.coroutine
    def _run(self):
        """Consume status messages; give up after more than 5 empty reads."""
        empty_reads = 0
        while self._conn.alive() and empty_reads <= 5:
            report = yield from self._conn.recv()
            if report is None:
                empty_reads += 1
            elif report.get('status', None):
                self._status = report['status']
            logger.debug('receive from {}: {}'.format(self.name, report))
    def get_command(self, command):
        """Forward *command* to the drone; True on success, False if unreachable."""
        if not self._conn.alive():
            return False
        try:
            self._conn.send(command)
        except ConnectionError:
            logger.debug('Drone {} lost connection.'.format(self.name))
            return False
        return True
    def get_status(self):
        """Snapshot of the drone's last reported status and its IP address."""
        return {'status': self._status, 'ip': self._ipaddr}
    @asyncio.coroutine
    def close(self):
        yield from super().close()
| true |
f9f427fe500a3d7b073461db7b2f5c947493b0d7 | Python | iyanuashiri/dj-cart | /tests/tests.py | UTF-8 | 506 | 2.53125 | 3 | [
"MIT"
] | permissive | import pytest
# Create your tests here.
@pytest.mark.django_db
def test_cart_model(cart, cartitem):
    """Cart model: checkout flag, totals, count and serialization.

    `cart` and `cartitem` are pytest fixtures (see the project's conftest);
    the cartitem fixture is required so the cart has one 10 x 100.00 item.
    """
    assert cart.checked_out is True
    assert cart.cart_price() == 1000
    assert cart.count() == 1
    assert cart.serializable() == [{'product_id': 1, 'quantity': 10, 'price': 100.00}]
@pytest.mark.django_db
def test_cart_item_model(cartitem, cart):
    """CartItem model: parent link, quantity/price fields and derived total."""
    assert cartitem.cart == cart
    assert cartitem.quantity == 10
    assert cartitem.price == 100.00
    assert cartitem.total_price == 1000
| true |
3dd66a6dcf939de031b4a68e083990c6a0ce7e36 | Python | lynetteoh/comp3331-Computer-Networks-Assignment- | /random_generator.py | UTF-8 | 173 | 2.84375 | 3 | [] | no_license |
import random
# Fixed seed so the generated file is reproducible across runs.
random.seed(100)
# NOTE(review): mode "w+" also opens for reading, which is never used here —
# plain "w" would express the intent better.
with open("random.txt", "w+") as f:
    # Write 150 numbered pseudo-random values, one per line.
    for i in range(0, 150):
        f.write("count: {} random:{} \n".format(i+1, random.random()))
| true |
6859bda66ed29db135202ecfcab418561bdd46ab | Python | kwngo/pave-subway-challenge | /examples.py | UTF-8 | 1,013 | 3.53125 | 4 | [] | no_license | import subway
# Challenge 1: route finding across named lines that share transfer stations.
subway_system = subway.SubwaySystem()
subway_system.add_train_line(stops=["Canal", "Houston", "Christopher", "14th"], name="1")
subway_system.add_train_line(stops=["Spring", "West 4th", "14th", "23rd"], name="E")
subway_system.add_train_line(stops=["Wall", "Fulton", "Park Place", "Chambers", "14th", "34th"], name="2")
path = subway_system.take_train(origin="Houston", destination="23rd")
print("Answer to Challenge 1:", path)
# Challenge 2: the same route query, with per-segment travel times supplied
# as (from_stop, to_stop, minutes) triples.
subway_system_with_times = subway.SubwaySystem()
subway_system_with_times.add_train_line(stops=["Canal", "Houston", "Christopher", "14th"], name="1", time_between_stations=[("Canal", "Houston", 3), ("Houston", "Christopher", 7),("Christopher", "14th", 2)])
subway_system_with_times.add_train_line(stops=["Spring", "West 4th", "14th", "23rd"], name="E",time_between_stations=[("Spring", "West 4th", 1),("West 4th", "14th", 5),("14th", "23rd", 2),])
path2 = subway_system_with_times.take_train(origin="Houston", destination="23rd")
print("Answer to Challenge 2:", path2)
| true |
3a695a34ec1dccfe0dfcd80167497b5f43ab1921 | Python | SongWang2017/Deep-Learning-From-Scratch | /testing/testing_initialization.py | UTF-8 | 664 | 2.875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x)) of a numpy array or scalar."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
# 1000 samples with 100 features drawn from a standard normal distribution.
x = np.random.randn(1000, 100)
node_num = 100          # neurons per hidden layer
hidden_layer_size = 5   # number of hidden layers
activation = {}         # layer index -> activation matrix
for i in range(hidden_layer_size):
    if i != 0:
        # Each layer after the first consumes the previous layer's output.
        x = activation[i - 1]
    # Alternative weight scales kept for experimentation:
    #w = np.random.randn(node_num, node_num) * 1
    #w = np.random.randn(node_num, node_num) * 0.01
    # Xavier-style initialization: scale by 1/sqrt(fan_in) so activations
    # keep a useful spread through the sigmoid layers.
    w = np.random.randn(node_num, node_num) / np.sqrt(node_num)
    z = np.dot(x, w)
    a = sigmoid(z)
    activation[i] = a
# Plot one activation histogram per layer, side by side in a single figure.
for i, a in activation.items():
    plt.subplot(1, len(activation), i + 1)
    plt.title(str(i + 1) + "-layer")
    plt.hist(a.flatten(), 30, range=(0, 1))
# BUGFIX: show() was inside the loop, which blocks after every subplot and
# pops one window per layer; displaying once after all subplots are drawn
# produces the intended single figure.
plt.show()
5f38d37c1f1d6ccf1c9d002e55837420dbc08be7 | Python | hadi-ibrahim/Drunk-Driver-Classification | /convert.py | UTF-8 | 4,292 | 2.765625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import win32com.client
import time
import os
import shutil
import glob
def ppt_to_mp4(ppt_path, mp4_target, resolution=720, frames=24, quality=60, timeout=120):
    """Convert a PowerPoint file to MP4 via the PowerPoint COM automation API.

    Requires Windows with Microsoft Office 2010+ and Windows Media Player
    (Desktop Experience) enabled; the actual rendering is done by PowerPoint
    itself through Presentation.CreateVideo.

    :param ppt_path: absolute path of the .ppt/.pptx/.pptm source file.
    :param mp4_target: absolute path of the .mp4 file to create.
    :param resolution: vertical resolution of the video (480, 720, 1080, ...).
    :param frames: frames per second.
    :param quality: 0-100; higher means better quality.
    :param timeout: seconds to wait before killing PowerPoint.
    :return: conversion result. 0: failed. -1: timeout. 1: success.
    """
    status = 0
    if ppt_path == '' or mp4_target == '':
        return status
    start_tm = time.time()
    # Create the target folder if it does not exist yet.
    # (os.path.dirname replaces the original rfind('\\') slicing, which
    # produced a garbage path when the target contained no backslash.)
    sdir = os.path.dirname(mp4_target)
    if sdir and not os.path.exists(sdir):
        os.makedirs(sdir)
    # Kick off the conversion; CreateVideo returns immediately and renders
    # asynchronously, so we poll the output file below.
    # CreateVideo docs: https://docs.microsoft.com/en-us/office/vba/api/powerpoint.presentation.createvideo
    ppt = win32com.client.Dispatch('PowerPoint.Application')
    presentation = ppt.Presentations.Open(ppt_path, WithWindow=False)
    presentation.CreateVideo(mp4_target, -1, 1, resolution, frames, quality)
    while True:
        try:
            time.sleep(0.1)
            if time.time() - start_tm > timeout:
                # Conversion timed out: kill PowerPoint (which makes the
                # pending COM calls raise) and report -1.
                os.system("taskkill /f /im POWERPNT.EXE")
                status = -1
                break
            # BUG FIX: the original tested the *global* ``mp4_path`` here
            # instead of the ``mp4_target`` parameter, so the function only
            # worked when the caller happened to define that global.
            # A 0-byte file means the conversion has not completed yet.
            if os.path.exists(mp4_target) and os.path.getsize(mp4_target) == 0:
                continue
            status = 1
            break
        except Exception as e:
            print('Error! Code: {c}, Message, {m}'.format(c=type(e).__name__, m=str(e)))
            break
    print(time.time() - start_tm)
    if status != -1:
        # On timeout PowerPoint was already killed; otherwise shut it down.
        ppt.Quit()
    return status
if __name__ == '__main__':
    # Batch-convert every .pptx deck in Files/Output to an .mp4 with the same
    # base name, then delete the source decks.
    # Requires Windows (Media Player / Desktop Experience enabled) and
    # Microsoft Office 2010 or higher.
    videos = [i.split(os.path.sep)[1] for i in glob.glob('Files/Output/*.pptx')]
    for video in videos:
        # FIX: os.path.splitext is robust to titles that contain dots; the
        # original ``video.split(".")`` raised ValueError on names such as
        # "version1.2.pptx" (too many values to unpack).
        videoTitle = os.path.splitext(video)[0]

        # quality: 0-100, higher is better quality.
        quality = 60
        # resolution: vertical resolution of the slides (480, 720, 1080, ...).
        resolution = 720
        # frames: frames per second of the rendered video.
        frames = 24

        mp4_path = os.path.abspath("Files/Output/" + videoTitle + ".mp4")
        ppt_path = os.path.abspath("Files/Output/" + videoTitle + ".pptx")

        # ie_temp_dir: PowerPoint's conversion cache folder, e.g.
        # C:/Users/<user>/AppData/Local/Microsoft/Windows/INetCache/Content.MSO/ppt
        # Leave empty to keep the cache files.
        ie_temp_dir = ''

        # status: conversion result. 0: failed. -1: timeout. 1: success.
        status = 0
        # timeout: seconds before the conversion is aborted.
        timeout = 4 * 60
        try:
            status = ppt_to_mp4(ppt_path, mp4_path, resolution, frames, quality, timeout)
            # Clear the PowerPoint cache after each conversion; after hundreds
            # of files the cache folder grows huge.
            if ie_temp_dir != '':
                shutil.rmtree(ie_temp_dir, ignore_errors=True)
        except Exception as e:
            print('Error! Code: {c}, Message, {m}'.format(c=type(e).__name__, m=str(e)))

        if status == -1:
            print('Failed:timeout.')
        elif status == 1:
            print('Success!')
        else:
            # Remove a partially written output file before reporting failure.
            if os.path.exists(mp4_path):
                os.remove(mp4_path)
            print('Failed:The ppt may have unknow elements. You can try to convert it manual.')

    # Delete the source decks once all conversions have been attempted.
    ppts = [i.split(os.path.sep)[1] for i in glob.glob('Files/Output/*.pptx')]
    for ppt in ppts:
        os.remove("Files/Output/" + ppt)
| true |
5c5a476e2620eb00c27a2b17a433c030a16ff014 | Python | KeyiT/wikidata_mining | /build_classifier.py | UTF-8 | 11,511 | 2.90625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | from sys import argv
from sklearn import preprocessing
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
import json
import pickle
from preprocess_wikidata import Preprocessor
def _prfs_scoring(y_true, y_pred, beta=1.0, labels=None,
                  pos_label=1, average=None,
                  warn_for=('precision', 'recall', 'f-score'),
                  sample_weight=None, metric_idx=0):
    """Scorer helper: return one component of precision_recall_fscore_support.

    ``metric_idx`` selects which component of the sklearn result tuple is
    returned: 0=precision, 1=recall, 2=F-beta score, 3=support.
    All other parameters are forwarded unchanged to
    sklearn.metrics.precision_recall_fscore_support.
    """
    prfs = metrics.precision_recall_fscore_support(
        y_true, y_pred, beta=beta, labels=labels,
        pos_label=pos_label, average=average,
        warn_for=warn_for, sample_weight=sample_weight)
    return prfs[metric_idx]
class ClassifierTrainer:
    # Wraps an sklearn-style estimator with grid-search training (see train),
    # labelled testing (see test) and pickle-based persistence (see save_model).
    # NOTE(review): this file uses Python 2 print statements; keep it on
    # a Python 2 interpreter.

    def __init__(self, estimator, hyper_params=None):
        # estimator: the sklearn-style estimator to tune and fit.
        self.estimator = estimator
        # hyper_params: grid of hyper parameters to search (may be set later
        # via train()).
        self.hyper_params = hyper_params
        # _model: best estimator found by the last train() call.
        self._model = None
        # _label_encoder: LabelEncoder fitted on the training labels; used by
        # test() to map string labels to integers.
        self._label_encoder = None

    def train(self, X_train, y_train, hyper_params=None,
              train_perform_data_path=None, model_path=None,
              n_jobs=10, cv=5):
        """
        Train estimator and tune hyper parameters to find the best
        estimator via k-fold cross validation. Update the estimator
        as the best estimator found by cross validation.

        Parameters
        ----------
        :param X_train: array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        :param y_train: array-like, shape = [n_samples] or [n_samples, n_output]
            Target relative to X for classification or regression.
        :param hyper_params: dict or list of dictionaries, optional
            Dictionary with parameters names (string) as keys and lists of
            parameter settings to try as values, or a list of such
            dictionaries, in which case the grids spanned by each dictionary
            in the list are explored. This enables searching over any sequence
            of parameter settings.
            If specified, the hyper parameters will of the instance will be
            updated.
            If the hyper parameters are not specified in neither initializer
            nor this function, a ValueError will be raised.
        :param train_perform_data_path: string, optional
            The destination file path. If this parameter are specified,
            the training performance (F Sore of every possible combination of
            hyper parameters) will be written into this file as json format.
        :param model_path: string, optional
            The destination file path. If this parameter are specified,
            the best estimator will be written into this file as pks bin format.
        :param n_jobs: int, optional (default=10)
            The number of jobs to run in parallel.
        :param cv: int, cross-validation generator or an iterable, optional (default=5)
            Determines the cross-validation splitting strategy.
            Possible inputs for cv are:
              - None, to use 3-fold cross validation,
              - integer, to specify the number of folds in a `(Stratified)KFold`,
              - An object to be used as a cross-validation generator.
              - An iterable yielding train, test splits.
            For integer/None inputs, if the estimator is a classifier and ``y`` is
            either binary or multiclass, :class:`StratifiedKFold` is used. In all
            other cases, :class:`KFold` is used.
            Refer :ref:`User Guide <cross_validation>` for the various
            cross-validation strategies that can be used here.

        Return
        ----------
        :return: GridSearchCV
        """
        # update hyper parameters to be tuned
        if hyper_params is not None:
            self.hyper_params = hyper_params

        # test parameters
        if self.hyper_params is None:
            raise ValueError("Hyper parameters are not set!")

        # tune hyper parameters, do k-fold cross validation to find the best hyper parameters
        # metric_idx=2 selects the F-score component of
        # precision_recall_fscore_support; averaged with 'macro'.
        fscore = metrics.make_scorer(_prfs_scoring, average='macro', metric_idx=2)

        print "\n start training..."
        gs = GridSearchCV(self.estimator, self.hyper_params, cv=cv, scoring=fscore, n_jobs=n_jobs)
        gs.fit(X_train, y_train)
        # Keep the refitted best estimator for test()/predict()/save_model().
        self._model = gs.best_estimator_
        print "training done! \n"

        print("Best parameters set found on development set:")
        print(gs.best_params_)
        print("Grid scores on development set:")
        means = gs.cv_results_['mean_test_score']
        stds = gs.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, gs.cv_results_['params']):
            print "%0.3f (+/-%0.03f) for %r" % (mean, std, params)

        # write training performance as json file
        # Each record is the parameter combination plus its mean/std CV score.
        if train_perform_data_path is not None:
            training_json = []
            for mean, std, params in zip(means, stds, gs.cv_results_['params']):
                res = dict(params)
                res['mean'] = mean
                res['std'] = std
                training_json.append(res)

            with open(train_perform_data_path, "w") as f:
                json.dump(training_json, f)

        # save model
        if model_path is not None:
            self.save_model(model_path)

        # encode labels
        # Fitted on the training labels so test() can map labels to integers.
        self._label_encoder = preprocessing.LabelEncoder()
        self._label_encoder.fit(y_train)

        return gs

    def test(self, X_test, y_test, test_perform_data_path=None):
        """
        Test estimator stored in this instance.

        Parameters
        ----------
        :param X_test: array-like, shape = [n_samples, n_features]
            Testing vector, where n_samples is the number of samples and
            n_features is the number of features.
        :param y_test: array-like, shape = [n_samples]
            Target relative to X for classification or regression.
        :param test_perform_data_path: string, optional
            The destination file path. If this parameter are specified,
            the testing performance (in terms of precision, recall, F-Measure, Support)
            will be written into this file as json format.

        Returns
        ----------
        :return: array-like, shape = [n_samples]
            Output of the estimator.
        """
        # transform label to integer
        y_true = self._label_encoder.transform(y_test)

        # make prediction on testing data set
        y_pred_label = self._model.predict(X_test)
        y_pred = self._label_encoder.transform(y_pred_label)

        # measure performance
        # average=None yields one (precision, recall, F, support) value per class.
        metric = metrics.precision_recall_fscore_support(y_true, y_pred, average=None)
        print "\n testing performance: "
        print self._label_encoder.classes_
        for met in metric:
            print met

        # One json record per class with its four performance numbers.
        if test_perform_data_path is not None:
            test_perform_json = []
            with open(test_perform_data_path, "w") as f:
                idx = 0
                for cl in self._label_encoder.classes_:
                    test_perform_json.append({cl:
                        {'precision': metric[0][idx],
                         'recall': metric[1][idx],
                         'F': metric[2][idx],
                         'support': metric[3][idx]}
                    })
                    idx += 1

                json.dump(test_perform_json, f)

        return y_pred_label

    def save_model(self, model_path):
        """
        Save the current estimator of the instance.

        Parameters
        ----------
        :param model_path: string
            The destination file path.
        """
        with open(model_path, 'wb') as f:
            pickle.dump(self._model, f)

    def get_model(self):
        """
        Get the current estimator of the instance.

        Returns
        ---------
        :return: estimator.
        """
        return self._model

    def set_model(self, model):
        """Replace the stored estimator (e.g. one loaded from disk)."""
        self._model = model

    def predict(self, X):
        """
        Make a prediction.

        Parameters
        ----------
        :param X: array-like, shape = [n_samples, n_features]
            Input vector, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        ---------
        :return: array-like, shape = [n_samples]
            Output relative to X of the estimator.
        """
        return self._model.predict(X)
def get_args():
    """Parse command-line options of the form ``-flag value`` / ``--flag value``.

    Scans ``sys.argv`` left to right and maps every token that starts with
    ``-`` to the token immediately following it.

    Returns
    ----------
    :return: dict
        Mapping from option name (leading dashes included) to its value.
        A trailing flag with no value is ignored.
    """
    args = {}
    raw = list(argv)
    while raw:
        # ``len(raw) > 1`` guard: the original indexed raw[1] unconditionally
        # and raised IndexError when a flag was the last token.
        # ``startswith`` also avoids IndexError on an empty-string argument
        # (the original indexed raw[0][0]).
        if raw[0].startswith('-') and len(raw) > 1:
            args[raw[0]] = raw[1]
        raw = raw[1:]
    return args
def train_test(model, params, train_test_split_ratio=0.1, n_jobs_cv=10):
    """
    Train and test the given classification model. Hyper parameters are
    tuned with k-fold cross validation (see ClassifierTrainer.train).

    Command-line options (read via get_args):
      --src_json_data  (required) source json data file path
      --train_perform  optional training-performance output path
      --test_perform   optional testing-performance output path
      --model_bin      optional pickled-model output path

    :param model: estimator
        Estimator to be trained and tested.
    :param params: dict
        Hyper-parameter grid dictionary.
    :param train_test_split_ratio: float, optional (default=0.1)
        Fraction of the data held out for testing.
    :param n_jobs_cv: int, optional (default=10)
        Number of cross-validation jobs executed in parallel.
    """
    cli = get_args()
    if '--src_json_data' not in cli:
        raise ValueError("Source json data file path is empty")
    src_data_path = cli['--src_json_data']

    # Optional destinations default to None when the flag is absent.
    train_perform_data_path = cli.get('--train_perform')
    test_perform_data_path = cli.get('--test_perform')
    model_path = cli.get('--model_bin')

    # Assemble the feature matrix and label vector from the json source.
    X = Preprocessor.assemble_feature_to_matrix_from_file(src_data_path)
    y = Preprocessor.assemble_labels_to_vector_from_file(src_data_path)

    # Hold out a test split; the rest feeds the cross-validated grid search.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=train_test_split_ratio
    )

    trainer = ClassifierTrainer(model, params)
    trainer.train(X_train=X_train, y_train=y_train,
                  train_perform_data_path=train_perform_data_path,
                  model_path=model_path, n_jobs=n_jobs_cv)
    trainer.test(X_test, y_test, test_perform_data_path)
def build_lr(multi_class='ovr', solver='liblinear', penalty='l1',
             n_jobs_estimator=1):
    """Create a class-weight-balanced logistic-regression classifier."""
    return LogisticRegression(class_weight='balanced',
                              n_jobs=n_jobs_estimator,
                              penalty=penalty,
                              solver=solver,
                              multi_class=multi_class)
def build_rf(class_weight='balanced_subsample', n_jobs_estimator=10):
    """Create a random-forest classifier with per-subsample class balancing."""
    return RandomForestClassifier(class_weight=class_weight,
                                  n_jobs=n_jobs_estimator)
def build_gbdt(learning_rate=0.1):
    """Create a gradient-boosted decision-tree classifier."""
    return GradientBoostingClassifier(learning_rate=learning_rate)
| true |
55fc6258c530dd90c7c24f2c57ae447cfe53a112 | Python | mozman/svgwrite | /tests/test_path.py | UTF-8 | 2,067 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozman@gmx.at>
# Purpose: test path class
# Created: 18.09.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
import sys
import unittest
from svgwrite.path import Path
class TestPath(unittest.TestCase):
    """Unit tests for the string serialization of svgwrite.path.Path."""

    def test_constructor(self):
        path = Path(d="M 0 0", pathLength=100)
        self.assertEqual(path['pathLength'], 100)
        self.assertEqual(path.tostring(), '<path d="M 0 0" pathLength="100" />')
        # a command string is accepted as initial path data
        path = Path(d='M 10,7')
        self.assertEqual(path.tostring(), '<path d="M 10,7" />')
        # so is a tuple of command/coordinate values
        path = Path(d=('M', 9, 9))
        self.assertEqual(path.tostring(), '<path d="M 9 9" />')

    def test_flat_commands(self):
        path = Path(d="M 0 0")
        self.assertEqual(path.tostring(), '<path d="M 0 0" />')
        # bare values are appended to the current command
        path.push(100, 100, 100, 200)
        self.assertEqual(path.tostring(), '<path d="M 0 0 100 100 100 200" />')
        # a whole command string can be pushed at once
        path = Path()
        path.push('M 100 100 100 200')
        self.assertEqual(path.tostring(), '<path d="M 100 100 100 200" />')
        # mixed pushes: command letter + floats, then a command string
        path = Path(d=('M 10', 7))
        path.push('l', 100.0, 100.0)
        path.push('v 100.7 200.1')
        self.assertEqual(path.tostring(), '<path d="M 10 7 l 100.0 100.0 v 100.7 200.1" />')

    def test_nested_commands(self):
        # nested lists/tuples are flattened into one command sequence
        path = Path(d=('M 1,2', ['L', (7, 7, 'H 1 2 3 4 5')]))
        self.assertEqual(path.tostring(), '<path d="M 1,2 L 7 7 H 1 2 3 4 5" />')

    def test_push_arc_1(self):
        # scalar radius, default flags -> relative 'a' command
        path = Path('m0,0')
        path.push_arc(target=(7, 7), rotation=30, r=5)
        self.assertEqual(path.tostring(), '<path d="m0,0 a 5 5 30 1,1 7 7" />')

    def test_push_arc_2(self):
        # explicit radii pair, flags and absolute coordinates -> 'A' command
        path = Path('m0,0')
        path.push_arc(target=(7, 7), rotation=30, r=(2, 4), large_arc=False, angle_dir='-', absolute=True)
        self.assertEqual(path.tostring(), '<path d="m0,0 A 2 4 30 0,0 7 7" />')
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| true |
443e7ba6b6609ab146e2b54902f8d37402af6c17 | Python | gfugante/Old-Machine-Learning | /Tuning/ValidationCurves.py | UTF-8 | 1,777 | 2.859375 | 3 | [] | no_license |
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import validation_curve
import matplotlib.pyplot as plt
from functions import print_df
# Plot a validation curve for the breast-cancer dataset: training vs.
# cross-validated accuracy as a function of the logistic-regression
# regularization parameter C.
cancer = load_breast_cancer()

# Feature matrix and binary target labels.
X = cancer.data
y = cancer.target

# Hold out 20% of the samples for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

# Pipeline: standardize the features, then fit logistic regression.
pipe = Pipeline([("scl", StandardScaler()),
                 ("lr", LogisticRegression(random_state=0))])

# C values to sweep; they span several orders of magnitude (plotted on a log axis).
param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]

# 10-fold cross-validated scores for each value of the pipeline's "lr__C"
# parameter; rows correspond to param_range entries, columns to CV folds.
train_scores, test_scores = validation_curve(pipe,
                                             X_train,
                                             y_train,
                                             cv=10,
                                             param_name="lr__C",
                                             param_range=param_range)

# Collapse the per-fold scores into mean +/- std for plotting.
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)

# Training accuracy with a shaded +/- 1 std band.
plt.plot(param_range, train_mean, color='blue', marker='o', markersize=5, label='training accuracy')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')

# Validation accuracy with its own band.
plt.plot(param_range, test_mean, color='green', linestyle='--', marker='s', markersize=5, label='validation accuracy')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='green')

plt.grid()
plt.xscale('log')
plt.legend(loc='lower right')
plt.xlabel('Parameter C')
plt.ylabel('Accuracy')
plt.ylim([0.8, 1.0])
plt.show()
| true |