blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
61b0ab1ae3ca11fcf2581a67933b5271749e9756 | ef0f2c4557db4388f6ddf46c94abb0ef9c78813d | /main/uppath.py | 725351ab623335d2d4ee859971f2f16eed7c820a | [] | no_license | baopingli/HAttMatting | c3382ccbdcb77419cdfc615eb7380a5388c09419 | 851aba6d1acbcf41749508b8c9cb7d007e4837c2 | refs/heads/main | 2023-02-18T04:10:57.184518 | 2021-01-11T02:51:20 | 2021-01-11T02:51:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | import math
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import pytorch_lightning as pl
from .conv_batchnorm_relu import ConvBatchNormRelu
class UpPath(pl.LightningModule):
    """Decoder stage: conv block + skip-connection add, then 2x2 max-unpooling."""

    def __init__(self, *args, **kwargs):
        super(UpPath, self).__init__()
        # All constructor arguments are forwarded verbatim to the conv block.
        self.conv = ConvBatchNormRelu(*args, **kwargs)
        self.unpool = nn.MaxUnpool2d(kernel_size=(2, 2), stride=(2, 2))

    def forward(self, x, after_pool_feature, indices, output_size, return_conv_result=False):
        """Fuse x with the skip feature and unpool back up to output_size.

        When return_conv_result is True, the pre-unpool sum is returned as a
        second value alongside the unpooled tensor.
        """
        fused = torch.add(self.conv(x), after_pool_feature)
        unpooled = self.unpool(fused, indices, output_size=output_size)
        if return_conv_result:
            return unpooled, fused
        return unpooled
| [
"vietnamican@gmail.com"
] | vietnamican@gmail.com |
cc454eceed7f736e10e8d949d9ebe9daf2909b01 | f5f62ed1f738f1f7a863468c7d6c0cf20af490f3 | /Vorlesung/src/Basic/date_class.py | 665b8a0a90ebb42f77ecf18b59b03b61caceeea7 | [] | no_license | AndrejLehmann/my_pfn_2019 | e0ceb7e0b999abb978c10bc15c7e05f31739b8ca | fe4d4ddb7192ecd2c0e0dbe043d72485c5ed9810 | refs/heads/master | 2020-08-31T01:53:37.486097 | 2020-04-20T20:48:08 | 2020-04-20T20:48:08 | 218,550,745 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | #!/usr/bin/env python3
import sys, re, argparse
def is_leap_year(year):
    """Gregorian leap-year rule: every 400th year is a leap year; otherwise
    a year is leap when divisible by 4 but not by 100."""
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
class Date:
    """A calendar date parsed from either DD.MM.YYYY or YYYY-MM-DD text."""

    # Days per month in a non-leap year; February is corrected in date2number.
    daysinmonth = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                   7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}

    def __init__(self, dstring):
        """Parse dstring; raise Exception when neither format matches."""
        german = re.search(r'(\d{2})\.(\d{2})\.(\d{4})', dstring)
        if german:
            self._day, self._month, self._year = (int(g) for g in german.groups())
            return
        iso = re.search(r'(\d{4})-(\d{2})-(\d{2})', dstring)
        if iso:
            self._year, self._month, self._day = (int(g) for g in iso.groups())
            return
        raise Exception('"{}" is not a valid date'.format(dstring))

    def date2number(self):
        """Return the 1-based day-of-year of this date (leap-year aware)."""
        assert self._month <= 12
        dayofyear = self._day
        for m in range(1, self._month):
            dayofyear += Date.daysinmonth[m]
            # Add the leap day once February has been fully passed.
            if m == 2 and is_leap_year(self._year):
                dayofyear += 1
        return dayofyear

    def __str__(self):
        # Always render in the DD.MM.YYYY form.
        return '{:02d}.{:02d}.{}'.format(self._day, self._month, self._year)
def parse_arguments():
    """Define and evaluate the command-line interface.

    Implicitly reads sys.argv; returns an argparse.Namespace with
    .day2number (bool flag) and .inputfile (path string).
    """
    p = argparse.ArgumentParser(description='parse dates and output')
    p.add_argument('-d','--day2number',action='store_true',default=False,
                   help='show day of date in year')
    # Default points at a course-exercise CSV relative to this script's location.
    p.add_argument('--inputfile',type=str,default='../../../../exercises/programmierung/python/Datetonumber/randomdates.csv',help='specify input file')
    return p.parse_args()
if __name__ == '__main__':
    args = parse_arguments()
    try:
        stream = open(args.inputfile,'r')
    except IOError as err:
        sys.stderr.write('{}: {}\n'.format(sys.argv[0],err))
        exit(1)
    # BUGFIX: the original ended with "stream.close" (no parentheses), which
    # never closed the file. A "with" block guarantees closure on every path.
    with stream:
        for line in stream:
            line = line.rstrip()
            try:
                dt = Date(line)
            except Exception as err:
                sys.stderr.write('{}: {}\n'.format(sys.argv[0],err))
                exit(1)
            # Always print the normalized date; append day-of-year on request.
            values = [str(dt)]
            if args.day2number:
                values.append(str(dt.date2number()))
            print('\t'.join(values))
| [
"alehmann@physnet.uni-hamburg.de"
] | alehmann@physnet.uni-hamburg.de |
69f61deaa21aac62110923ba4cf7ab0bb97e8230 | 8f8e378c0ce4224244582c506c268edda3cc3b30 | /Common/DL/Day2/New/regression3.py | 3d37f5c556a00de3cebaf683e24d265254ed448a | [] | no_license | srsapireddy/Diploma-in-AI_NIELIT_Files | 223318319b2d4b8647d77b99d1ba03f0d6e15cf6 | 9e2ed78fbe03369ebef1aa81f3417fc21bdd4107 | refs/heads/master | 2021-05-17T14:28:00.059617 | 2020-03-29T09:28:04 | 2020-03-29T09:28:04 | 250,820,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | # mlp for regression with mse loss function
from sklearn.datasets import make_regression
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from matplotlib import pyplot
# generate a synthetic regression dataset (1000 samples, 20 features)
X, y = make_regression(n_samples=1000, n_features=20, noise=0.1)
# standardize the features to zero mean / unit variance
X = StandardScaler().fit_transform(X)
print(y.shape)
# StandardScaler expects a 2-D array, so reshape the target, scale, then flatten
y = StandardScaler().fit_transform(y.reshape(len(y),1))[:,0]
print(y.shape)
# split into train and test halves (first 500 train, remaining 500 test)
n_train = 500
trainX, testX = X[:n_train, :], X[n_train:, :]
trainy, testy = y[:n_train], y[n_train:]
# define the model: one hidden ReLU layer, linear output unit for regression
model = Sequential()
model.add(Dense(25, input_dim=20, activation='relu'))
model.add(Dense(1, activation='linear'))
opt = SGD(lr=0.01, momentum=0.9)
# train on mean-squared-log-error; plain MSE is tracked as a metric for reporting
model.compile(loss='mean_squared_logarithmic_error', optimizer=opt,metrics=['mse'])
# fit model for 100 epochs, silently (verbose=0)
history = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=100, verbose=0)
# evaluate the model; evaluate() returns (loss, mse) so the loss is discarded
_,train_mse = model.evaluate(trainX, trainy, verbose=0)
_,test_mse = model.evaluate(testX, testy, verbose=0)
print('Train: %.3f, Test: %.3f' % (train_mse, test_mse))
# plot the training/validation loss curves
pyplot.title('Loss / Mean Squared Error')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# plot the tracked MSE metric for train vs. validation
# NOTE(review): these history keys depend on the installed Keras version
# ('mean_squared_error' vs 'mse') — confirm against the environment.
pyplot.plot(history.history['mean_squared_error'], label='train')
pyplot.plot(history.history['val_mean_squared_error'], label='test')
pyplot.legend()
pyplot.show()
| [
"sapireddyrahul@gmail.com"
] | sapireddyrahul@gmail.com |
029528a291961da7c442a5c7e15d8be06bc73bb3 | ddbb2bab9c96705c4ceb9e0d92e285d24d6acc0e | /Basic/chap08.py | 1b344e337ba1816ff33cd4ccd0aff94e429ba1e1 | [] | no_license | louisuss/CrawlingStudy | e25ca256be7c3c0bda026f1d404a9d91c36f9dd7 | 78f33939865bb8eba5adc042d63fe3def67382f2 | refs/heads/master | 2022-12-15T17:54:44.209175 | 2020-08-23T12:27:06 | 2020-08-23T12:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # 다음 주식 정보 가져오기
import json
import urllib.request as req
from fake_useragent import UserAgent
import ssl
# Disable TLS certificate verification for the HTTPS request below.
ssl._create_default_https_context = ssl._create_unverified_context
# Fake header info (UserAgent fabricates realistic browser User-Agent strings)
ua = UserAgent()
# print(ua.chrome)
# print(ua.safari)
# print(ua.random)
# Request header info — Daum requires a browser-like User-Agent and referer.
headers = {
    'User-agent': ua.safari,
    'referer': 'http://finance.daum.net/'
}
# Daum stock-ranking request URL (top 10 entries)
url = 'http://finance.daum.net/api/search/ranks?limit=10'
# Send the request:
# a Request() object is built with the url and the headers above.
res = req.urlopen(req.Request(url, headers=headers)).read().decode('UTF-8')
# Inspect the raw response (JSON text)
# print('res', res)
# Convert the response str -> json and extract the 'data' value
rank_json = json.loads(res)['data']
# Intermediate check
# print('중간 확인: \n',rank_json)
# print()
# Print rank / trade price / company name for each entry (labels are Korean:
# 순위 = rank, 금액 = price, 회사명 = company name).
for data in rank_json:
    print('순위: {}, 금액: {}, 회사명: {}'.format(
        data['rank'], data['tradePrice'], data['name']))
# Sample output:
# 순위: 1, 금액: 24800, 회사명: 노터스
# 순위: 2, 금액: 328000, 회사명: 셀트리온
# 순위: 3, 금액: 73000, 회사명: 신풍제약
# 순위: 4, 금액: 325000, 회사명: 카카오
# 순위: 5, 금액: 54400, 회사명: 삼성전자
# 순위: 6, 금액: 117500, 회사명: 현대차
# 순위: 7, 금액: 191000, 회사명: SK바이오팜
# 순위: 8, 금액: 69200, 회사명: 일양약품
# 순위: 9, 금액: 106500, 회사명: 셀트리온헬스케어
# 순위: 10, 금액: 175500, 회사명: 씨젠
| [
"dobi1115@gmail.com"
] | dobi1115@gmail.com |
edd2294d74c818df111097d4dda5b7c88d015b4f | 0410c8e7db491283fe1383731aa2f130fccfaff1 | /main.py | 1d2b7f0dbb572a7087c63387a33e8b920811d516 | [] | no_license | simplifies/PassGetter | e1af86cfed2012cbea76c90c3c4a0b0ac9b7a4fa | d4ded9b966f18c6f7d3fe5718662ea02b2e1e3b3 | refs/heads/master | 2022-12-08T23:30:18.342699 | 2020-09-16T15:20:28 | 2020-09-16T15:20:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,687 | py | import os
import sys
import xml.etree.ElementTree as ET
import time
import sockets.client as client
import glob
from platform import system
system = system()
if system == "Windows":
import win32com.shell.shell as shell
else:
pass
def admin():
if system == "Windows":
if sys.argv[-1] != 'asadmin':
script = os.path.abspath(sys.argv[0])
params = ' '.join([script] + sys.argv[1:] + ['asadmin'])
shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)
else:
pass
elif system == "Linux":
os.system('xdg-mime query default x-scheme-handler/http > browser.txt')
def detect_browser():
if system == "Windows":
os.system('dism /online /Export-DefaultAppAssociations:"%UserProfile%\Desktop\FileAssociations.xml"')
time.sleep(5)
root = ET.parse("C:" + os.getenv('HOMEPATH') + r'\Desktop\FileAssociations.xml').getroot()
for type_tag in root:
value = type_tag.get('Identifier')
if value == "https":
browser = type_tag.get("ApplicationName")
os.remove("C:" + os.getenv('HOMEPATH') + r'\Desktop\FileAssociations.xml')
return browser
elif system == "Linux":
with open('browser.txt', 'r') as f:
browser = f.read()
os.remove('browser.txt')
return browser
def run_wizard(browser):
if system == "Windows":
from browser_windows.win_operagx import windows_opera
from browser_windows.win_chrome import windows
import browser_windows.win_firefox as win_firefox
NSS = win_firefox.NSSDecoder()
if "Opera" in browser:
windows_opera()
elif "Chrome" in browser:
windows()
elif "Firefox" in browser:
win_firefox.decrypt_passwords()
else:
print("The browser is not supported")
elif system == "Linux":
from browsers_linux.linux_chrome import main
import browsers_linux.linux_firefox as linux_firefox
NSS = linux_firefox.NSSDecoder()
if 'Firefox' or 'firefox' in browser:
linux_firefox.decrypt_passwords()
elif 'chrome' or 'Chrome' in browser:
main()
else:
print('the browser is not supported')
if __name__ == '__main__':
admin()
browser = detect_browser()
run_wizard(browser)
filename = ["pass.db", "firepass.db", "operagx.db"]
host = ""
port = 5001
for files in filename:
if files in glob.glob('*.db'):
client.send_file(files, host, port)
else:
pass
| [
"noreply@github.com"
] | simplifies.noreply@github.com |
1fc6b77c43cbb4d11e20de82d9d104daac283aeb | f43d3731a21ee5df09298f5541b52484f408e010 | /spider/news/MongoPipeline.py | 687daf1f028f475fc148563dcbcad22e96ec0d0c | [] | no_license | cash2one/wechat_admin | 2ba8c35deffff37c263b7091229ba2d86f2aaeaf | af0712fdad867d76dcee2092abcf32cada49d075 | refs/heads/master | 2021-05-04T22:22:53.514787 | 2017-09-25T10:03:07 | 2017-09-25T10:03:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import json
from spider.loggers.log import crawler
from spider.news.Pipeline import Pipeline
from spider.util.MongoUtil import MongoUtil
class MongoPipeline(Pipeline):
    """Pipeline stage that persists every item into a MongoDB collection."""

    def __init__(self, collection):
        # Target Mongo collection handle/name used by MongoUtil.save.
        self.collection = collection

    def put(self, item):
        """Save the item (serialized via to_dict) and pass it through unchanged."""
        MongoUtil.save(self.collection, item.to_dict())
        return item
| [
"“545314690@qq.com”"
] | “545314690@qq.com” |
c503f0ccea6b194021cfd21c8b840271f6821aaa | 6faedd57c152156ca508be06b15bcd04e27e974b | /peeringdb_server/management/commands/pdb_org_cleanup.py | ad73a8fda226376e7b8a26fe16491ec0c89c9c71 | [
"BSD-2-Clause"
] | permissive | grizz/peeringdb | 1dca75ac7cbb357ff6166285fb89de07a0a1ed5d | 355fff42924a62b1920bd1f263d83e696e96b74b | refs/heads/master | 2023-01-06T14:07:13.176255 | 2022-11-15T18:34:50 | 2022-11-15T18:34:50 | 164,004,395 | 0 | 0 | NOASSERTION | 2019-01-03T17:41:55 | 2019-01-03T17:41:54 | null | UTF-8 | Python | false | false | 1,617 | py | from django.core.management.base import BaseCommand
from peeringdb_server.models import Organization
class Command(BaseCommand):
    help = "Cleanup deleted Organization objects"

    def add_arguments(self, parser):
        # Without --commit the command only reports what it would do.
        parser.add_argument(
            "--commit",
            action="store_true",
            help="commit changes, otherwise run in pretend mode",
        )

    def log(self, msg):
        """Write msg, prefixed with [pretend] when not committing."""
        prefix = "" if self.commit else "[pretend] "
        self.stdout.write(f"{prefix}{msg}")

    def handle(self, *args, **options):
        self.commit = options.get("commit")
        for org in Organization.objects.filter(status="deleted"):
            member_total = (
                org.admin_usergroup.user_set.all().count()
                + org.usergroup.user_set.all().count()
            )
            self.log(
                f"Cleaning up Organization {org.id} - {org.name} ({member_total} users)"
            )
            if not self.commit:
                continue
            # Detach every member from the admin group first, then the
            # regular usergroup, saving each user after removal.
            for group in (org.admin_usergroup.user_set, org.usergroup.user_set):
                for member in group.all():
                    group.remove(member)
                    member.save()
            # Cancel any affiliation requests still pending against the org.
            for affiliation in org.affiliation_requests.filter(status="pending"):
                affiliation.cancel()
            self.log(f"Removed all users from deleted organization {org.id}")
| [
"noreply@github.com"
] | grizz.noreply@github.com |
aaebaa9fae2392777c866f60bc43f48468fef328 | cd4eb25911d3e3b092aa97aaa7b8fbba6c3a0704 | /lang/python/gevent/testSigQuit.py | 266a8c3f7e07ad1e923b90327d24877fd4efdf12 | [
"MIT"
] | permissive | liuyang1/test | 29bb142982d2ef0d79b71e8fe5f5e0d51ec5258e | 9a154e0161a1a33baad53f7223ee72e702532001 | refs/heads/master | 2023-08-05T08:56:50.526414 | 2023-07-21T05:49:53 | 2023-07-21T11:16:09 | 26,949,326 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | import gevent
import signal
def run_forever():
    # Placeholder worker: sleeps so the greenlet (and process) stays alive.
    gevent.sleep(100)
if __name__ == "__main__":
    # Install a SIGQUIT handler that shuts gevent down, then block on the worker.
    # NOTE(review): calling gevent.signal(...) as a function and gevent.shutdown
    # only exist in old gevent releases (newer versions use
    # gevent.signal_handler and removed shutdown) — confirm the pinned version.
    gevent.signal(signal.SIGQUIT, gevent.shutdown)
    thread = gevent.spawn(run_forever)
    thread.join()
| [
"liuyang1@mail.ustc.edu.cn"
] | liuyang1@mail.ustc.edu.cn |
d6e5f838cf0ff1735ab58b6a342ba0064ed99f4a | b31f44faa4ff1b462585130aff31de959a3e1623 | /Python/Data Structure/Linear List/Sort Array By Parity II.py | 969ae2d0c825bea00b3571a4ee45cadff6fd370b | [] | no_license | fagan2888/Coding-Interview | ac66b1fc33aecdbc2f1e1ec66491561c424e3024 | fe7afbead2f1e252f4bc5692e0f94a6ce32f3c44 | refs/heads/master | 2021-04-21T05:16:34.002298 | 2020-02-02T15:41:05 | 2020-02-02T15:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
# two pointers
j = 1
for i in range(0, len(A), 2): #even
if A[i] % 2:
while A[j] % 2:
j += 2
A[i], A[j] = A[j], A[i]
return A | [
"LIUXinhe@outlook.com"
] | LIUXinhe@outlook.com |
aa93ad03fc44012a2b48b86533a29102093f9c58 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_35/176.py | e46c645d92b8969ad48b9b94179d7069226800fc | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,876 | py | def main(filename):
f = open(filename)
lines = f.readlines()
f.close()
outlines = []
NORTH, WEST, EAST, SOUTH = 0, 1, 2, 3
T = int(lines.pop(0))
def get_neighbours(arr, row, col):
neighbours = []
if row > 0:
neighbours.append((NORTH, arr[row - 1][col]))
if col > 0:
neighbours.append((WEST, arr[row][col - 1]))
if col < W - 1:
neighbours.append((EAST, arr[row][col + 1]))
if row < H - 1:
neighbours.append((SOUTH, arr[row + 1][col]))
return neighbours
for case in xrange(T):
H, W = map(lambda x:int(x), lines.pop(0).split(' '))
alt_map = []
link_map = []
basin_map = []
for i in xrange(H):
alt_map.append(map(lambda x:int(x), lines.pop(0).split(' ')))
for row in xrange(H):
link_map.append([])
for col in xrange(W):
neighbours = get_neighbours(alt_map, row, col)
if len(neighbours) > 0:
min_alt = min(zip(*neighbours)[1])
if min_alt < alt_map[row][col]:
flow_to = filter(lambda x:x[1] == min_alt, neighbours)
tgt_cell = flow_to[0]
if len(flow_to) > 1:
min_dir = min(zip(*flow_to)[0])
tgt_cell = filter(lambda x: x[0] == min_dir, flow_to)[0]
link_map[row].append(tgt_cell[0])
else:
link_map[row].append(-1)
else:
link_map[row].append(-1)
def get_delta_row_col(dir):
delta_row = 0
delta_col = 0
if dir == NORTH:
delta_row = -1
elif dir == WEST:
delta_col = -1
elif dir == EAST:
delta_col = 1
elif dir == SOUTH:
delta_row = 1
return (delta_row, delta_col)
def get_conn(row, col):
connected = []
cur_dir = link_map[row][col]
if cur_dir != -1:
d_row, d_col = get_delta_row_col(cur_dir)
connected.append((row + d_row, col + d_col))
link_map[row][col] = -1
neighbours = get_neighbours(link_map, row, col)
for dir, link_dir in neighbours:
if (3 - dir) == link_dir:
d_row, d_col = get_delta_row_col(dir)
connected.append((row + d_row, col + d_col))
link_map[row + d_row][col + d_col] = -1
return connected
basin_map = list(alt_map)
cur_char = 'a'
nodes = []
num_accounted = 0
i = 0
j = 0
while num_accounted < H * W:
while True:
if isinstance(basin_map[i][j], int):
nodes.append((i, j))
break
j += 1
if j == W:
j = 0
i += 1
while len(nodes) > 0:
node_row, node_col = nodes.pop(0)
basin_map[node_row][node_col] = cur_char
num_accounted += 1
for row, col in get_conn(node_row, node_col):
nodes.append((row, col))
cur_char = chr(ord(cur_char) + 1)
line = 'Case #%i:\n' % ((case + 1))
for row in xrange(H):
line += ' '.join(basin_map[row])
line += '\n'
outlines.append(line)
f = open('B.out', 'w')
f.writelines(outlines)
f.close()
if __name__ == "__main__":
main('B-large.in') | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
6c72a45ff32d4962d15076f7ce9e9857f7f46759 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/22/usersdata/107/11706/submittedfiles/av1_2.py | 1ed072be432d68e735832fde03663c7d7ac9cb0d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=int(input('digite o valor de a:'))
b=int(input('digite o valor de b:'))
c=int(input('digite o valor de c':))
d=int(input('digite o valor de d:'))
if ABAD==5393 and CBCD==6268:
PRINT('VERDADEIRO')
ELSE:
print('FALSA') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
4dece2cdb4af6765f620558479f12b10a049bb03 | 8e3bd35267f40341d7ca03646e10a2b92eace0c7 | /series.py | b7a4817a18ddf7d719d6458cd6f4a7a4f031f87b | [] | no_license | shirsho-12/mathScripts | 2eb762b64ec61ffe8f0182f478353fda121d8c3b | 0ada093050221a2f4d9b33c09783b052c17fbcb3 | refs/heads/master | 2023-04-01T06:29:55.308901 | 2021-04-17T13:54:10 | 2021-04-17T13:54:10 | 354,479,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | import numpy
from sympy import Symbol, pprint, simplify
import sympy as sp
def get_series(var, expr, num_terms=10):
series = sp.series(expr, var, n=num_terms)
pprint(simplify(series))
x = Symbol("x")
expr = sp.ln(1 - 8*x**2)
# expr = sp.cos(x)
# expr = sp.atan(x**3)
# expr = sp.ln(sp.sec(x))
get_series(x, expr)
| [
"shirshajit@gmail.com"
] | shirshajit@gmail.com |
2974a98e4d774482aebe15fe8fd2b5970e282ff3 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /XsJLwhAddzbxdQqr4_4.py | 27672ae255be132627d23a9dd8c94ef3d9a364fa | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | """
Create a function that takes a list and returns the **difference** between the
biggest and smallest numbers.
### Examples
difference_max_min([10, 4, 1, 4, -10, -50, 32, 21]) ➞ 82
# Smallest number is -50, biggest is 32.
difference_max_min([44, 32, 86, 19]) ➞ 67
# Smallest number is 19, biggest is 86.
### Notes
N/A
"""
def difference_max_min(lst):
    """Return the gap between the largest and smallest numbers in lst."""
    return max(lst) - min(lst)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c8d933b28a46b474602a1ecd35e3973757ca6e7c | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/kenan/desktop/compiz/compizconfig-python/actions.py | 86771fd00f5ece6b0cc60cd32a33be547e5a41d2 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    # Configure the autotools build; static libraries are disabled.
    autotools.configure("--disable-static")
def build():
    # Compile the configured source tree with make.
    autotools.make()
def install():
    # Install the built files, then ship the license file as documentation.
    autotools.install()
    pisitools.dodoc("COPYING")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
a0f14bf59489e1820edcf0a4329a4155f3433160 | b55f7fe191a0ac499213505b297edffd2daab2ec | /DeepRLTrader/core/__init__.py | 31884eca75e1a3469faae7b5ec3c052da83623ad | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | chmbrs/DeepRLTrader | af77c33aee908d732fa760a1c48273f9b8ec6ae5 | 96ae2069a42e29838aa26165af0556835c1808dd | refs/heads/master | 2020-04-17T00:46:06.199575 | 2019-01-16T16:51:48 | 2019-01-16T16:51:48 | 166,061,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from .environnement import Local_env
from .environnement import Live_env
from .worker import Local_Worker
from .worker import Live_Worker
from .session import Local_session
from .session import Live_session
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
5e6b1e4f698c5c092aff75f1caa0e101fcacd01b | f0e8338762530bd6c2cc402cda64c43bcec329ae | /leetcode/35. 搜索插入位置.py | 2f9b313fad3d2a95b67f4af9fb389fa870e4f470 | [] | no_license | pengyuhou/git_test1 | bcd60554d2dadad972848047d00f888444462f05 | 5aa441f94a0aa713771bdd93b53a702032060f5d | refs/heads/master | 2022-11-22T08:52:52.767933 | 2020-07-18T03:50:22 | 2020-07-18T03:50:22 | 259,177,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
import bisect
return bisect.bisect_left(nums, target)
if __name__ == '__main__':
    # print(Solution().searchInsert([1, 3, 5, 6], 0))
    import bisect
    a = [1, 3, 5, 6]
    # bisect_left gives the leftmost insertion point for 5 (index 2 here).
    print(bisect.bisect_left(a, 5))
    # insort inserts 5 while keeping the list sorted.
    bisect.insort(a,5)
    print(a)
| [
"786490473@qq.com"
] | 786490473@qq.com |
d0377ce55de49112d96d6d73d6fdfc511bcc9219 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/parser/template/graph_tests/test_authorise_usergroups.py | 070b6c41a40a3be42877c809d4c3b60fa8448c92 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,358 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.authorise import TemplateAuthoriseNode
from programy.config.brain.brain import BrainConfiguration
from programy.config.brain.security import BrainSecurityConfiguration
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphAuthoriseTests(TemplateGraphTestClient):
    """Graph-parser tests for the <authorise> template node.

    Each test parses a <template> containing an <authorise> element and
    checks the resulting AST node and its parsed role. The *_access_allowed
    tests additionally resolve the node and expect the wrapped text; the
    *_access_denied tests only verify parsing (resolution is not exercised).
    """

    def get_brain_config(self):
        # Brain configuration wired to the basic usergroups authoriser, with
        # ACCESS_DENIED as the srai invoked on refusal and a usergroups file
        # resolved relative to the bot root.
        brain_config = BrainConfiguration()
        brain_config.security._authorisation = BrainSecurityConfiguration("authorisation")
        brain_config.security.authorisation._classname = "programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService"
        brain_config.security.authorisation._denied_srai = "ACCESS_DENIED"
        brain_config.security.authorisation._usergroups = "$BOT_ROOT/usergroups.yaml"
        return brain_config

    def test_authorise_with_role_as_attrib_access_allowed(self):
        # role="root" is authorised, so resolving yields the inner text.
        template = ET.fromstring("""
            <template>
                <authorise role="root">
                    Hello
                </authorise>
            </template>
            """)
        ast = self._graph.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertEqual(len(ast.children), 1)
        auth_node = ast.children[0]
        self.assertIsNotNone(auth_node)
        self.assertIsInstance(auth_node, TemplateAuthoriseNode)
        self.assertIsNotNone(auth_node.role)
        self.assertEqual("root", auth_node.role)
        result = auth_node.resolve(self._client_context)
        self.assertIsNotNone(result)
        self.assertEqual("Hello", result)

    def test_authorise_with_role_as_attrib_and_optional_srai_access_allowed(self):
        # Same as above, but with an explicit denied_srai override on the node.
        template = ET.fromstring("""
            <template>
                <authorise role="root" denied_srai="NO_ACCESS">
                    Hello
                </authorise>
            </template>
            """)
        ast = self._graph.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertEqual(len(ast.children), 1)
        auth_node = ast.children[0]
        self.assertIsNotNone(auth_node)
        self.assertIsInstance(auth_node, TemplateAuthoriseNode)
        self.assertIsNotNone(auth_node.role)
        self.assertEqual("root", auth_node.role)
        result = auth_node.resolve(self._client_context)
        self.assertIsNotNone(result)
        self.assertEqual("Hello", result)

    def test_authorise_with_role_as_attrib_access_denied(self):
        # An unauthorised role still parses; only the role value is asserted.
        template = ET.fromstring("""
            <template>
                <authorise role="denied">
                    Hello
                </authorise>
            </template>
            """)
        ast = self._graph.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertEqual(len(ast.children), 1)
        auth_node = ast.children[0]
        self.assertIsNotNone(auth_node)
        self.assertIsInstance(auth_node, TemplateAuthoriseNode)
        self.assertIsNotNone(auth_node.role)
        self.assertEqual("denied", auth_node.role)

    def test_authorise_with_role_as_attrib_and_optional_srai_access_denied(self):
        # Denied role with a denied_srai override; again only parsing is checked.
        template = ET.fromstring("""
            <template>
                <authorise role="denied" denied_srai="NO_ACCESS">
                    Hello
                </authorise>
            </template>
            """)
        ast = self._graph.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertEqual(len(ast.children), 1)
        auth_node = ast.children[0]
        self.assertIsNotNone(auth_node)
        self.assertIsInstance(auth_node, TemplateAuthoriseNode)
        self.assertIsNotNone(auth_node.role)
        self.assertEqual("denied", auth_node.role)
| [
"cliff@cotobadesign.com"
] | cliff@cotobadesign.com |
7b8a0f33c8c2cfce8e2214e673d7e23930159a0d | 1c762f8a085e851dcc38503008c3c997782c5c79 | /Data_Generator.py | 89fdbfcd546313b828a0742f5ac4328968277aa7 | [
"MIT"
] | permissive | Ajay2521/Face-Recognition-using-Siamese-Network | ca599ab2d05869a20accc22a3f3e17a77a193c18 | 0752e85b046599bf7ddb960cefeaf63b309e26c1 | refs/heads/main | 2023-05-13T21:14:56.740169 | 2021-06-11T03:37:41 | 2021-06-11T03:37:41 | 375,892,175 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,065 | py | # importing the neccessary libraries
# open cv for image processing
import cv2
# used to manipulate different parts
import sys
# used for manipulating array/matrics
import numpy as np
# used for accessing the file and folder in the machine
import os
# used for landmark's facial detector with pre-trained models, the dlib is used to estimate the location of 68 coordinates
import dlib
from imutils import face_utils
# for visulating the image
import matplotlib.pyplot as plt
# use to retrieve the faces information
detector = dlib.get_frontal_face_detector()
# print(detector)
# function for detecting a face in a still image and saving the face ROI crop
# takes 2 parameters: imagePath = uploaded image location, name = user name
def image_data_generator(imagePath,name):
    """Detect faces in the image at imagePath and save each gray-scale face
    crop to database/<name>/<name>.jpg.

    NOTE(review): the filename is the same on every loop iteration, so when
    several faces are detected only the last crop survives — confirm intended.
    """
    # root folder holding one sub-folder per user
    path = 'database'
    # per-user folder that stores this user's image
    directory = os.path.join(path, name)
    # create the user's folder if it does not exist yet
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok = 'True')
    # read the uploaded image as a colour array
    image = cv2.imread(imagePath)
    # convert the colour image to gray scale for detection and saving
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect faces; the second argument 1 upsamples the image once, which
    # enlarges it and lets dlib find smaller faces
    faces = detector(gray_image, 1)
    # each d is a dlib rectangle exposing left()/top()/right()/bottom()
    for i, d in enumerate(faces):
        # the slice is effectively gray_image[top:bottom, left:right]
        # (top + (bottom - top) == bottom, left + (right - left) == right);
        # roi = region of interest
        roi_image = gray_image[d.top():d.top() + (d.bottom() - d.top()), d.left():d.left() + (d.right() - d.left())]
        # save the cropped face ROI
        cv2.imwrite(directory+'/'+name+".jpg",roi_image)
# Interactive entry point: prompt for a name, then crop the face found in the
# still image at imagePath into database/<name>/.
imagePath = 'faceDetect.jpg'
name = input("\nEnter name of person : ")
image_data_generator(imagePath, name)
# function for detecting faces from the webcam and saving face ROI crops
# takes 1 parameter: name = user name
def video_data_generator(name):
    """Capture webcam frames until MAX_NUMBER_OF_IMAGES face crops have been
    saved under database/<name>/ (or the user presses 'q'), drawing a green
    box around each detection in a live preview window."""
    # root path for saving the images
    path = 'database'
    # per-user folder that stores the captured crops
    directory = os.path.join(path, name)
    # create the user's folder if it does not exist yet
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok = 'True')
    # open the default webcam (device 0)
    webcam = cv2.VideoCapture(0)
    number_of_images = 0
    MAX_NUMBER_OF_IMAGES = 20
    while number_of_images < MAX_NUMBER_OF_IMAGES:
        # grab one frame from the webcam
        ret, frame = webcam.read()
        # flip around the y-axis (mirror view, flip code 1)
        frame = cv2.flip(frame, 1)
        # detect faces on the colour frame; 1 = upsample once for small faces
        faces = detector(frame, 1)
        for i, d in enumerate(faces):
            # crop frame[top:bottom, left:right] — the face ROI
            roi_image = frame[d.top():d.top() + (d.bottom() - d.top()), d.left():d.left() + (d.right() - d.left())]
            # save the crop as <name><index>.jpg and count it
            cv2.imwrite(os.path.join(directory, str(name+str(number_of_images)+'.jpg')), roi_image)
            number_of_images += 1
            # draw a green rectangle around the detected face for the preview
            cv2.rectangle(frame, (d.left(), d.top()), (d.left() + (d.right() - d.left()), d.top() + (d.bottom() - d.top())), (0, 255, 0), 2)
        # show the annotated frame
        cv2.imshow("Webcam",frame)
        # press 'q' to stop capturing early
        if(cv2.waitKey(1) & 0xFF == ord('q')):
            break
    # release the webcam and close the preview window
    webcam.release()
    cv2.destroyAllWindows()
# Script entry: ask for the user's name and capture face crops from the webcam.
name = input("\nEnter name of person : ")
video_data_generator(name)
| [
"noreply@github.com"
] | Ajay2521.noreply@github.com |
2e8a3ba4e7038eac2ccc96263bdc2de8ad1ca6fe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03761/s160390356.py | 6afd5b1b7f5508c82464c33c2033ab81a3670a86 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import sys
from string import ascii_lowercase as alphabets
from collections import Counter
def main():
    """Read n strings from stdin and print, in alphabetical order, the
    letters (with multiplicity) that every string contains."""
    sentinel = 100  # larger than any per-string letter count we expect
    best = {letter: sentinel for letter in alphabets}
    word_count = int(input())
    words = [input() for _ in range(word_count)]
    for word in words:
        occurrences = Counter(word)
        # Keep, per letter, the smallest count seen over all words so far.
        for letter in alphabets:
            best[letter] = min(best[letter], occurrences[letter])
    print(''.join(letter * best[letter] for letter in alphabets if best[letter] < sentinel))
def main2():
    """Counter-based variant of main(): print the multiset of letters common
    to all n input strings, in alphabetical order."""
    sentinel = 100  # larger than any per-string letter count we expect
    word_count = int(input())
    counters = [Counter(input()) for _ in range(word_count)]
    # For each letter, the number of copies present in every word is the
    # minimum of its counts; seeding with the sentinel reproduces the
    # original "start at inf" behaviour when word_count is zero.
    minimum = {
        letter: min([sentinel] + [c[letter] for c in counters])
        for letter in alphabets
    }
    print(''.join(letter * minimum[letter] for letter in alphabets if minimum[letter] < sentinel))
# Script entry point: run the Counter-based solution (main() is kept as an
# alternative implementation but is not invoked).
if __name__=='__main__':
    main2()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b9edd3e6f9ac2e63615f9388ce32ecd2ad05b7ff | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03659/s513043584.py | 3aa6b000e56fcb4dfa57e4fb72410c1623d07084 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | n = int(input())
a_lst = list(map(int, input().split()))
x = a_lst[0]
y = sum(a_lst[1:])
diff = abs(y - x)
for a in a_lst[1:-1]:
x += a
y -= a
diff = min(diff, abs(y - x))
print(diff) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
db708b0106ae135b1760c606ca3898726edcde4b | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad_BN/pyr_Tcrop256_pad20_jit15/pyr_2s/L5/step11_L2345678.py | 08c22bc3cddbff4ee3e973086092e77275248d2a | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,975 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# 按F5執行時, 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~ 才可 import step10_a.py 喔!
code_exe_dir = os.path.dirname(code_exe_path) ### 目前執行 step10_b.py 的 dir
if(os.getcwd() != code_exe_dir): ### 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~
os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
import Exps_7_v3.doc3d.I_to_M_Gk3_no_pad_BN.pyr_Tcrop256_pad20_jit15.pyr_0s.L5.step10_a as L5_0side
import Exps_7_v3.doc3d.I_to_M_Gk3_no_pad_BN.pyr_Tcrop256_pad20_jit15.pyr_1s.L5.step10_a as L5_1side
import step10_a as side2
#################################################################################################################################################################################################################################################################################################################################################################################################
# Experiment grids: each list collects the pre-built experiment configs for a
# fixed "1side" depth, sweeping the "2side" depth from the 1-side baseline up
# to the matching depth.  ch032_1side_all__2side_all nests them, with the
# 0-side baseline first.
ch032_1side_1__2side_all = [
    L5_1side.ch032_1side_1,
    side2.ch032_1side_1__2side_1,
]
ch032_1side_2__2side_all = [
    L5_1side.ch032_1side_2,
    side2.ch032_1side_2__2side_1,
    side2.ch032_1side_2__2side_2,
]
ch032_1side_3__2side_all = [
    L5_1side.ch032_1side_3,
    side2.ch032_1side_3__2side_1,
    side2.ch032_1side_3__2side_2,
    side2.ch032_1side_3__2side_3,
]
ch032_1side_4__2side_all = [
    L5_1side.ch032_1side_4,
    side2.ch032_1side_4__2side_1,
    side2.ch032_1side_4__2side_2,
    side2.ch032_1side_4__2side_3,
    side2.ch032_1side_4__2side_4,
]
ch032_1side_5__2side_all = [
    L5_1side.ch032_1side_5,
    side2.ch032_1side_5__2side_1,
    side2.ch032_1side_5__2side_2,
    side2.ch032_1side_5__2side_3,
    side2.ch032_1side_5__2side_4,
    side2.ch032_1side_5__2side_5,
]
ch032_1side_6__2side_all = [
    L5_1side.ch032_1side_6,
    side2.ch032_1side_6__2side_1,
    side2.ch032_1side_6__2side_2,
    side2.ch032_1side_6__2side_3,
    side2.ch032_1side_6__2side_4,
    side2.ch032_1side_6__2side_5,
    side2.ch032_1side_6__2side_6,
]
ch032_1side_all__2side_all = [
    [L5_0side.ch032_0side,],
    ch032_1side_1__2side_all,
    ch032_1side_2__2side_all,
    ch032_1side_3__2side_all,
    ch032_1side_4__2side_all,
    ch032_1side_5__2side_all,
    ch032_1side_6__2side_all,
]
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
ca50bb1529182918b335ff225b5d425669d78e7e | 56997c84a331433225f89f168520ad8d709083c1 | /Programmers/기타문제/압축/ziping_ver1.py | 7a9ba5815d01771a1bcea7d53a147f1cf8df61e5 | [] | no_license | miseop25/Back_Jun_Code_Study | 51e080f8ecf74f7d1a8bb1da404d29c8ba52325c | 1d993e718c37c571aae1d407054ec284dc24c922 | refs/heads/master | 2022-11-06T01:05:05.028838 | 2022-10-23T13:11:22 | 2022-10-23T13:11:22 | 200,828,984 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | def solution(msg):
answer = []
lzw_dict = dict()
word = "A"
for i in range(1, 27) :
lzw_dict[word] = i
word = chr(ord("A") + i )
m_index = 0
w = msg[0]
while m_index < len(msg):
if m_index + 1 < len(msg) :
temp = w + msg[m_index + 1]
else :
temp = w
if temp in lzw_dict :
answer.append(lzw_dict[temp])
else :
answer.append(lzw_dict[temp[: -1]])
break
if temp in lzw_dict :
w = temp
m_index += 1
else :
i+= 1
lzw_dict[temp] = i
answer.append(lzw_dict[temp[: -1]])
m_index += 1
w = msg[m_index]
return answer
print(solution("KAKAO")) | [
"richard25@naver.com"
] | richard25@naver.com |
bc16ca07017d65d4491e9ab5faa7546a0003799e | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/pcode/memstate/UniqueMemoryBank.pyi | 851ac4589eaa0006c84ab4e084c511aae767f320 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | pyi | from typing import List
import ghidra.pcode.memstate
import ghidra.program.model.address
import java.lang
# Auto-generated Ghidra type stub (.pyi): signatures only, all bodies are `...`.
class UniqueMemoryBank(ghidra.pcode.memstate.MemoryBank):
    """
    A subclass of MemoryBank intended for modeling the "unique" memory
    space. The space is byte-addressable and paging is not supported.
    """
    # Helper record pairing a machine word with its per-byte initialization state.
    class WordInfo(object):
        initialized: int
        word: long
        def __init__(self): ...
        def equals(self, __a0: object) -> bool: ...
        def getByte(self, __a0: int) -> int: ...
        def getClass(self) -> java.lang.Class: ...
        def getWord(self, __a0: List[int]) -> None: ...
        def hashCode(self) -> int: ...
        def notify(self) -> None: ...
        def notifyAll(self) -> None: ...
        def setByte(self, __a0: int, __a1: int) -> None: ...
        def toString(self) -> unicode: ...
        @overload
        def wait(self) -> None: ...
        @overload
        def wait(self, __a0: long) -> None: ...
        @overload
        def wait(self, __a0: long, __a1: int) -> None: ...
    def __init__(self, spc: ghidra.program.model.address.AddressSpace, isBigEndian: bool): ...
    def clear(self) -> None:
        """
        Clear unique storage at the start of an instruction
        """
        ...
    @staticmethod
    def constructValue(ptr: List[int], offset: int, size: int, bigendian: bool) -> long: ...
    @staticmethod
    def deconstructValue(ptr: List[int], offset: int, val: long, size: int, bigendian: bool) -> None: ...
    def equals(self, __a0: object) -> bool: ...
    def getChunk(self, offset: long, size: int, dest: List[int], stopOnUninitialized: bool) -> int: ...
    def getClass(self) -> java.lang.Class: ...
    def getInitializedMaskSize(self) -> int:
        """
        @return the size of a page initialized mask in bytes. Each bit within the
         mask corresponds to a data byte within a page.
        """
        ...
    def getMemoryFaultHandler(self) -> ghidra.pcode.memstate.MemoryFaultHandler:
        """
        @return memory fault handler (may be null)
        """
        ...
    def getPageSize(self) -> int:
        """
        A MemoryBank is instantiated with a \e natural page size. Requests for large chunks of data
         may be broken down into units of this size.
        @return the number of bytes in a page.
        """
        ...
    def getSpace(self) -> ghidra.program.model.address.AddressSpace:
        """
        @return the AddressSpace associated with this bank.
        """
        ...
    def hashCode(self) -> int: ...
    def isBigEndian(self) -> bool:
        """
        @return true if memory bank is big endian
        """
        ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def setChunk(self, offset: long, size: int, src: List[int]) -> None: ...
    def setInitialized(self, offset: long, size: int, initialized: bool) -> None: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
dda51f8afb2664e55ffebcb38827068a86d57fc9 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20180201/get_virtual_network_gateway_bgp_peer_status.py | fde6e4b745e746cc3d66fd3df1e4a7b7d879a36e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,685 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
# Auto-generated by the Pulumi SDK generator; keep edits to comments only.
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call
    """
    def __init__(__self__, value=None):
        # Defensive check: the provider must hand back a list (or None).
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponse']]:
        """
        List of BGP peers
        """
        return pulumi.get(self, "value")
# Awaitable wrapper so the result can be used with `await` in Pulumi programs.
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # `if False: yield` makes this a generator without ever yielding,
        # so awaiting it resolves immediately to the plain result object.
        if False:
            yield self
        return GetVirtualNetworkGatewayBgpPeerStatusResult(
            value=self.value)
# Auto-generated invoke wrapper for the Azure Native provider.
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
                                                resource_group_name: Optional[str] = None,
                                                virtual_network_gateway_name: Optional[str] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call


    :param str peer: The IP address of the peer to retrieve the status of.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    # Provider arguments use the API's camelCase names.
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider version to this SDK's own version.
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; .value carries the raw outputs.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20180201:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value
    return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
        value=__ret__.value)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
ab931e5ccb6b71bc187e3317e6c7844a9b49c6ea | 5d32d0e65aa3bfa677fd1b8c92569e07e9b82af1 | /Section 1 - Getting Started/Breakouts/Breakout 1.2 - Turtle Graphics/Turtle Shapes v2 - block1.py | dfff63c79e6078cc283ec4b1983b196f82b5dbf7 | [
"CC0-1.0"
] | permissive | pdst-lccs/lccs-python | b74ef2a02ac8ad2637f713fff5559f4e56c9827d | 95cb7ece05716521e9951d7a40de8fb20a88021f | refs/heads/master | 2023-05-28T00:46:57.313972 | 2023-05-22T10:16:43 | 2023-05-22T10:16:43 | 240,501,524 | 21 | 18 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Turtle Graphics - Further Activities
# Match the code blocks below to the corresponding shape
from turtle import *    # import the turtle graphics library
# Draw a 100x50 rectangle: alternate long (100) and short (50) sides,
# turning right 90 degrees after each side.
forward(100)
right(90)
forward(50)
right(90)
forward(100)
right(90)
forward(50)
"noreply@github.com"
] | pdst-lccs.noreply@github.com |
1d3245d9d48900c68394b1f5a8a746a3c42b03d0 | 0e6ce40f5a8e302698c2d0ddf945f7fa34dd190a | /mysite/polls/urls.py | 111b32d6b36b0bdc1ba4287fa9ac61db213f246c | [] | no_license | ricetak/django_tutorial | c89887a8153fb6901d3980217318c5137f7d3495 | 370ebade46a1aeade28b6626c4014cfea5db3a8b | refs/heads/master | 2020-05-26T09:12:34.024688 | 2019-05-23T07:10:55 | 2019-05-23T07:10:55 | 188,181,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | from django.urls import path
from . import views
'''
app_name = 'polls'
urlpatterns = [
    # ex: /polls/
    path('', views.index, name='index'),
    # ex: /polls/5/
    path('<int:question_id>/', views.detail, name='detail'),
    # ex: /polls/5/results/
    path('<int:question_id>/results/', views.results, name='results'),
    # ex: /polls/5/vote/
    path('<int:question_id>/vote/', views.vote, name='vote'),
]
'''
# URL configuration for the polls app: class-based-view version of the
# function-based mapping kept above (inside the string) for reference.
app_name = 'polls'
# Generic views use `pk` for the object lookup; vote() is still function-based.
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
    path('<int:question_id>/vote/', views.vote, name='vote'),
]
"you@example.com"
] | you@example.com |
2298f7d32ab7b27f31caad11a0c3d60da1efc78d | a1b649fcd0b6f6c51afb13f406f53d7d823847ca | /studies/migrations/0031_merge_20170828_1227.py | f58fb7d972f9061d9e93b85af8e2e50c1173197c | [
"MIT"
] | permissive | enrobyn/lookit-api | e79f0f5e7a4ef8d94e55b4be05bfacaccc246282 | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | refs/heads/master | 2020-03-27T01:54:00.844971 | 2018-08-08T15:33:25 | 2018-08-08T15:33:25 | 145,752,095 | 0 | 0 | MIT | 2018-08-22T19:14:05 | 2018-08-22T19:14:04 | null | UTF-8 | Python | false | false | 340 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-28 16:27
from __future__ import unicode_literals
from django.db import migrations
# Auto-generated merge migration: reconciles two divergent 0030 branches.
class Migration(migrations.Migration):
    # Both parent branches must be applied before this one.
    dependencies = [
        ('studies', '0030_merge_20170827_1909'),
        ('studies', '0030_merge_20170827_1539'),
    ]
    # Pure merge point: intentionally performs no schema operations.
    operations = [
    ]
| [
"cwisecarver@cos.io"
] | cwisecarver@cos.io |
52836a63f5ed574bbefa5ef16a42d4feb1fddf38 | 9505e191cb287507c7df05212ab562bea1eda553 | /Data structures&Algorithms/bst.py | af404b2867e2e4e6c1e26a024acf1a2cfc23779b | [
"MIT"
] | permissive | iisdd/Courses | c7a662305f3efe7d61eb23f766381290b1107bb8 | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | refs/heads/main | 2023-04-15T17:40:36.474322 | 2021-04-27T14:31:42 | 2021-04-27T14:31:42 | 316,904,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,154 | py | # 二叉搜索树,取个英文名字方便调包
class BiTreeNode:
    """A node of a binary (search) tree: a payload plus three links."""
    def __init__(self, data):
        """Create a detached node carrying *data*; all links start empty."""
        self.data = data
        # left child, right child and parent are all unset on creation
        self.lchild = self.rchild = self.parent = None
class BST:
    """A binary search tree over comparable, distinct values.

    Nodes are BiTreeNode instances linked through lchild/rchild/parent.
    Duplicate values are silently ignored on insertion.
    """
    def __init__(self, li=None):
        self.root = None
        if li:
            for val in li:
                self.insert_no_rec(val)
    ############################################## insertion ##############################################
    def insert(self, node, val):
        """Recursively insert *val* into the subtree rooted at *node* and
        return the (possibly new) subtree root. Duplicates are ignored."""
        if not node:  # empty slot: the new node lives here
            node = BiTreeNode(val)
        elif node.data > val:
            node.lchild = self.insert(node.lchild, val)
            node.lchild.parent = node
        elif node.data < val:
            node.rchild = self.insert(node.rchild, val)
            node.rchild.parent = node
        return node
    def insert_no_rec(self, val):
        """Iteratively insert *val* into the tree. Duplicates are ignored.

        BUGFIX: the original had no branch for val == p.data, so inserting a
        value already in the tree spun in an infinite loop.
        """
        p = self.root
        if not p:  # empty tree
            self.root = BiTreeNode(val)
            return
        while True:
            if p.data > val:
                if p.lchild:
                    p = p.lchild
                else:  # free left slot: attach the new node here
                    p.lchild = BiTreeNode(val)
                    p.lchild.parent = p
                    return
            elif p.data < val:
                if p.rchild:
                    p = p.rchild
                else:  # free right slot: attach the new node here
                    p.rchild = BiTreeNode(val)
                    p.rchild.parent = p
                    return
            else:  # value already present: nothing to do
                return
    ############################################## lookup ##############################################
    def query(self, node, val):
        """Recursively search for *val* below *node*; return the node or None."""
        if not node:
            return None
        if node.data < val:
            return self.query(node.rchild, val)
        elif node.data > val:
            return self.query(node.lchild, val)
        else:
            return node
    def query_no_rec(self, val):
        """Iteratively search for *val*; return the matching node or None."""
        p = self.root
        while p:
            if p.data > val:
                p = p.lchild
            elif p.data < val:
                p = p.rchild
            else:
                return p
        return None
    ###################################### traversal / printing #######################################
    def pre_order(self, root):
        """Print the subtree in pre-order (node, left, right)."""
        if root:
            print(root.data, end=',')
            self.pre_order(root.lchild)
            self.pre_order(root.rchild)
    def in_order(self, root):
        """Print the subtree in in-order (left, node, right) -> ascending output."""
        if root:
            self.in_order(root.lchild)
            print(root.data, end=',')
            self.in_order(root.rchild)
    def post_order(self, root):
        """Print the subtree in post-order (left, right, node)."""
        if root:
            self.post_order(root.lchild)
            self.post_order(root.rchild)
            print(root.data, end=',')
    ############################################## deletion ##############################################
    def __remove_node_1(self, node):
        # Case 1: *node* is a leaf -- simply unlink it from its parent.
        if not node.parent:  # node is the root
            self.root = None
        elif node == node.parent.lchild:  # node is its parent's left child
            node.parent.lchild = None
        else:  # node is its parent's right child
            node.parent.rchild = None
    def __remove_node_21(self, node):
        # Case 2.1: *node* has only a left child -- splice that child in.
        if not node.parent:  # node is the root
            self.root = node.lchild
            node.lchild.parent = None
        elif node == node.parent.lchild:
            node.parent.lchild = node.lchild
            node.lchild.parent = node.parent
        else:
            # BUGFIX: the original spliced node.rchild here, which is None in
            # this case and crashed with an AttributeError when deleting a
            # left-child-only node that was its parent's right child.
            node.parent.rchild = node.lchild
            node.lchild.parent = node.parent
    def __remove_node_22(self, node):
        # Case 2.2: *node* has only a right child -- splice that child in.
        if not node.parent:  # node is the root
            self.root = node.rchild
            node.rchild.parent = None
        elif node == node.parent.lchild:
            node.parent.lchild = node.rchild
            node.rchild.parent = node.parent
        else:
            node.parent.rchild = node.rchild
            node.rchild.parent = node.parent
    def delete(self, val):
        """Remove *val* from the tree.

        Returns False when *val* is absent; returns None otherwise
        (matching the original interface).
        """
        if self.root:  # non-empty tree
            node = self.query_no_rec(val)
            if not node:
                return False  # value not found
            if not node.lchild and not node.rchild:  # case 1: leaf
                self.__remove_node_1(node)
            elif not node.rchild:  # case 2.1: only a left child
                self.__remove_node_21(node)
            elif not node.lchild:  # case 2.2: only a right child
                self.__remove_node_22(node)
            else:
                # Case 3: two children -- overwrite with the in-order
                # successor (minimum of the right subtree), then remove that
                # successor node, which has no left child by construction.
                min_node = node.rchild
                while min_node.lchild:
                    min_node = min_node.lchild
                node.data = min_node.data
                if min_node.rchild:
                    self.__remove_node_22(min_node)
                else:
                    self.__remove_node_1(min_node)
# tree = BST([4,6,7,9,2,1,3,5,8])
# tree.pre_order(tree.root)
# print('')
# tree.in_order(tree.root) # 升序的
# print('\n', tree.query_no_rec(4).data)
# print(tree.query_no_rec(11))
#
# tree.delete(4)
# tree.delete(1)
# tree.delete(8)
# tree.in_order(tree.root) | [
"noreply@github.com"
] | iisdd.noreply@github.com |
b40d9aae99b9bedfefe3e549932913b1fbbe044c | e01ab8185f34be16777e61aa7ce71f00e037dcf3 | /scripts/parallel_align_seqs_pynast.py | d4b23c42dd44163a509d00625ff5dedd70c4da5d | [] | no_license | zellett/qiime | caf6eae9e8346f7cdd2fed1f9c580a1777046709 | 395ae76e03ccf57272dc17a6d6555edb15ce4783 | refs/heads/master | 2020-05-20T06:03:22.867386 | 2012-12-09T19:08:07 | 2012-12-09T19:08:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,681 | py | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.5.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from qiime.util import (parse_command_line_parameters,
get_options_lookup,
make_option,
load_qiime_config)
from qiime.align_seqs import pairwise_alignment_methods
from qiime.parallel.align_seqs import ParallelAlignSeqsPyNast
qiime_config = load_qiime_config()
options_lookup = get_options_lookup()
# Command-line interface description consumed by parse_command_line_parameters.
script_info={}
script_info['brief_description']="""Parallel sequence alignment using PyNAST"""
script_info['script_description']="""A wrapper for the align_seqs.py PyNAST option, intended to make use of multicore/multiprocessor environments to perform analyses in parallel."""
script_info['script_usage']=[]
script_info['script_usage'].append(("""Example""","""Align the input file (-i) against using PyNAST and write the output (-o) to $PWD/pynast_aligned_seqs/. ALWAYS SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will generally look something like /home/ubuntu/my_analysis/).""","""%prog -i $PWD/inseqs.fasta -o $PWD/pynast_aligned_seqs/"""))
script_info['output_description']="""This results in a multiple sequence alignment (FASTA-formatted)."""
script_info['required_options'] = [\
 options_lookup['fasta_as_primary_input'],\
 options_lookup['output_dir']
]
# Help text for -d falls back to a description when no blastdb is configured.
pairwise_alignment_method_choices = pairwise_alignment_methods.keys()
blast_db_default_help =\
 qiime_config['pynast_template_alignment_blastdb'] or \
 'created on-the-fly from template_alignment'
script_info['optional_options'] = [\
 make_option('-a','--pairwise_alignment_method',\
  type='choice',help='Method to use for pairwise alignments'+\
  ' [default: %default]',\
  default='uclust',choices=pairwise_alignment_method_choices),\
 make_option('-d','--blast_db',\
  dest='blast_db',help='Database to blast against'+\
  ' [default: %s]' % blast_db_default_help,
  default=qiime_config['pynast_template_alignment_blastdb']),\
 make_option('-e','--min_length',\
  type='int',help='Minimum sequence '+\
  'length to include in alignment [default: 75% of the'+\
  ' median input sequence length]',\
  default=-1),
 make_option('-p','--min_percent_id',action='store',\
  type='float',help='Minimum percent '+\
  'sequence identity to closest blast hit to include sequence in'+\
  ' alignment [default: %default]',default=75.0),\
 options_lookup['jobs_to_start'],
 options_lookup['retain_temp_files'],
 options_lookup['suppress_submit_jobs'],
 options_lookup['poll_directly'],
 options_lookup['cluster_jobs_fp'],
 options_lookup['suppress_polling'],
 options_lookup['job_prefix'],
 options_lookup['seconds_to_sleep']
]
script_info['version'] = __version__
# pynast_template_alignment_fp is required only if it is not
# provided in qiime_config
if qiime_config['pynast_template_alignment_fp']:
    script_info['optional_options'].append(make_option('-t','--template_fp',\
     type='string',dest='template_fp',help='Filepath for '+\
     'template against [default: %default]',
     default=qiime_config['pynast_template_alignment_fp']))
else:
    script_info['required_options'].append(make_option('-t','--template_fp',\
     type='string',dest='template_fp',\
     help='Filepath for template against',
     default=qiime_config['pynast_template_alignment_fp']))
def main():
    """Parse command-line options and launch the parallel PyNAST alignment."""
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    # Snapshot the parsed options as a plain dict of parameter values.
    # (The original used eval(str(opts)), which re-parses the repr of the
    # optparse Values object and breaks for any value whose repr is not a
    # valid Python literal; vars() yields the same mapping directly.)
    params = dict(vars(opts))
    parallel_runner = ParallelAlignSeqsPyNast(
            cluster_jobs_fp=opts.cluster_jobs_fp,
            jobs_to_start=opts.jobs_to_start,
            retain_temp_files=opts.retain_temp_files,
            suppress_polling=opts.suppress_polling,
            seconds_to_sleep=opts.seconds_to_sleep)
    # Run the alignment: input fasta, output dir, and the full parameter set.
    parallel_runner(opts.input_fasta_fp,
                    opts.output_dir,
                    params,
                    job_prefix=opts.job_prefix,
                    poll_directly=opts.poll_directly,
                    suppress_submit_jobs=False)
# Standard script entry point.
if __name__ == "__main__":
    main()
"gregcaporaso@gmail.com"
] | gregcaporaso@gmail.com |
322e397c0f0c080a5c48552626e6c1dd530072c3 | a83bafc38b514a0339a5991be15870551ac49681 | /test/test_raw_material.py | 408000cb4e9c5d50a4d0ba805ba5188e82f5dea7 | [] | no_license | bimdata/python-api-client | 4ec2f81e404ef88d3a7e4d08e18965b598c567a2 | c9b6ea0fbb4729b2a1c10522bdddfe08d944739d | refs/heads/master | 2023-08-17T13:38:43.198097 | 2023-08-09T12:48:12 | 2023-08-09T12:48:12 | 131,603,315 | 0 | 4 | null | 2022-10-10T15:21:26 | 2018-04-30T14:06:15 | Python | UTF-8 | Python | false | false | 900 | py | """
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1 (v1)
Contact: support@bimdata.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import bimdata_api_client
from bimdata_api_client.model.raw_material import RawMaterial
# Auto-generated (OpenAPI Generator) placeholder test case for RawMaterial.
class TestRawMaterial(unittest.TestCase):
    """RawMaterial unit test stubs"""
    def setUp(self):
        # No per-test fixtures needed yet.
        pass
    def tearDown(self):
        # No per-test cleanup needed yet.
        pass
    def testRawMaterial(self):
        """Test RawMaterial"""
        # FIXME: construct object with mandatory attributes with example values
        # model = RawMaterial()  # noqa: E501
        pass
# Allow running this module directly: `python test_raw_material.py`.
if __name__ == '__main__':
    unittest.main()
| [
"infra@bimdata.io"
] | infra@bimdata.io |
b93e64133180e5336b6485e8319d6a56dcd396ef | b01f1b9bafc9c45125fff4c90f0dc5bca8707fe8 | /tests/test_fonts3.py | 76488d42dfc73fa0bb2f46163c86c4af2123e66a | [
"MIT"
] | permissive | bunkahle/PILasOPENCV | a573d0b42b00a22822a85e2260e83ea6f5f642ed | 832bae926614a16a5a3ae882a25576862aff4125 | refs/heads/master | 2023-04-06T19:39:02.163531 | 2022-03-09T16:04:04 | 2022-03-09T16:04:04 | 176,177,923 | 27 | 10 | MIT | 2023-03-19T12:25:54 | 2019-03-18T00:33:31 | Python | UTF-8 | Python | false | false | 711 | py | from __future__ import print_function
import PILasOPENCV as Image
import PILasOPENCV as ImageDraw
import PILasOPENCV as ImageFont
import cv2
# font = ImageFont.truetype("arial.ttf", 30)
size = 20
font = ImageFont.truetype("msgothic.ttc", 22+int(size/50), index=0, encoding="unic")
print(font)
im = Image.new("RGB", (512, 512), "grey")
draw = ImageDraw.Draw(im)
text = "Some text in arial"
draw.text((100, 250), text, font=font, fill=(0, 0, 0))
im = im.resize((256,256), Image.ANTIALIAS)
print(ImageFont.getsize(text, font))
mask = ImageFont.getmask(text, font)
print(type(mask))
cv2.imshow("mask", mask)
im.show()
im_numpy = im.getim()
print(type(im_numpy), im_numpy.shape, im_numpy.dtype) | [
"noreply@github.com"
] | bunkahle.noreply@github.com |
fe3ff6faf74a462febd26d25acda2a52115ffadf | 5ac348d455265b9733b8ae930e45998213f226ac | /AI/lab2/part2/PCA.py | 60def303b7c472146f5b655cbb87f5394aca32a9 | [] | no_license | jsw-zorro/USTC-Junior-Lab | e5eed0f2e1e9b1487d7554f8f8302f74cd4116d1 | 35c3f11b505de72b14e5ca9ea5188825302dcfd9 | refs/heads/master | 2020-04-10T22:42:11.432863 | 2018-07-04T06:09:13 | 2018-07-04T06:09:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | # -*- coding: utf-8 -*
import numpy as np
import os
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import misc
SAMPLE_NUM = 10
CLASS_NUM = 40
IMG_SHAPE = (112, 92)
scale = 0.5
k = 8
principal_percent = 0.8
def load_faceimg(path_dir, shrink_rate=0.5, train_rate=0.8):
    """Load the face dataset from path_dir/s<label>/<j>.pgm, shrink each
    image by *shrink_rate*, flatten it, and split the first
    train_rate*SAMPLE_NUM images of each subject into the training set and
    the rest into the test set.

    Returns (train_x, train_y, test_x, test_y) where rows of *_x are
    flattened images and *_y holds the 1-based subject labels.
    NOTE: Python 2 script (statement-form print); scipy.misc.imresize was
    removed in SciPy >= 1.3, so this requires an old SciPy.
    """
    sample_k = int(train_rate * SAMPLE_NUM)  # training images per subject
    train_m = int(train_rate * SAMPLE_NUM * CLASS_NUM)
    # +1 compensates for float truncation of (1 - train_rate) * 400
    test_m = int((1 - train_rate) * SAMPLE_NUM * CLASS_NUM) + 1
    shape0 = int(IMG_SHAPE[0] * shrink_rate)
    shape1 = int(IMG_SHAPE[1] * shrink_rate)
    train_x = np.zeros((train_m, shape0 * shape1))
    train_y = np.zeros(train_m).astype(np.int8)  # labels 1..40 fit in int8
    test_x = np.zeros((test_m, shape0 * shape1))
    test_y = np.zeros(test_m).astype(np.int8)
    print train_x.shape, test_x.shape
    for i in range(CLASS_NUM):
        face_lable = i + 1  # subject folders are 1-based: s1..s40
        for j in range(SAMPLE_NUM):
            filename = path_dir + '/s' + str(face_lable) + '/' + str(j + 1) + '.pgm'
            # read -> shrink -> flatten to a 1-D float vector
            img = misc.imresize(mpimg.imread(filename), shrink_rate).flatten().astype(np.float)
            if j < sample_k:
                train_x[i * sample_k + j, :] = img
                train_y[i * sample_k + j] = face_lable
            if j >= sample_k:
                test_x[i * (10 - sample_k) + (j - sample_k), :] = img
                test_y[i * (10 - sample_k) + (j - sample_k)] = face_lable
    return train_x, train_y, test_x, test_y
# Zero-mean normalization: center both sets on the training-set mean.
def zero_mean(train_x, test_x):
    """Subtract the per-feature mean of *train_x* from both matrices.

    Returns the centered (train_x, test_x) pair; the inputs themselves are
    left untouched.
    """
    feature_means = train_x.mean(axis=0)
    # Broadcasting subtracts the row vector from every row of each matrix,
    # which is equivalent to tiling it with np.repeat.
    return train_x - feature_means, test_x - feature_means
# PCA dimensionality reduction
def pca(train_x, test_x, threshold):
    """Project train/test data onto the top-n principal components.

    n is the smallest number of components whose eigenvalues capture
    ``threshold`` of the total variance (see threshold_trans).
    Returns (projected train_x, projected test_x, basis eig_vecs).
    NOTE(review): np.linalg.eig can return complex eigenpairs for a
    covariance matrix due to rounding; np.linalg.eigh is the
    symmetric-aware choice -- confirm before changing.
    """
    # step1. zero-mean the data (training-set mean applied to both sets)
    train_x, test_x = zero_mean(train_x, test_x)
    # step2. covariance matrix (rowvar=0: columns are the variables)
    cov = np.cov(train_x, rowvar=0)
    # step3. eigenvalues/eigenvectors; n chosen from the variance threshold
    eig_vals, eig_vecs = np.linalg.eig(cov)
    n = threshold_trans(eig_vals, threshold)
    # Stack the eigenvalues on top of the eigenvector matrix, sort columns by
    # eigenvalue (lexsort ascending, then reversed to descending), then drop
    # the eigenvalue row again.
    eig = np.vstack((eig_vals, eig_vecs))
    eig_vecs = np.delete(eig.T[np.lexsort(eig[::-1, :])].T[:, ::-1], 0, axis=0)
    # step4. keep the first n eigenvectors as the basis and project
    # n = int(eig_vecs.shape[1]*principal_percent)
    eig_vecs = eig_vecs[:, 0:n]
    train_x = np.dot(train_x, eig_vecs)
    test_x = np.dot(test_x, eig_vecs)
    return train_x, test_x, eig_vecs
def threshold_trans(values, ths):
    """Smallest count of the largest values whose sum reaches ths * total.

    Returns None when the threshold is never reached (only possible for
    ths > 1).
    """
    total = sum(values)
    descending = np.sort(values)[::-1]
    running = 0
    for count, value in enumerate(descending, start=1):
        running += value
        if running >= total * ths:
            return count
def predict(train_x, train_y, test_x, test_y):
    """Nearest-neighbour classification in the projected space.

    For every test sample, find the Euclidean-closest training sample and
    count how many neighbours carry the correct label.
    """
    correct = 0
    for sample, label in zip(test_x, test_y):
        distances = np.linalg.norm(train_x - sample, axis=1)
        nearest = np.argmin(distances)
        if train_y[nearest] == label:
            correct += 1
    return correct
def plot_face(img):
    """Show the first 40 columns of img (eigenfaces) as a 4x10 grayscale grid.

    Each column is reshaped back to the shrunken face size; ``.real`` is used
    because np.linalg.eig may return complex eigenvectors.
    """
    plt.figure('low dimension map')
    r, c = (4, 10)
    # Use the module-level ``scale`` instead of repeating the literal 0.5,
    # and compute the shape once (the old per-iteration sqrt local was unused).
    shape = (int(112 * scale), int(92 * scale))
    for i in range(r * c):
        plt.subplot(r, c, i + 1)
        plt.imshow(img[:, i].real.reshape(shape), cmap='gray')
        plt.axis('off')
    plt.show()
# Variance-capture thresholds to sweep
threshold = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.999, 0.999999]
# load the data set
print '[INFO]loading...'
train_xs, train_y, test_xs, test_y = load_faceimg(os.getcwd() + '/data')
# PCA dimensionality reduction at each threshold
print '[INFO]PCA...'
for ths in threshold:
    train_x, test_x, eig_vecs = pca(train_xs, test_xs, ths)
    print ths, train_x.shape
    # predict via nearest neighbour and report accuracy
    count = predict(train_x, train_y, test_x, test_y)
    correct_rate = count * 1.0 / test_x.shape[0]
    print "Correct rate =", correct_rate * 100, "%"
    # only plot eigenfaces when there are enough components for the 4x10 grid
    if train_x.shape[1] > 40:
plot_face(eig_vecs) | [
"632679697@qq.com"
] | 632679697@qq.com |
9486c9afd91cf41d204a2a553a2d8a52d5904654 | 0b0d3246d39974cb8faff7d269da2d539415afab | /problem_python/p643.py | f23a17357f6d483556d9499124308313657f5eea | [] | no_license | xionghhcs/leetcode | 972e7ae4ca56b7100223630b294b5a97ba5dd7e8 | 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b | refs/heads/master | 2020-03-07T17:18:08.465559 | 2019-09-29T11:11:26 | 2019-09-29T11:11:26 | 127,607,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | class Solution(object):
def findMaxAverage(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: float
"""
tmp_sum = sum(nums[:4])
i = 0
ans = tmp_sum
for j in range(k, len(nums)):
tmp_sum = tmp_sum - nums[i] + nums[j]
if tmp_sum > ans:
ans = tmp_sum
i += 1
return float(ans) / k
| [
"xionghhcs@163.com"
] | xionghhcs@163.com |
55c34c0724af09f837aabbb9a2eccc295dfd9049 | 60b35d9219c3cafd5be4c176ceb9694cc7e3f0aa | /planner.py | f9e2b91526eaa6532fd6464c9d70361fca11a84d | [] | no_license | mikesuhan/canvas_automation | 3b201290e4df0401614ffd24ada7b6da2582818b | 8274352ce526a8c2c70e0e8a3428924f72c7797f | refs/heads/main | 2023-02-12T07:37:48.150138 | 2021-01-11T19:30:44 | 2021-01-11T19:30:44 | 326,797,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | import datetime
from dateutil.parser import parse
def date_range(first_day=datetime.datetime(2021, 1, 11, 8, 30), last_day=datetime.datetime(2021, 5, 7, 8, 30)):
    # Every datetime from first_day through last_day, one calendar day apart,
    # in ascending order (each element keeps last_day's time of day).
    total_days = (last_day - first_day).days
    return [last_day - datetime.timedelta(days=back) for back in range(total_days, -1, -1)]
def session_range(dates, *times, holidays=('jan 18 2020',)):
    """
    Filter a range of dates down to the class sessions that fall on them.

    Arguments:
        dates: a list of datetime objects
        *times: tuples of (day name, start time[, end time]),
            e.g. ('Monday', '8am', '10am')

    Keyword Arguments:
        holidays: dates (strings or datetimes) excluded from the output

    Returns:
        list of (start_at, end_at) datetime pairs; end_at is None when a
        session tuple has no end time.
    """
    sessions = []
    if holidays is None: holidays = []
    # Parse string holidays once, up front, instead of re-parsing on every date.
    holidays = [parse(h) if type(h) == str else h for h in holidays]
    for date in dates:
        # checks to make sure date isn't a holiday
        for holiday in holidays:
            # Bug fix: this used to read ``holiday.year == holiday.year``
            # (always True), so a holiday wrongly excluded the same
            # day/month in *every* year.
            if holiday.day == date.day and holiday.month == date.month and holiday.year == date.year:
                break
        # for/else: runs only when no holiday matched
        else:
            day = date.strftime("%a").lower()  # abbreviated, e.g. 'mon'
            for session in times:
                d, ts = session[0], session[1:]
                if d.lower().startswith(day):
                    start_t = parse(ts[0])
                    start_at = date.replace(hour=start_t.hour, minute=start_t.minute)
                    if len(ts) > 1:
                        end_t = parse(ts[1])
                        end_at = date.replace(hour=end_t.hour, minute=end_t.minute)
                    else:
                        end_at = None
                    sessions.append((start_at, end_at))
    return sessions
| [
"you@example.com"
] | you@example.com |
91cd2668c52a78e788bcdfe46bbb7e63ca3de71d | 395f93442d1d41ad228d62d4c15d197dbc1d1363 | /apps/user_operation/migrations/0004_auto_20181204_0942.py | fea67e4bcad34420f151c4ff625b83f50c1fd67b | [] | no_license | vevoly/ShopDjango | e0e310538eb4cdad0977f8ced1da6382a1441c67 | 8c25cf35797951c2a2d16933afedfa28689b597c | refs/heads/master | 2020-04-23T22:03:31.200141 | 2019-02-19T14:34:57 | 2019-02-19T14:34:57 | 171,489,038 | 0 | 0 | null | 2020-02-12T02:44:05 | 2019-02-19T14:33:04 | JavaScript | UTF-8 | Python | false | false | 1,108 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-12-04 09:42
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: refreshes the ``add_time``
    # defaults on three user_operation models. The datetime literals below
    # were frozen at generation time -- typical of a model default that was
    # a ``datetime.now()`` call instead of a callable; confirm the model
    # definitions before regenerating.

    dependencies = [
        ('user_operation', '0003_auto_20181117_1121'),
    ]

    operations = [
        migrations.AlterField(
            model_name='useraddress',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 4, 9, 42, 7, 105424), help_text='添加时间', verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='userfav',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 4, 9, 42, 7, 103424), help_text='添加时间', verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='userleavingmessage',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 4, 9, 42, 7, 104424), help_text='添加时间', verbose_name='添加时间'),
        ),
    ]
| [
"jevoly@163.com"
] | jevoly@163.com |
6f1155fa56134bb787b2fc17e62b2b06bf1c3850 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_blitzes.py | e5dac4a2227faeea262af941d833107c53afb89e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._blitz import _BLITZ
# class header
class _BLITZES(_BLITZ, ):
    # Word entry for the inflected form "blitzes"; inherits all behaviour
    # from the base-form entry _BLITZ and only overrides the identity fields.
    def __init__(self,):
        _BLITZ.__init__(self)
        self.name = "BLITZES"
        self.specie = 'verbs'  # NOTE: 'specie' (sic) is the dataset-wide key name
        self.basic = "blitz"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
319f8d28ab811b2e7eaf832c142ce5a9f1993d33 | 6766c01dee6c6330a62e14d5c036eedb60887228 | /book/admin.py | 04b64bbc505dc63b03a804135fa5da5558baf3c5 | [] | no_license | whu2017/easyreading | 5fbf299ab1d2e489e6dfd881a466852d646bbb52 | 71b2936345f9253648c046a68839c7164e506bfe | refs/heads/master | 2020-04-06T04:13:32.918077 | 2017-05-24T01:27:06 | 2017-05-24T01:27:06 | 83,019,406 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from book.models import Category, Book, Comment
class CategoryAdmin(admin.ModelAdmin):
    # Category change-list: show only the category name.
    list_display = ('name', )
class BookAdmin(admin.ModelAdmin):
    # Book change-list columns, including trial/chapter settings and the
    # create/update timestamps.
    list_display = ('category', 'title', 'author', 'price', 'score', 'total_chapter', 'allow_trial',
                    'trial_chapter', 'create_timestamp', 'update_timestamp')
class CommentAdmin(admin.ModelAdmin):
    # Comment change-list columns.
    list_display = ('user', 'book', 'score', 'content', 'timestamp')
# Register the models with their admin configurations.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Comment, CommentAdmin)
| [
"doraemonext@gmail.com"
] | doraemonext@gmail.com |
9fa7197b8a44396a777f1f416ab3e8488903a9b1 | 5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63 | /swexpert/1859(백만 장자).py | 88fa455f52e94d8e73ed650c5a4527801d43a941 | [] | no_license | juyi212/Algorithm_study | f5d263c5329c994a457bbe897e5e1405d2b1d67a | f225cc593a50b74686111f654f7133707a1d1310 | refs/heads/master | 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | import sys
sys.stdin = open('input1.txt','r')
T=int(input())
for i in range(0, T):
    # Maximum stock profit: buy on any days and sell everything at a later
    # maximum. Scan right-to-left keeping the running maximum price; every
    # cheaper earlier day contributes (maxprice - price) to the profit.
    day = int(input())
    dayprice = list(map(int, input().split()))
    maxprice = dayprice[len(dayprice)-1]
    benefit = 0
    # (removed an unused ``buy = 0`` local that was never read)
    for j in range(day-2, -1, -1):
        if dayprice[j] < maxprice:
            benefit += maxprice-dayprice[j]
        else:
            maxprice = dayprice[j]
    print('#{0} {1}'.format(i+1, benefit))
# for tc in range(1, int(input())+1):
# N = int(input())
# costs = list(map(int, input().split()))
#
# result = 0
# while True:
# max_value = max(costs)
# max_idx = costs.index(max_value)
# total = 0
# if max_idx != 0:
# total = max_value * max_idx
# for i in range(max_idx):
# total -= costs[i]
# result += total
#
# if max_idx == len(costs)-1 or max_idx == len(costs)-2:
# break
# else:
# costs = costs[max_idx+1:]
#
# print(f'#{tc} {result}')
| [
"dea8307@naver.com"
] | dea8307@naver.com |
13ad959a6218c2871702b4ef16bfccf686044504 | e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d | /intro/searching.py | 7d3678bf41cb22f6d1c32d55870e4744d264fc59 | [] | no_license | neuroph12/nlpy | 3f3d1a8653a832d6230cb565428ee0c77ef7451d | 095976d144dacf07414bf7ee42b811eaa67326c1 | refs/heads/master | 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # this line will show some book samples in NLTK.
from nltk.book import *
## concordance: every occurrence of a word with its surrounding context
# print('Sense and Sensibility by Jane Austen 1811')
# print(text2.concordance('affection'))
# print('text5: Chat Corpus')
print(text5.concordance('lol'))
## similarity: words that appear in similar contexts
# print(text1.similar('monstrous'))
## common contexts: contexts shared by two or more words
# print(text2.common_contexts(["monstrous", "very"]))
## dispersion plot: where the words occur across the length of the text
# text4.dispersion_plot(['citizens', 'democracy', 'freedom', 'duties', 'America'])
## generate is not supported now?
# print(text3.generate())
## | [
"anderscui@gmail.com"
] | anderscui@gmail.com |
fd6feb2ed457231f5f56dceff0819d45e00509b8 | 343bdaddfc66c6316e2cee490e9cedf150e3a5b7 | /0001_0100/0094/0094.py | b366673455e6be0051362104cde337887818eb30 | [] | no_license | dm-alexi/acmp | af7f6b4484b78f5922f3b464406a0ba5dea0d738 | 3fa0016d132adfeab7937b3e8c9687a34642c93a | refs/heads/master | 2021-07-09T15:14:25.857086 | 2020-10-20T19:08:54 | 2020-10-20T19:08:54 | 201,908,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | with open("input.txt", "r") as f, open("output.txt", "w") as q:
n, m, k = (int(x) for x in f.read().split())
q.write("1" if n >= m else "NO" if n <= k else str((m - n - 1) // (n - k) + 2))
| [
"dm2.alexi@gmail.com"
] | dm2.alexi@gmail.com |
39d81f04162ffe643e220fbda57ad7cee54f091e | 873d9322f0d9296a0eda49bba65faba3a7ba62e3 | /kontrasto/templatetags/kontrasto_tags.py | 9918722e2f02363153ae4fafe2370029bd7c40a1 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | nimasmi/kontrasto | d9910ca015f7d4263b7d9b829f3282936cfbb0b9 | 08fc0279e2b3e1da1a5cec769874572455fd0527 | refs/heads/main | 2023-04-19T21:18:47.677839 | 2021-05-15T00:42:50 | 2021-05-15T00:42:50 | 370,022,377 | 0 | 0 | MIT | 2021-05-23T10:28:35 | 2021-05-23T10:28:34 | null | UTF-8 | Python | false | false | 1,881 | py | from django import template
from kontrasto import wcag_2, wcag_3
register = template.Library()
@register.filter(name="dominant_color")
def dominant_color(image):
    """Template filter: the image's dominant colour (delegates to the image object)."""
    return image.get_dominant_color()
@register.filter(name="wcag_2_contrast")
def wcag_2_contrast(image, text_color: str) -> str:
    """Template filter: WCAG 2 contrast between the image's dominant colour and text_color.

    NOTE(review): annotated ``-> str`` but wcag2_contrast's actual return
    type is not visible here -- confirm against kontrasto.wcag_2.
    """
    return wcag_2.wcag2_contrast(image.get_dominant_color(), text_color)
@register.simple_tag(name="wcag_2_contrast_light_or_dark")
def wcag_2_contrast_light_or_dark(
    image, light_color: str, dark_color: str
) -> dict:
    """Pick whichever of light_color/dark_color contrasts better (WCAG 2)
    with the image's dominant colour.

    Returns a template-context dict of text/background colours and themes.
    (Return annotation corrected: this tag returns a dict, not a str.)
    """
    dominant = image.get_dominant_color()
    light_contrast = wcag_2.wcag2_contrast(dominant, light_color)
    dark_contrast = wcag_2.wcag2_contrast(dominant, dark_color)
    lighter = light_contrast > dark_contrast
    return {
        "text_color": light_color if lighter else dark_color,
        "text_theme": "light" if lighter else "dark",
        # "aa" alpha suffix implies dominant is a hex colour string -- confirm
        "bg_color": dominant,
        "bg_color_transparent": f"{dominant}aa",
        "bg_theme": "dark" if lighter else "light",
    }
@register.filter(name="wcag_3_contrast")
def wcag_3_contrast(image, text_color: str) -> str:
    """Template filter: APCA (WCAG 3 draft) contrast between the dominant colour and text_color."""
    return wcag_3.apca_contrast(image.get_dominant_color(), text_color)
@register.simple_tag(name="wcag_3_contrast_light_or_dark")
def wcag_3_contrast_light_or_dark(
    image, light_color: str, dark_color: str
) -> dict:
    """APCA (WCAG 3 draft) counterpart of wcag_2_contrast_light_or_dark.

    Contrasts are passed through format_contrast before comparison.
    Returns a template-context dict of text/background colours and themes.
    (Return annotation corrected: this tag returns a dict, not a str.)
    """
    dominant = image.get_dominant_color()
    light_contrast = wcag_3.format_contrast(
        wcag_3.apca_contrast(dominant, light_color)
    )
    dark_contrast = wcag_3.format_contrast(
        wcag_3.apca_contrast(dominant, dark_color)
    )
    lighter = light_contrast > dark_contrast
    return {
        "text_color": light_color if lighter else dark_color,
        "text_theme": "light" if lighter else "dark",
        "bg_color": dominant,
        "bg_color_transparent": f"{dominant}aa",
        "bg_theme": "dark" if lighter else "light",
    }
| [
"thibaudcolas@gmail.com"
] | thibaudcolas@gmail.com |
065cde2487f798bbdd3629817a89aac06a72872c | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/Scaleform/daapi/view/meta/PremiumWindowMeta.py | 938001af0b0a21975d24c7e234953c27e7860c41 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,076 | py | # 2016.11.19 19:51:28 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/PremiumWindowMeta.py
from gui.Scaleform.daapi.view.meta.SimpleWindowMeta import SimpleWindowMeta
class PremiumWindowMeta(SimpleWindowMeta):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends SimpleWindowMeta
    """

    def onRateClick(self, rateId):
        # Python-side stub: reports an error unless a subclass overrides it.
        self._printOverrideError('onRateClick')

    def as_setHeaderS(self, prc, bonus1, bonus2):
        # Forward to the Flash (ActionScript) object once DAAPI is initialised.
        if self._isDAAPIInited():
            return self.flashObject.as_setHeader(prc, bonus1, bonus2)

    def as_setRatesS(self, data):
        """
        :param data: Represented by PremiumWindowRatesVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_setRates(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\Scaleform\daapi\view\meta\PremiumWindowMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:51:28 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
1ab1c26e10ab8717734ea8ad5224365ae174f7e4 | c4b8e1e09dedbccd37ca008ecaaca4438610bbaf | /google_or_tools/futoshiki_sat.py | 0eac27b21330dd3171807643a0076cc4144a12bc | [
"MIT"
] | permissive | hakank/hakank | 4806598b98cb36dd51b24b0ab688f52dadfe9626 | c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2 | refs/heads/master | 2023-08-15T00:21:52.750270 | 2023-07-27T16:21:40 | 2023-07-27T16:21:40 | 11,933,517 | 336 | 97 | MIT | 2023-07-27T11:19:42 | 2013-08-06T20:12:10 | JavaScript | UTF-8 | Python | false | false | 4,036 | py | # Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Futoshiki problem in OR-tools CP-SAT Solver.
From http://en.wikipedia.org/wiki/Futoshiki
'''
The puzzle is played on a square grid, such as 5 x 5. The objective
is to place the numbers 1 to 5 (or whatever the dimensions are)
such that each row, and column contains each of the digits 1 to 5.
Some digits may be given at the start. In addition, inequality
constraints are also initially specifed between some of the squares,
such that one must be higher or lower than its neighbour. These
constraints must be honoured as the grid is filled out.
'''
Also see
http://www.guardian.co.uk/world/2006/sep/30/japan.estheraddley
This model is inspired by the Minion/Tailor
example futoshiki.eprime.
It's a port of my old CP model futoshiki.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main(values, lt):
  """Build and solve one Futoshiki instance with the CP-SAT solver.

  values: size x size grid of given digits (0 = blank).
  lt: list of 1-based [i1, j1, i2, j2] entries requiring
      field[i1,j1] < field[i2,j2].
  Prints the solved grid and solver statistics.
  """
  model = cp.CpModel()
  #
  # data
  #
  size = len(values)
  RANGE = list(range(size))
  NUMQD = list(range(len(lt)))
  #
  # variables
  #
  field = {}
  for i in RANGE:
    for j in RANGE:
      field[i, j] = model.NewIntVar(1, size, "field[%i,%i]" % (i, j))
  # (removed an unused ``field_flat`` list that was never referenced)
  #
  # constraints
  #
  # set initial values
  for row in RANGE:
    for col in RANGE:
      if values[row][col] > 0:
        model.Add(field[row, col] == values[row][col])
  # all rows have to be different
  for row in RANGE:
    model.AddAllDifferent([field[row, col] for col in RANGE])
  # all columns have to be different
  for col in RANGE:
    model.AddAllDifferent([field[row, col] for row in RANGE])
  # all < constraints are satisfied
  # Also: make 0-based
  for i in NUMQD:
    model.Add(
        field[lt[i][0] - 1, lt[i][1] - 1] < field[lt[i][2] - 1, lt[i][3] - 1])
  #
  # search and result
  #
  solver = cp.CpSolver()
  status = solver.Solve(model)
  if status == cp.OPTIMAL:
    for i in RANGE:
      for j in RANGE:
        print(solver.Value(field[i, j]), end=" ")
      print()
    print()
  # print("num_solutions:", num_solutions)
  print("NumConflicts:", solver.NumConflicts())
  print("NumBranches:", solver.NumBranches())
  print("WallTime:", solver.WallTime())
#
# Example from Tailor model futoshiki.param/futoshiki.param
# Solution:
# 5 1 3 2 4
# 1 4 2 5 3
# 2 3 1 4 5
# 3 5 4 1 2
# 4 2 5 3 1
#
# Futoshiki instance, by Andras Salamon
# specify the numbers in the grid
#
# Problem data: grid givens (0 = blank) plus 1-based '<' constraint pairs.
values1 = [[0, 0, 3, 2, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]]
# [i1,j1, i2,j2] requires that values[i1,j1] < values[i2,j2]
# Note: 1-based
lt1 = [[1, 2, 1, 1], [1, 4, 1, 5], [2, 3, 1, 3], [3, 3, 2, 3], [3, 4, 2, 4],
       [2, 5, 3, 5], [3, 2, 4, 2], [4, 4, 4, 3], [5, 2, 5, 1], [5, 4, 5, 3],
       [5, 5, 4, 5]]
#
# Example from http://en.wikipedia.org/wiki/Futoshiki
# Solution:
# 5 4 3 2 1
# 4 3 1 5 2
# 2 1 4 3 5
# 3 5 2 1 4
# 1 2 5 4 3
#
values2 = [[0, 0, 0, 0, 0], [4, 0, 0, 0, 2], [0, 0, 4, 0, 0], [0, 0, 0, 0, 4],
           [0, 0, 0, 0, 0]]
# Note: 1-based
lt2 = [[1, 2, 1, 1], [1, 4, 1, 3], [1, 5, 1, 4], [4, 4, 4, 5], [5, 1, 5, 2],
       [5, 2, 5, 3]]
if __name__ == "__main__":
  print("Problem 1")
  main(values1, lt1)
  print("\nProblem 2")
  main(values2, lt2)
| [
"hakank@gmail.com"
] | hakank@gmail.com |
2fce7a68118cfb2a297a7f558fcf02e1990f725a | 882c2b3c410b838372d43e431d1ccd6e02ba45f6 | /AlMgSiMC/cylinder_khachaturyan.py | 7025b331326e9df285f6023052ba55b49b3f0ad5 | [] | no_license | davidkleiven/GPAWTutorial | d46f7b8750172ba5ff36ccc27f97089cac94fd95 | 0bffc300df1d048142559855d3ccb9d0d8074d2e | refs/heads/master | 2021-06-08T05:44:42.784850 | 2021-02-25T10:23:28 | 2021-02-25T10:23:28 | 98,557,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | import numpy as np
from apal import Khachaturyan
import matplotlib as mpl
mpl.rcParams.update({'font.size': 18, 'axes.unicode_minus': False, 'svg.fonttype': 'none'})
from matplotlib import pyplot as plt
# Elastic stiffness tensor of Al in Voigt (6x6) notation, passed to
# Khachaturyan as elastic_tensor (units as expected by apal -- confirm).
C_al = np.array([[0.62639459, 0.41086487, 0.41086487, 0, 0, 0],
                 [0.41086487, 0.62639459, 0.41086487, 0, 0, 0],
                 [0.41086487, 0.41086487, 0.62639459, 0, 0, 0],
                 [0, 0, 0, 0.42750351, 0, 0],
                 [0, 0, 0, 0, 0.42750351, 0],
                 [0, 0, 0, 0, 0, 0.42750351]])
# Edge length of the cubic voxel grid used for all shapes below.
SIZE = 512
# Misfit (eigen)strain tensor of the precipitate, passed as misfit_strain.
MISFIT = np.array([[0.0440222, 0.00029263, 0.0008603],
                   [0.00029263, -0.0281846, 0.00029263],
                   [0.0008603, 0.00029263, 0.0440222]])
def strain_energy(radius, length):
    """Strain energy density (meV per cubic Angstrom) of a cylindrical precipitate."""
    from cylinder import create_cylinder
    grid = np.zeros((SIZE, SIZE, SIZE), dtype=np.int32)
    grid = create_cylinder(grid, radius, length, SIZE)
    print("Created cylinder")
    solver = Khachaturyan(elastic_tensor=C_al, misfit_strain=MISFIT)
    energy = solver.strain_energy_voxels(grid)
    print("Strain energy: {} meV/A^3".format(energy*1000))
    return 1000.0 * energy
def strain_ellipsoid(a, b, c):
    """Strain energy density (meV per cubic Angstrom) of an ellipsoid with half-axes a, b, c."""
    from cylinder import create_ellipsoid
    grid = np.zeros((SIZE, SIZE, SIZE), dtype=np.int32)
    grid = create_ellipsoid(grid, a, b, c, SIZE)
    print("Created ellipsoid")
    solver = Khachaturyan(elastic_tensor=C_al, misfit_strain=MISFIT)
    energy = solver.strain_energy_voxels(grid)
    print("Strain energy: {} meV/A^3 (a={},b={},c={})".format(energy*1000, a, b, c))
    return 1000.0 * energy
def calculate_all():
    """Sweep cylinder lengths at fixed radius 20 A and write (R, L, E) rows to CSV."""
    r = 20
    data = []
    for d in range(2, 200, 4):
        energy = strain_energy(r, d)
        data.append([r, d, energy])
    fname = "data/strain_energy_cylinder{}.csv".format(int(r))
    np.savetxt(fname, data, delimiter=",", header="Radius (A), Length (A), Energy (meV/A^3)")
def calculate_ellipsoid():
    """Sweep one ellipsoid half-axis with the other two fixed at 20 A and write CSV.

    flip_ba chooses whether the varied axis is passed as the x (flipped) or
    y half-axis; the output filename records which variant ran.
    NOTE(review): each row stores [a, b, c] with the *unswapped* a -- in the
    flipped branch this does not match the axes actually simulated; confirm
    when reading the CSV.
    """
    a = c = 20
    data = []
    flip_ba = True
    for b in list(range(2, 20, 4)) + list(range(20, 200, 20)):
        if flip_ba:
            energy = strain_ellipsoid(b, a, c)
        else:
            energy = strain_ellipsoid(a, b, c)
        data.append([a, b, c, energy])
    if flip_ba:
        fname = "data/strain_energy_ellipsoid{}_flipped.csv".format(int(a))
    else:
        fname = "data/strain_energy_ellipsoid{}.csv".format(int(a))
    np.savetxt(fname, data, delimiter=",", header="Half-axis x (A), Half-axis y (A), Half-axis z (A), Energy (meV/A^3)")
def save_voxels(radius, length):
    """Rasterise a cylinder and dump the voxel grid as raw uint8 bytes."""
    from cylinder import create_cylinder
    grid = np.zeros((SIZE, SIZE, SIZE), dtype=np.int32)
    grid = create_cylinder(grid, radius, length, SIZE)
    packed = np.array(grid, dtype=np.uint8)
    fname = "/work/sophus/cylinder_R{}_L{}.bin".format(int(radius), int(length))
    packed.tofile(fname)
    print("Voxels written to {}".format(fname))
def plot_strain_energy(fname):
    """Plot strain energy vs. cylinder aspect ratio from a (R, L, E) CSV
    written by calculate_all()."""
    data = np.loadtxt(fname, delimiter=",")
    aspect = data[:, 1]/data[:, 0]
    energy = data[:, 2]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(aspect, energy, color="#5d5c61")
    ax.set_xlabel("Aspect ratio (L/R)")
    # Escaped LaTeX: rendered later by an external tool thanks to
    # svg.fonttype 'none' set in the module-level rcParams.
    ax.set_ylabel(r"Strain energy (meV/\r{A}\$^3\$)")
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    plt.show()
def plot_strain_energy_ellipsoids():
    """Overlay the flipped and unflipped ellipsoid sweeps (see calculate_ellipsoid)
    as strain energy vs. aspect ratio."""
    data = np.loadtxt("data/strain_energy_ellipsoid20.csv", delimiter=",")
    data_flipped = np.loadtxt("data/strain_energy_ellipsoid20_flipped.csv", delimiter=",")
    aspect = data[:, 1]/data[:, 0]
    aspect_flipped = data_flipped[:, 1]/data_flipped[:, 0]
    energy = data[:, 3]
    energy_flipped = data_flipped[:, 3]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(aspect, energy, color="#5d5c61", marker="o", mfc="none")
    ax.plot(aspect_flipped, energy_flipped, color="#557a95", marker="v", mfc="none")
    ax.set_xlabel("Aspect ratio (L/R)")
    ax.set_ylabel(r"Strain energy (meV/\r{A}\$^3\$)")
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    plt.show()
# Script entry point: earlier batch runs are kept commented for reference.
#calculate_all()
#calculate_ellipsoid()
plot_strain_energy_ellipsoids()
#plot_strain_energy("data/strain_energy_cylinder20.csv")
#save_voxels(50, 400)
| [
"davidkleiven446@gmail.com"
] | davidkleiven446@gmail.com |
9872ed3d75f6b59b33b85328df1ac8e4c7ea2837 | 214942f8cf694227d32077accd8aa379c26b4830 | /ooi_instrument_agent/utils.py | 2b2a23ad7e8bce1df9cd47a0b66c4ece8d784abe | [
"Apache-2.0"
] | permissive | oceanobservatories/ooi-instrument-agent | 76084b2c554f195983550aa67d3c86e66a39f525 | e22e4300079468bb99c543cbbf1cb5c8b4a96897 | refs/heads/master | 2021-01-21T14:58:10.847453 | 2016-06-21T18:15:19 | 2016-06-21T18:18:24 | 58,216,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,896 | py | import json
import logging
from flask import request
from werkzeug.exceptions import abort
from ooi_instrument_agent.client import ZmqDriverClient
# Fallback returned by get_timeout() when the request supplies no/invalid
# 'timeout' (presumably milliseconds -- confirm against the driver client).
DEFAULT_TIMEOUT = 90000
log = logging.getLogger(__name__)
def get_client(consul, driver_id):
    """
    Create a ZmqDriverClient for the specified driver_id
    :param consul: Instance of consul.Consul
    :param driver_id: Reference designator of target driver
    :return: ZmqDriverClient if found, otherwise 404
    """
    host, port = get_host_and_port(consul, driver_id)
    return ZmqDriverClient(host, port)
def get_host_and_port(consul, driver_id):
    """
    Return the host and port for the specified driver_id
    :param consul: Instance of consul.Consul
    :param driver_id: Reference designator of target driver
    :return: (host, port) if found, otherwise aborts with 404
    """
    endpoint = get_service_host_and_port(consul, 'instrument_driver', tag=driver_id)
    if endpoint is None:
        abort(404)
    return endpoint
def get_service_host_and_port(consul, service_id, tag=None):
    """
    Return the first passing host and port for the specified service_id
    :param consul: Instance of consul.Consul
    :param service_id: service_id
    :param tag: tag
    :return: (host, port) if found, otherwise None
    """
    # The blocking-query index is not needed here; ``_`` makes that explicit.
    _, matches = consul.health.service(service_id, tag=tag, passing=True)
    for match in matches:
        host = match.get('Node', {}).get('Address')
        port = match.get('Service', {}).get('Port')
        if host and port:
            return host, port
    # No passing instance carried both an address and a port.
    return None
def list_drivers(consul):
    """
    Return a list of all passing drivers currently registered in Consul
    :param consul: Instance of consul.Consul
    :return: List of reference designators
    """
    drivers = []
    # The blocking-query index is not used; each driver's reference
    # designator is carried in the service's tag list.
    _, passing = consul.health.service('instrument_driver', passing=True)
    for each in passing:
        tags = each.get('Service', {}).get('Tags', [])
        drivers.extend(tags)
    return drivers
def get_port_agent(consul, driver_id):
    """
    Fetch the port agent information for the specified driver from Consul
    :param consul: Instance of consul.Consul
    :param driver_id: Reference designator of target driver
    :return: dict of {name: {'host': ..., 'port': ...}}; aborts 404 when empty
    """
    agent_services = [('data', 'port-agent'),
                      ('command', 'command-port-agent'),
                      ('sniff', 'sniff-port-agent'),
                      ('da', 'da-port-agent')]
    agents = {}
    for label, service_id in agent_services:
        located = get_service_host_and_port(consul, service_id, tag=driver_id)
        if located:
            host, port = located
            agents[label] = {'host': host, 'port': port}
    if not agents:
        abort(404)
    return agents
def get_from_request(name, default=None):
    """
    Extract the target parameter from a Flask request object, whichever way
    the data arrived: URL query params, a form, or a JSON body.
    :param name: Target parameter
    :param default: Value returned when the parameter is absent
    :return: Extracted value (JSON-decoded when possible), else default
    """
    def coerce(container, key):
        # Query/form values arrive as strings; decode JSON literals when possible.
        raw = container.get(key)
        if raw is None:
            return default
        try:
            return json.loads(raw)
        except (TypeError, ValueError):
            return raw
    if request.args:
        return coerce(request.args, name)
    if request.form:
        return coerce(request.form, name)
    if request.json:
        return request.json.get(name, default)
    return default
def get_timeout():
    """
    Read the 'timeout' parameter from the current request as an int.
    :return: parsed timeout, or DEFAULT_TIMEOUT when absent or not numeric
    """
    raw = get_from_request('timeout')
    try:
        return int(raw)
    except (ValueError, TypeError):
        return DEFAULT_TIMEOUT
| [
"petercable@gmail.com"
] | petercable@gmail.com |
1a3dcd0ed91952cbd51126d875f0e262109a6f94 | 6244c2efe590494e0870253269e269848f8debe4 | /BooleanNetworks/LEMScores/parseLEMscores_malaria_40hr.py | 272717bc1a9856b72ebf33574f87bff521033bb8 | [] | no_license | breecummins/BooleanNetworks | 53db4dc4e50d5d571344ed55b65efb66a1c4328d | 074409a6dd569b2f0ce3602e7dfda496db08cd01 | refs/heads/master | 2021-09-03T20:31:53.237492 | 2018-01-11T19:55:32 | 2018-01-11T19:55:32 | 117,146,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,021 | py | import parseLEMscores_yeast_mouse as PLS
import parseLEMscores_malaria_20hr as PLS20
from networkbuilder_yeast_mouse import createNetworkFile
import time
def parseLEMfile(bound=0,fname='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_90tfs_top25_dljtk_lem_score_table.txt'):
    # returns the source, target, and type of regulation sorted by decreasing LEM score (also returned)
    # File layout (from the code): 8 header lines, then whitespace-separated
    # rows with the target gene in column 0, "type(source)" in column 2, and
    # the LEM score in column 5. Only rows with score > bound are kept.
    source=[]
    type_reg=[]
    target=[]
    lem_score=[]
    with open(fname,'r') as f:
        for _ in range(8):
            f.readline()
        for l in f.readlines():
            wordlist=l.split()
            lem = float(wordlist[5])
            if lem>bound:
                target.append(wordlist[0])
                lem_score.append(lem)
                # split "type(source)" into the regulation type and source gene
                two_words=wordlist[2].split('(')
                type_reg.append(two_words[0])
                source.append(two_words[1][:-1])
    # sort all four lists together by descending LEM score
    [lem_score,source,target,type_reg] = PLS.sort_by_list_in_reverse(lem_score,[source,target,type_reg])
    return source,target,type_reg,lem_score
def generateResult(threshold=0.1,frontname='malaria40hr_90TF_top25',makegraph=1,saveme=1,onlylargestnetwork=0,LEMfile='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_90tfs_top25_dljtk_lem_score_table.txt',new_network_path='',new_network_date='',essential=True):
    """Parse LEM scores above ``threshold``, reduce the gene network to its
    strongly connected components, and optionally draw the graph and/or
    write a network file under new_network_path.

    makegraph / saveme / onlylargestnetwork are int flags.
    NOTE(review): new_network_date is accepted but never used (the output
    filename uses time.strftime instead) -- confirm before removing.
    """
    print 'Parsing file...'
    source,target,type_reg,lem_score=parseLEMfile(threshold,LEMfile)
    # only genes appearing as both a source and a target can lie on a cycle
    genes = sorted(set(source).intersection(target))
    # print genes
    print 'Making outedges...'
    outedges,regulation,LEM_scores=PLS20.makeOutedges(genes,source,target,type_reg,lem_score)
    # print outedges
    print 'Extracting strongly connected components...'
    grouped_scc_gene_inds=PLS20.strongConnectIndices(outedges)
    scc_genenames=[[genes[g] for g in G] for G in grouped_scc_gene_inds ]
    # print scc_genes
    if onlylargestnetwork:
        # keep only the largest strongly connected component
        L = [len(g) for g in grouped_scc_gene_inds]
        ind=L.index(max(L))
        grouped_scc_gene_inds = grouped_scc_gene_inds[ind]
        flat_scc_gene_inds = grouped_scc_gene_inds[:]
        scc_genenames = scc_genenames[ind]
        flat_scc_genenames = scc_genenames[:]
    else:
        # flatten all components into single index/name lists
        flat_scc_gene_inds= [g for G in grouped_scc_gene_inds for g in G]
        flat_scc_genenames = [s for S in scc_genenames for s in S]
    outedges,regulation,LEM_scores=PLS20.pruneOutedges(flat_scc_gene_inds,outedges,regulation,LEM_scores)
    if makegraph:
        print 'Making graph for {} nodes and {} edges....'.format(len(flat_scc_gene_inds),len([o for oe in outedges for o in oe]))
        PLS.makeGraph(flat_scc_genenames,outedges,regulation,name='{}_graph_thresh{}.pdf'.format(frontname,str(threshold).replace('.','-')))
    if saveme:
        createNetworkFile(flat_scc_genenames,outedges,regulation,new_network_path+'{}D_'.format(len(flat_scc_genenames))+time.strftime("%Y_%m_%d")+'_{}_T{}'.format(frontname,str(threshold).replace('.','-')) + '_essential'*essential +'.txt',[essential]*len(flat_scc_genenames))
if __name__ == "__main__":
# frontname='malaria40hr_90TF_top25'
# new_network_path = '/Users/bcummins/GIT/DSGRN/networks/'
# LEMfile='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_90tfs_top25_dljtk_lem_score_table.txt'
# for threshold in [0.01, 0.0075, 0.005, 0.001]:
# generateResult(threshold,frontname,1,1,1,LEMfile,new_network_path,True)
frontname='malaria40hr_50TF_top25'
new_network_path = '/Users/bcummins/GIT/DSGRN/networks/'
LEMfile='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_50tfs_top25_dljtk_lem_score_table.txt'
makegraph=1
saveme=0
onlylargestnetwork=0
essential=True
for threshold in [0.02]:
generateResult(threshold,frontname,makegraph,saveme,onlylargestnetwork,LEMfile,new_network_path,essential)
| [
"breecummins@gmail.com"
] | breecummins@gmail.com |
9a670955cc54404b943dfc93a7b7692e7f24ee44 | 00b5ad360284adc06f7e7ca9b2d1c2d3a0edd6f9 | /recycle/CRF-C-LR.py | 5571d01c9c38fafa50e8533efab0bdcfe00946ba | [] | no_license | ShenDezhou/CBLSTM | e09d36f609df2b34ace2ae8085d2232039838675 | b5ac4714f8ea14cf2bfd6ce6033eb697ef078686 | refs/heads/master | 2021-04-16T19:47:44.758194 | 2020-07-20T06:21:08 | 2020-07-20T06:21:08 | 249,381,106 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2019年5月17日
@author: Administrator
'''
from sklearn.feature_extraction.text import CountVectorizer
import os
import codecs
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pkuseg
class Sentiment(object):
vectorizer=None
log_model=None
acc_score=None
def __init__(self):
pass
@classmethod
def load_model(cls_obj):
data = []
data_labels = []
for filename in os.listdir(u"./hotelcomment/正面"):
if filename.endswith(".txt"):
with codecs.open("./hotelcomment/正面/"+filename, 'r', encoding='utf-8') as f:
text = f.read()
data.append(text)
data_labels.append('pos')
continue
else:
continue
for filename in os.listdir(u"./hotelcomment/负面"):
if filename.endswith(".txt"):
with codecs.open(u"./hotelcomment/负面/"+filename, 'r', encoding='utf-8') as f:
text = f.read()
data.append(text)
data_labels.append('neg')
continue
else:
continue
print(len(data), len(data_labels))
seg = pkuseg.pkuseg(model_name='web')
cls_obj.vectorizer = CountVectorizer(
analyzer = lambda text: seg.cut(text),
lowercase = False,
)
features = cls_obj.vectorizer.fit_transform(
data
)
features_nd = features.toarray()
X_train, X_test, y_train, y_test = train_test_split(
features_nd,
data_labels,
train_size=0.80,
random_state=1234)
cls_obj.log_model = LogisticRegression()
cls_obj.log_model = cls_obj.log_model.fit(X=X_train, y=y_train)
y_pred = cls_obj.log_model.predict(X_test)
cls_obj.acc_score=accuracy_score(y_test, y_pred)
return cls_obj | [
"bangtech@sina.com"
] | bangtech@sina.com |
f91f09dca1cd6719bb83aa81dbb34abf79e48761 | f0b75bd94f133a13f469f429a696f26be3be9862 | /week_4/.history/class_exercise1_20200217114534.py | 797ca5e3e1d3292edc31ea02837aa9efe73bbf2a | [] | no_license | dechavez4/Python_handin_assignments | 023350fabd212cdf2a4ee9cd301306dc5fd6bea0 | 82fd8c991e560c18ecb2152ea5a8fc35dfc3c608 | refs/heads/master | 2023-01-11T23:31:27.220757 | 2020-05-22T10:33:56 | 2020-05-22T10:33:56 | 237,179,899 | 0 | 0 | null | 2022-12-30T20:14:04 | 2020-01-30T09:30:16 | Python | UTF-8 | Python | false | false | 1,235 | py | import numpy as np
a = np.arange(10,30).reshape(4,5)
#exercise 1 table
yellow = a[0,0]
green = a[:3, 2]
teal = a[:, (1,3)]
blue = a[::2, 4]
red = a[0, 1:4]
#print('yellow= ', yellow, 'green= ', green, 'blue= ', blue, 'teal=', teal, 'red=', red)
#exercise 2 cube:
c = np.arange(0, 27).reshape((3, 3, 3)) # = (z, y, x)
slice1 = c[1, 1, :]
slice2 = c[:, 1 , 0 ]
slice3 = c[0, :, 2]
#print('slice1 = ', slice1, 'slice2 = ', slice2, 'slice3 = ', slice3)
#exercise 3 masking:
data = np.arange(1,101).reshape(10,10)
even = data[data % 2 == 0]
sixOnly = np.where(data % 10 == 6)
six = data[sixOnly]
#print('even =', even, 'sixOnly', six)
#exercise 4 numpy and csv:
filename = 'befkbhalderstatkode.csv'
bef_stats_df = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
dd = bef_stats_df
mask_year_2015 = dd[:, 0] == 2015
mask_german = dd[:,3] == 5180
german_children_mask = (mask_year_2015 & mask_german & (dd[:, 2] <= 0))
german_children = np.sum(dd[(german_children_mask)][:, 4])
#print(german_children)
def showNum(arr, bydel, alder, statkode):
parts = (dd[:,0] == arr) & (dd[:,3] == bydel) & (dd[:,2] <= alder) & (dd[:,1] <=bydel)
partsData = dd[parts]
print(partsData)
showNum(2015, 2, 0, 5180) | [
"chavezgamingv2@hotmail.com"
] | chavezgamingv2@hotmail.com |
bf6d916adf0631e19932c2e5f3d01cddfc18a72e | ee409ec2e421bdac5988fcbe6592b05824b51d58 | /google-datacatalog-qlik-connector/tests/google/datacatalog_connectors/qlik/scrape/engine_api_dimensions_helper_test.py | da581d76cc0bbe1954ca4808e0652eabc188a810 | [
"Apache-2.0",
"Python-2.0"
] | permissive | GoogleCloudPlatform/datacatalog-connectors-bi | 7b11ed25856e83c8bd4b701dd836e0d20815caf7 | 58cc57e12632cbd1e237b3d6930e519333c51f4e | refs/heads/master | 2023-04-01T14:27:24.548547 | 2022-02-12T09:55:56 | 2022-02-12T09:55:56 | 259,464,922 | 34 | 18 | Apache-2.0 | 2022-02-12T09:55:57 | 2020-04-27T21:51:45 | Python | UTF-8 | Python | false | false | 6,322 | py | #!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from unittest import mock
from google.datacatalog_connectors.qlik.scrape import \
engine_api_dimensions_helper
from . import scrape_ops_mocks
class EngineAPIDimensionsHelperTest(unittest.TestCase):
__SCRAPE_PACKAGE = 'google.datacatalog_connectors.qlik.scrape'
__BASE_CLASS = f'{__SCRAPE_PACKAGE}.base_engine_api_helper' \
f'.BaseEngineAPIHelper'
__HELPER_CLASS = f'{__SCRAPE_PACKAGE}.engine_api_dimensions_helper' \
f'.EngineAPIDimensionsHelper'
def setUp(self):
self.__helper = engine_api_dimensions_helper.EngineAPIDimensionsHelper(
server_address='https://test-server', auth_cookie=mock.MagicMock())
@mock.patch(f'{__HELPER_CLASS}._EngineAPIDimensionsHelper__get_dimensions',
lambda *args: None)
@mock.patch(f'{__BASE_CLASS}._run_until_complete')
def test_get_dimensions_should_raise_unknown_exception(
self, mock_run_until_complete):
mock_run_until_complete.side_effect = Exception
self.assertRaises(Exception, self.__helper.get_dimensions, 'app_id')
@mock.patch(f'{__HELPER_CLASS}._EngineAPIDimensionsHelper__get_dimensions',
lambda *args: None)
@mock.patch(f'{__BASE_CLASS}._run_until_complete')
def test_get_dimensions_should_return_empty_list_on_timeout(
self, mock_run_until_complete):
mock_run_until_complete.side_effect = asyncio.TimeoutError
dimensions = self.__helper.get_dimensions('app-id')
self.assertEqual(0, len(dimensions))
# BaseEngineAPIHelper._hold_websocket_communication is purposefully not
# mocked in this test case in order to simulate a full send/reply scenario
# with replies representing an App with Dimensions. Maybe it's worth
# refactoring it in the future to mock that method, and the private async
# ones from EngineAPIDimensionsHelper as well, thus testing in a more
# granular way.
@mock.patch(f'{__BASE_CLASS}._generate_message_id')
@mock.patch(f'{__BASE_CLASS}._send_get_all_infos_message')
@mock.patch(f'{__BASE_CLASS}._BaseEngineAPIHelper__send_open_doc_message')
@mock.patch(f'{__BASE_CLASS}._connect_websocket',
new_callable=scrape_ops_mocks.AsyncContextManager)
def test_get_dimensions_should_return_list_on_success(
self, mock_websocket, mock_send_open_doc, mock_send_get_all_infos,
mock_generate_message_id):
mock_send_open_doc.return_value = asyncio.sleep(delay=0, result=1)
mock_send_get_all_infos.return_value = asyncio.sleep(delay=0, result=2)
mock_generate_message_id.side_effect = [3, 4]
websocket_ctx = mock_websocket.return_value.__enter__.return_value
websocket_ctx.set_itr_break(0.25)
websocket_ctx.set_data([
{
'id': 1,
'result': {
'qReturn': {
'qHandle': 1,
},
},
},
{
'id': 2,
'result': {
'qInfos': [{
'qId': 'dimension-id',
'qType': 'dimension'
}],
},
},
{
'id': 3,
'result': {
'qReturn': {
'qHandle': 2,
},
},
},
{
'id': 4,
'result': {
'qProp': [{
'qInfo': {
'qId': 'dimension-id',
},
}],
},
},
])
dimensions = self.__helper.get_dimensions('app-id')
self.assertEqual(1, len(dimensions))
self.assertEqual('dimension-id', dimensions[0].get('qInfo').get('qId'))
mock_send_open_doc.assert_called_once()
mock_send_get_all_infos.assert_called_once()
# BaseEngineAPIHelper._hold_websocket_communication is purposefully not
# mocked in this test case in order to simulate a full send/reply scenario
# with replies representing an App with no Dimensions. Maybe it's worth
# refactoring it in the future to mock that method, and the private async
# ones from EngineAPIDimensionsHelper as well, thus testing in a more
# granular way.
@mock.patch(f'{__BASE_CLASS}._send_get_all_infos_message')
@mock.patch(f'{__BASE_CLASS}._BaseEngineAPIHelper__send_open_doc_message')
@mock.patch(f'{__BASE_CLASS}._connect_websocket',
new_callable=scrape_ops_mocks.AsyncContextManager)
def test_get_dimensions_should_return_empty_list_on_none_available(
self, mock_websocket, mock_send_open_doc, mock_send_get_all_infos):
mock_send_open_doc.return_value = asyncio.sleep(delay=0, result=1)
mock_send_get_all_infos.return_value = asyncio.sleep(delay=0, result=2)
websocket_ctx = mock_websocket.return_value.__enter__.return_value
websocket_ctx.set_itr_break(0.25)
websocket_ctx.set_data([
{
'id': 1,
'result': {
'qReturn': {
'qHandle': 1,
},
},
},
{
'id': 2,
'result': {
'qInfos': [],
},
},
])
dimensions = self.__helper.get_dimensions('app-id')
self.assertEqual(0, len(dimensions))
mock_send_open_doc.assert_called_once()
mock_send_get_all_infos.assert_called_once()
| [
"noreply@github.com"
] | GoogleCloudPlatform.noreply@github.com |
6c7b94252cf23796c1c645176f35159465ceabce | 33cf73bf603ffe09ad763fca4103e979ed50a4bc | /service_api/cd/NightWorkSpider.py | 43e27efb13f5b1eaca5c928bbe078a11de05959a | [] | no_license | daddvted/excavat0r | f73d05670766d5f47ef5d7e443289851fc172906 | 8c2c56b6395bede4135fd859b1338831345054b6 | refs/heads/master | 2022-06-09T11:51:34.461893 | 2018-12-12T10:06:42 | 2018-12-12T10:06:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,876 | py | """
夜间施工查询
URL: http://www.cdcc.gov.cn/QualitySafeShow/NightWorkList.aspx
"""
import re
import time
import random
import requests
import lxml.html
import mysql.connector
from urllib.parse import urlencode
class NightWorkSpider(object):
USER_AGENTS = [
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36",
"Mozilla/5.0 (Linux; U; Android 4.4.4; zh-cn; MI NOTE LTE Build/KTU84P) AppleWebKit/533.1 (KHTML, like Gecko)Version/4.0 MQQBrowser/5.4 TBS/025489 Mobile Safari/533.1 MicroMessenger/6.3.13.49_r4080b63.740 NetType/cmnet Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13D15 MicroMessenger/6.3.13 NetType/WIFI Language/zh_CN",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Shuame; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.9.1.1000 Chrome/39.0.2146.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13",
"Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)",
"Googlebot/2.1 (http://www.googlebot.com/bot.html)",
"Opera/9.20 (Windows NT 6.0; U; en)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0",
]
config = {
'user': 'root',
'password': 'hello',
'host': '192.168.86.86',
'port': '3306',
'database': 'service_cd',
'raise_on_warnings': True,
}
URL = "http://www.cdcc.gov.cn/QualitySafeShow/NightWorkList.aspx"
BASE_URL = "http://www.cdcc.gov.cn/QualitySafeShow/"
def __init__(self):
self.total_page = 0
self.urls = []
self.__VIEWSTATE = ""
self.__EVENTVALIDATION = ""
self.__EVENTTARGET = ""
self.cookie = ""
self.crawl_date = time.strftime('%Y%m%d', time.localtime())
# Init mysql
self.conn = mysql.connector.connect(**self.config)
self.cursor = self.conn.cursor()
def save2db(self, data):
template = "INSERT INTO nightwork(unit, project, part, start, end, addr, crawl_date) " \
"VALUES (%(unit)s, %(project)s, %(part)s, %(start)s, %(end)s, %(addr)s, %(crawl_date)s)"
self.cursor.execute(template, data)
self.conn.commit()
# 1st crawl, get total
def crawl(self):
print("crawling page 1")
headers = {
"User-Agent": random.choice(self.USER_AGENTS)
}
browser = requests.get(self.URL, headers=headers)
if browser.status_code == 200:
session = browser.cookies.get("ASP.NET_SessionId")
self.cookie = "ASP.NET_SessionId=" + session
html = lxml.html.fromstring(browser.text)
# Crawl urls of 1st page
links = html.xpath('//table[@id="DgList"]/tr/td[2]/a')
for link in links:
self.urls.append(self.BASE_URL + str(link.attrib["href"]))
page_div = html.xpath('//div[@id="Navigate_divPanel"]/span')
if len(page_div):
tmp = str(page_div[0].text_content())
match = re.findall(r'(\d+)', tmp)
self.total_page = int(match[0])
view_state_div = html.xpath('//input[@id="__VIEWSTATE"]')
self.__VIEWSTATE = view_state_div[0].attrib["value"]
event_valid_div = html.xpath('//input[@id="__EVENTVALIDATION"]')
self.__EVENTVALIDATION = event_valid_div[0].attrib["value"]
self.__EVENTTARGET = "Navigate$btnNavNext"
self.crawl_step2()
# Only 1 page, start final_crawl()
else:
self.final_crawl()
else:
print("Error while crawling page 1")
self.crawl_step2()
def crawl_step2(self):
for p in range(2, self.total_page + 1):
data = {
"__VIEWSTATE": self.__VIEWSTATE,
"__EVENTVALIDATION": self.__EVENTVALIDATION,
"__EVENTTARGET": self.__EVENTTARGET,
}
print("crawling page {}".format(p))
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"User-Agent": random.choice(self.USER_AGENTS),
"Cookie": self.cookie
}
browser = requests.post(self.URL, headers=headers, data=urlencode(data))
if browser.status_code == 200:
html = lxml.html.fromstring(browser.text)
view_state_div = html.xpath('//input[@id="__VIEWSTATE"]')
self.__VIEWSTATE = view_state_div[0].attrib["value"]
event_valid_div = html.xpath('//input[@id="__EVENTVALIDATION"]')
self.__EVENTVALIDATION = event_valid_div[0].attrib["value"]
self.__EVENTTARGET = "Navigate$btnNavNext"
links = html.xpath('//table[@id="DgList"]/tr/td[2]/a')
for link in links:
self.urls.append(self.BASE_URL + str(link.attrib["href"]))
self.final_crawl()
else:
print("Error while crawling page {}".format(p))
self.final_crawl()
def final_crawl(self):
for url in self.urls:
print("Crawling url: {}".format(url))
headers = {
"User-Agent": random.choice(self.USER_AGENTS)
}
browser = requests.get(url, headers=headers)
if browser.status_code == 200:
html = lxml.html.fromstring(browser.text)
tds = html.xpath('//table[@id="viewTable"]/tr/td[2]')
data = {
"unit": str(tds[0].text_content()),
"project": str(tds[1].text_content()),
"part": str(tds[2].text_content()),
"start": str(tds[3].text_content()),
"end": str(tds[4].text_content()),
"addr": str(tds[5].text_content()),
"crawl_date": self.crawl_date
}
self.save2db(data)
else:
print("Error while crawling url: {}".format(url))
if __name__ == "__main__":
spider = NightWorkSpider()
spider.crawl()
spider.cursor.close()
spider.conn.close()
| [
"ski2per@163.com"
] | ski2per@163.com |
fc3a3a852a14c61bf200443577da2911fd89726f | d954e2f74d1186c8e35be8ea579656513d8d3b98 | /rllib/utils/metrics/learner_info.py | c3d0672ed9b43cee73cde9f6bbcb3ab1634805d6 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vakker/ray | a865de214e60f9e62d61c03ae7ce55ad6030f84c | de238dd626a48a16c8b3cd006f3482db75f63a83 | refs/heads/master | 2023-01-23T22:30:44.839942 | 2022-10-23T01:05:48 | 2022-10-23T01:05:48 | 171,845,804 | 0 | 1 | Apache-2.0 | 2023-01-14T08:01:04 | 2019-02-21T09:54:36 | Python | UTF-8 | Python | false | false | 4,034 | py | from collections import defaultdict
import numpy as np
import tree # pip install dm_tree
from typing import Dict
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.typing import PolicyID
# Instant metrics (keys for metrics.info).
LEARNER_INFO = "learner"
# By convention, metrics from optimizing the loss can be reported in the
# `grad_info` dict returned by learn_on_batch() / compute_grads() via this key.
LEARNER_STATS_KEY = "learner_stats"
@DeveloperAPI
class LearnerInfoBuilder:
def __init__(self, num_devices: int = 1):
self.num_devices = num_devices
self.results_all_towers = defaultdict(list)
self.is_finalized = False
def add_learn_on_batch_results(
self,
results: Dict,
policy_id: PolicyID = DEFAULT_POLICY_ID,
) -> None:
"""Adds a policy.learn_on_(loaded)?_batch() result to this builder.
Args:
results: The results returned by Policy.learn_on_batch or
Policy.learn_on_loaded_batch.
policy_id: The policy's ID, whose learn_on_(loaded)_batch method
returned `results`.
"""
assert (
not self.is_finalized
), "LearnerInfo already finalized! Cannot add more results."
# No towers: Single CPU.
if "tower_0" not in results:
self.results_all_towers[policy_id].append(results)
# Multi-GPU case:
else:
self.results_all_towers[policy_id].append(
tree.map_structure_with_path(
lambda p, *s: _all_tower_reduce(p, *s),
*(
results.pop("tower_{}".format(tower_num))
for tower_num in range(self.num_devices)
)
)
)
for k, v in results.items():
if k == LEARNER_STATS_KEY:
for k1, v1 in results[k].items():
self.results_all_towers[policy_id][-1][LEARNER_STATS_KEY][
k1
] = v1
else:
self.results_all_towers[policy_id][-1][k] = v
def add_learn_on_batch_results_multi_agent(
self,
all_policies_results: Dict,
) -> None:
"""Adds multiple policy.learn_on_(loaded)?_batch() results to this builder.
Args:
all_policies_results: The results returned by all Policy.learn_on_batch or
Policy.learn_on_loaded_batch wrapped as a dict mapping policy ID to
results.
"""
for pid, result in all_policies_results.items():
if pid != "batch_count":
self.add_learn_on_batch_results(result, policy_id=pid)
def finalize(self):
self.is_finalized = True
info = {}
for policy_id, results_all_towers in self.results_all_towers.items():
# Reduce mean across all minibatch SGD steps (axis=0 to keep
# all shapes as-is).
info[policy_id] = tree.map_structure_with_path(
_all_tower_reduce, *results_all_towers
)
return info
def _all_tower_reduce(path, *tower_data):
"""Reduces stats across towers based on their stats-dict paths."""
# TD-errors: Need to stay per batch item in order to be able to update
# each item's weight in a prioritized replay buffer.
if len(path) == 1 and path[0] == "td_error":
return np.concatenate(tower_data, axis=0)
elif tower_data[0] is None:
return None
if isinstance(path[-1], str):
# Min stats: Reduce min.
if path[-1].startswith("min_"):
return np.nanmin(tower_data)
# Max stats: Reduce max.
elif path[-1].startswith("max_"):
return np.nanmax(tower_data)
if np.isnan(tower_data).all():
return np.nan
# Everything else: Reduce mean.
return np.nanmean(tower_data)
| [
"noreply@github.com"
] | vakker.noreply@github.com |
3e034c1f69961ac0240eb97d3fa99c041e1ea2e1 | c9803fb67b885214f138a805990d77cf4d714818 | /proof_of_work/deep_q/v0/deepqagentv0.py | 8b0e078ffc9d708f5a50856f8a53fc868448bd46 | [
"MIT"
] | permissive | michaelneuder/parkes_lab_fa19 | e68247ad5253d54f4d6074593a0e63fe61fcfc18 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | refs/heads/master | 2020-07-12T10:32:15.585380 | 2020-01-26T21:45:05 | 2020-01-26T21:45:05 | 204,792,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,374 | py | import copy
from environmentv0 import Environment
from keras.models import clone_model
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import matplotlib.pyplot as plt
plt.style.use('seaborn-muted')
import numpy as np
import progressbar
import time
import util
np.random.seed(0)
class DeepQLearningAgent(object):
def __init__(self, discount, alpha, T, rho):
# MDP
self.alpha = alpha
self.T = T
self.rho = rho
self.exploration_rate = 1
self.exploration_decrease = float(1e-5)
self.min_exploration_rate = 0.1
# deep q
self.learning_rate = 0.001
self.value_model = util.createModel(self.learning_rate)
self.target_model = clone_model(self.value_model)
self.target_model.set_weights(self.value_model.get_weights())
self.learning_update_count = 0
self.max_learning_steps = int(4e4)
self.memories = []
self.training_memory_count = 32
self.discount = discount
self.update_target_frequency = 1000
self.max_memory_count = 10000
self.min_memory_count_learn = 1000
# environment
self.env = Environment(self.alpha, self.T)
# visualization
self.states_visited = np.zeros((self.T+1, self.T+1))
self.steps_before_done = []
self.last_50_steps = []
self.snyc_points = []
self.timing_between_updates = []
self.net_training_time = []
# timing
self.last_target_net_clone = time.time()
def chooseAction(self, current_state):
# explore based on number of visits to that state.
self.exploration_rate -= self.exploration_decrease
current_explore_rate = self.exploration_rate
if self.exploration_rate < self.min_exploration_rate:
current_explore_rate = self.min_exploration_rate
if np.random.uniform() < current_explore_rate:
return np.random.randint(low=0, high=3)
return np.argmax(self.value_model.predict(util.prepareInput(current_state)))
def syncModels(self):
self.target_model = clone_model(self.value_model)
self.target_model.set_weights(self.value_model.get_weights())
def learn(self, iterations=10000):
start_time = time.time()
while self.learning_update_count < self.max_learning_steps:
self.runTrial()
print("total time {:.04f} s".format(time.time() - start_time))
def runTrial(self):
done = False
self.env.reset()
step_counter = 0
while (not done) and (self.learning_update_count < self.max_learning_steps):
step_counter += 1
current_state = self.env.current_state
self.states_visited[current_state] += 1
# take action
action = self.chooseAction(current_state)
new_state, reward, done = self.env.takeAction(action)
reward_value = util.evalReward(self.rho, reward)
# creating a new memory
memory = dict({
'current_state' : current_state,
'action' : action,
'reward' : reward_value,
'new_state' : new_state,
'done' : done
})
self.memories.append(memory)
# training network
if len(self.memories) > self.min_memory_count_learn:
start_training = time.time()
self.trainNeuralNet()
self.net_training_time.append(time.time() - start_training)
self.learning_update_count += 1
# keep memory list finite
if len(self.memories) > self.max_memory_count:
self.memories.pop(0)
# update models
if self.learning_update_count % self.update_target_frequency == 0:
print('global step: {}. syncing models'.format(self.learning_update_count))
update_time = time.time() - self.last_target_net_clone
self.timing_between_updates.append(update_time)
print(' last synced: {:.04f} s ago'.format(update_time))
updates_remaining = (self.max_learning_steps - self.learning_update_count)/ self.update_target_frequency
print(' eta: {:.02f} s'.format(updates_remaining * update_time))
print('*'*30)
self.syncModels()
self.value_model.save('saved_models/value_net_iter{0:06d}.h5'.format(self.learning_update_count))
self.snyc_points.append(self.learning_update_count)
self.last_50_steps.append(np.mean(self.steps_before_done[-50:]))
self.last_target_net_clone = time.time()
self.steps_before_done.append(step_counter)
def trainNeuralNet(self):
memory_subset_indeces = np.random.randint(low=0, high=len(self.memories), size=self.training_memory_count)
memory_subset = [self.memories[i] for i in memory_subset_indeces]
rewards = []
current_states = []
new_states = []
actions = []
dones = []
for memory in memory_subset:
rewards.append(memory['reward'])
current_states.append(memory['current_state'])
new_states.append(memory['new_state'])
actions.append(memory['action'])
dones.append(memory['done'])
current_state_predictions = np.zeros((len(current_states), 3))
new_states_prepped = util.prepareInputs(new_states)
# new_state_predictions = self.target_model.predict(new_states_prepped)
new_state_predictions = [[1,1,1]]
for i in range(len(new_state_predictions)):
total_reward = rewards[i]
if not dones[i]:
total_reward += self.discount * max(new_state_predictions[i])
# clip
if total_reward > 1:
total_reward = 1
elif total_reward < -1:
total_reward = -1
current_state_predictions[i][actions[i]] = total_reward
# fiting model --- this is the neural net training
self.value_model.fit(
np.squeeze(np.asarray(current_states)),
np.squeeze(np.asarray(current_state_predictions)),
epochs=1,
verbose=False)
def main():
qlagent = DeepQLearningAgent(discount=0.99, alpha=0.45, T=9 , rho=0.6032638549804688)
qlagent.learn(iterations=int(5000))
print(qlagent.exploration_rate)
plt.plot(qlagent.net_training_time)
plt.show()
# results
analyzer = util.ResultsAnalyzer(
qlagent.value_model, qlagent.states_visited, qlagent.steps_before_done,
qlagent.last_50_steps, qlagent.snyc_points, qlagent.timing_between_updates)
end_policy = analyzer.extractPolicy()
analyzer.processPolicy(end_policy)
analyzer.plotStatesVisited(save=True)
analyzer.plotLogStatesVisited(save=True)
analyzer.plotStepsCounter(save=True)
analyzer.plotExploration(save=True)
analyzer.plotLast50(save=True)
analyzer.plotTimings(save=True)
if __name__ == "__main__":
main() | [
"michael.neuder@gmail.com"
] | michael.neuder@gmail.com |
553c8bdce9310f714de89a953254547790cb5798 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_14/23.py | d377a8e795b0b42d56e3300bc9c3260586116e75 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | #!/usr/bin/env python
import gmpy,math
import sys
f=sys.stdin
n=int(f.next())
class Case(object):
def __init__(self):
self.res = "IMPOSSIBLE"
N,M,A = map(int,f.next().split())
if N*M < A:
return
for xb in range(N+1):
for yb in range(M+1):
for xc in range(yb,N+1):
for yc in range(xb,M+1):
if abs(xb*yc - xc*yb) == A:
self.res = "%s %s %s %s %s %s"%(0,0,xb,yb,xc,yc)
return
def run(self):
pass
def __str__(self):
return str(self.res)
for case in range(1, n+1):
c=Case()
c.run()
print "Case #%s: %s"%(case,c)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f264b6aefd6e4f3b76d8adff5912a5ebfda45ef3 | 3d89ff4093d989940e7d0e535343a748adb0a87f | /5690-ClosestDessertCost.py | 8389d0bb9e6e364114ec8116c04b9239786fe5fc | [] | no_license | Scott-Larsen/LeetCode | 129585bb3017fbb59c07c22f74afe4309b46c15d | f644afb34f15cd4e310026a00ccf4149ba8daf10 | refs/heads/main | 2021-06-22T23:02:06.515527 | 2021-06-12T16:30:31 | 2021-06-12T16:30:31 | 204,087,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | # 5690. Closest Dessert Cost
# You would like to make dessert and are preparing to buy the ingredients. You have n ice cream base flavors and m types of toppings to choose from. You must follow these rules when making your dessert:
# There must be exactly one ice cream base.
# You can add one or more types of topping or have no toppings at all.
# There are at most two of each type of topping.
# You are given three inputs:
# baseCosts, an integer array of length n, where each baseCosts[i] represents the price of the ith ice cream base flavor.
# toppingCosts, an integer array of length m, where each toppingCosts[i] is the price of one of the ith topping.
# target, an integer representing your target price for dessert.
# You want to make a dessert with a total cost as close to target as possible.
# Return the closest possible cost of the dessert to target. If there are multiple, return the lower one.
class Solution:
def closestCost(
self, baseCosts: List[int], toppingCosts: List[int], target: int
) -> int:
combos = set(baseCosts)
for topping in toppingCosts:
cmbs = list(combos)
for c in cmbs:
combos.add(topping + c)
combos.add(2 * topping + c)
if target in combos:
return target
i = 1
while i <= target:
if target - i in combos:
return target - i
elif target + i in combos:
return target + i
i += 1
return min(baseCosts) | [
"scott@scottlarsen.com"
] | scott@scottlarsen.com |
ff848fbbf9d48acf972b91af78b1a7f35fba2c83 | 53f3eb1730f94f89d9d9d3d80a4182360d4e4420 | /13/utils/scanners.py | 4ed34abbdb482bd8e2e43cd02a596e078e284442 | [
"MIT"
] | permissive | Magnificent-Big-J/advent-of-code-2017 | 964b1da28b4e4f4398a3562baa130d5fdd701e9a | b83a849752c9a045978a0ea5eceb409adbfca0f4 | refs/heads/master | 2021-09-01T06:59:10.604222 | 2017-12-23T14:52:22 | 2017-12-23T14:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | def load_scanners():
layers = {}
layer = 0
with open('input.txt') as f:
for line in f:
data = [int(i) for i in line.split(': ')]
while layer != data[0]:
layers[layer] = {'s': -1, 'd': -1, 'dir': None}
layer += 1
layers[data[0]] = {'s': 0, 'd': data[1], 'dir': 'down'}
layer += 1
return layers
def move_scanners(layers):
for j in layers:
if layers[j]['dir'] == 'down':
if layers[j]['s'] < (layers[j]['d'] - 1):
layers[j]['s'] += 1
else:
layers[j]['s'] -= 1
layers[j]['dir'] = 'up'
elif layers[j]['dir'] == 'up':
if layers[j]['s'] > 0:
layers[j]['s'] -= 1
else:
layers[j]['s'] += 1
layers[j]['dir'] = 'down'
return layers
| [
"chris@chrxs.net"
] | chris@chrxs.net |
0a6db6f5367369ae8bb4340f78ad9fdd04f78a82 | 6a1975a11de163ce0e6a5f001df41758bea3686b | /1047. Remove All Adjacent Duplicates In String/Solution_栈.py | 5bd255435f7a6ac7e19295dca77315666a0668f4 | [] | no_license | Inpurple/Leetcode | 7f08e0e500d37913e9244f08ea8f603b3fc1ce88 | df2bcca72fd303100dbcd73d1dfae44467abbb44 | refs/heads/master | 2020-05-20T02:17:08.430557 | 2019-09-22T07:51:28 | 2019-09-22T07:51:28 | 185,327,908 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class Solution(object):
def removeDuplicates(self, S):
"""
:type S: str
:rtype: str
"""
sta=[]
for i in S:
if sta and sta[-1]==i:
sta.pop()
else:
sta.append(i)
return ''.join(sta)
| [
"noreply@github.com"
] | Inpurple.noreply@github.com |
dcd366a00afd84b0b4dc0d78f57f34f918a3028d | 0fea8a6421fe5f5967f2202910022c2bfd277b4d | /164.生成一个随机的8位密码,要求4个字母和4个数字.py | c4b132ec248696b2fb5da097f6336c9a28df8e14 | [] | no_license | maohaoyang369/Python_exercise | 4dc10ec061aa0de2bcfe59c86be115e135fb3fab | 8fbee8854db76d09e2b1f9365ff55198ddabd595 | refs/heads/master | 2020-04-09T23:04:02.327118 | 2019-09-05T14:49:07 | 2019-09-05T14:49:07 | 160,646,057 | 0 | 2 | null | 2019-03-21T14:44:13 | 2018-12-06T08:50:19 | Python | UTF-8 | Python | false | false | 411 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# 生成一个随机的8位密码,要求4个字母和4个数字
import random
import string
spam_num = random.choices("0123456789", k=4)
print(spam_num)
spam_letters = random.sample(string.ascii_letters, 4)
print(spam_letters)
spam = spam_num+spam_letters
print(spam)
spam_num_letters = random.shuffle(spam)
print(spam)
secrity = "".join(spam)
print(secrity)
| [
"372713573@qq.com"
] | 372713573@qq.com |
07ed60f2ac262214e2aa84b74db7f7fd479050c3 | 5cc204e2ecb9a756127e7c71633a1edcdb3e989b | /pylmp/InKim/BGF_mergeBgf.py | 4e3d973e087898e5159c36b8879389f57020e8c7 | [] | no_license | hopefulp/sandbox | 1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8 | 4d26767f287be6abc88dc74374003b04d509bebf | refs/heads/master | 2023-06-27T17:50:16.637851 | 2023-06-15T03:53:39 | 2023-06-15T03:53:39 | 218,209,112 | 1 | 0 | null | 2022-09-13T13:22:34 | 2019-10-29T05:14:02 | C++ | UTF-8 | Python | false | false | 3,415 | py | #!/opt/applic/epd/bin/python
import sys, re, string, getopt, optparse, math, time
from os import popen
# Defaults for the command-line options parsed below.
# NOTE(review): `bgf_file` and `mod_file` are never used afterwards, and
# `bgf1_file`/`bgf2_file` are only bound when -b/-c are actually given,
# so omitting them makes the call at the bottom of the file fail.
option = ""; args = ""; bgf_file = ""; mod_file = ""; out_file = ""
usage = """
Usage: mergeBGF.py -b bgf1_file -c bgf2_file -o out_file
"""
# Parse short (-h/-b/-c/-o) and long (--help/--bgf1/--bgf2/--out) options.
# This is a Python 2 script (print statements below).
options, args = getopt.getopt(sys.argv[1:], 'hb:c:o:', ['help','bgf1=','bgf2=','out='])
for option, value in options:
    if option in ('-h', '--help'):
        print usage; sys.exit(0)
    elif option in ('-b', '--bgf1'):
        bgf1_file = value
    elif option in ('-c', '--bgf2'):
        bgf2_file = value
    elif option in ('-o', '--out'):
        out_file = value
    elif option in (''):
        # NOTE(review): membership in an empty string is never true,
        # so this branch is dead code.
        print usage; sys.exit(0)
def mergebgf(bgf1_file, bgf2_file, out_file):
print(options)
# read bgf 1 and bgf 2
f_bgf1_file = open(bgf1_file)
f_bgf2_file = open(bgf2_file)
f_out_file = open(out_file,'w')
bgf1_atom_data = []; bgf2_atom_data = []; bgf1_conect_data = []; bgf2_conect_data = []
n_atoms_1 = 0; n_atoms_2 = 0
while 1:
line = f_bgf1_file.readline()
if not line:
break
if 'HETATM' in line:
n_atoms_1 += 1
parse = re.split('\s*', line)
bgf1_atom_data.append(parse)
if 'FORMAT' in line:
continue
if 'CONECT' in line:
parse = re.split('\s*', line)
parse = parse[:-1]
bgf1_conect_data.append(parse)
while 1:
line = f_bgf2_file.readline()
if not line:
break
if 'HETATM' in line:
n_atoms_2 += 1
parse = re.split('\s*', line)
bgf2_atom_data.append(parse)
if 'FORMAT' in line:
continue
if 'CONECT' in line:
parse = re.split('\s*', line)
parse = parse[:-1]
bgf2_conect_data.append(parse)
# add n_atom_1 to atom id of bgf 2
#margin = int(math.ceil(n_atoms_1 / 10.0)*10)
#print(margin)
margin = n_atoms_1
for atom in bgf2_atom_data:
atom[1] = str(int(atom[1]) + margin)
for conect in bgf2_conect_data:
n_conect = len(conect)
for i in xrange(1, n_conect):
conect[i] = str(int(conect[i]) + margin)
# merge the file sequentially: 1 -> 2
f_bgf1_file.seek(0)
f_bgf2_file.seek(0)
# header
while 1:
line = f_bgf1_file.readline()
if not line:
break
if 'HETATM' in line:
break
f_out_file.write(line)
# atom data of bgf1
for item in bgf1_atom_data:
item[6] = float(item[6])
item[7] = float(item[7])
item[8] = float(item[8])
item[12] = float(item[12])
wline = '{0:>6} {1:>5} {2:<5} {3:3} {4:<1}{5:>5} {6:>10.5f}{7:>10.5f}{8:>10.5f} {9:<5}{10:3}{11:2} {12:>8.5f}'.format(*item)
wline += '\n'
f_out_file.write(wline)
# atom data of bgf2
for item in bgf2_atom_data:
item[6] = float(item[6])
item[7] = float(item[7])
item[8] = float(item[8])
item[12] = float(item[12])
wline = '{0:>6} {1:>5} {2:<5} {3:3} {4:<1}{5:>5} {6:>10.5f}{7:>10.5f}{8:>10.5f} {9:<5}{10:3}{11:2} {12:>8.5f}'.format(*item)
wline += '\n'
f_out_file.write(wline)
f_out_file.write('FORMAT CONECT (a6,12i6)\n')
wline = ""
for item in bgf1_conect_data:
for i in xrange(0, len(item)):
wline += '{0:>6}'.format(item[i])
wline += '\n'
f_out_file.write(wline)
wline = ""
for item in bgf2_conect_data:
for i in xrange(0, len(item)):
wline += '{0:>6}'.format(item[i])
wline += '\n'
f_out_file.write(wline)
f_out_file.write("END\n")
f_out_file.write("")
f_out_file.close()
#return 1
# main call
mergebgf(bgf1_file, bgf2_file, out_file)
| [
"hopefulp@gmail.com"
] | hopefulp@gmail.com |
4b33c4af014c182b96c8f0f664c28eb3b5f7d2b0 | 50d6a01aac56215c166d5659196dbcbcbf48c5d2 | /mongo/src/conn.py | 6bbf8572d94f3e70610b34726d5e16f6696228d2 | [] | no_license | HackUPCCrew/MachineLearning | c66541709165382b3c1e15c5d51bc2b068f57948 | 7697dcdf73a8e0a24f8793118612cbbf25653153 | refs/heads/master | 2021-07-14T01:25:59.521438 | 2017-10-17T19:35:45 | 2017-10-17T19:35:45 | 106,882,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | #!/usr/bin/env python3
from pymongo import MongoClient
from pprint import pprint

# Connect to a hard-coded remote MongoDB instance and pretty-print the
# output of the admin "serverStatus" command.
# NOTE(review): credential-free URI with a public IP baked into the code --
# consider moving the connection string to configuration.
client = MongoClient("mongodb://34.224.70.221:8080")
db=client.admin
serverStatusResult=db.command("serverStatus")
pprint(serverStatusResult)
| [
"krishnakalyan3@gmail.com"
] | krishnakalyan3@gmail.com |
5c9f74d4f9302e90ca39b1dd80dce303ed88f773 | aedd3aeadfb13eda4489d26ee3d9762598878936 | /leetcode/1281. 整数的各位积和之差.py | f67fa9eb3bba6026574b965d007dd6e0b0c201b1 | [] | no_license | AnJian2020/Leetcode | 657e8225c4d395e8764ef7c672d435bda40584c7 | cded97a52c422f98b55f2b3527a054d23541d5a4 | refs/heads/master | 2023-03-26T16:25:36.136647 | 2021-03-26T07:04:10 | 2021-03-26T07:04:10 | 283,940,538 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | class Solution:
def subtractProductAndSum(self, n: int) -> int:
numList=list(str(n))
sum=0
product=1
for item in range(len(numList)):
numList[item]=int(numList[item])
sum+=numList[item]
product*=numList[item]
result=product-sum
return result
if __name__ == "__main__":
print(Solution().subtractProductAndSum(4421)) | [
"xuhao2018@foxmail.com"
] | xuhao2018@foxmail.com |
9b4871a27d15086682164ca0e12198fdb16cab67 | a4344e89e7f467d8bfd3f000f8cced17e36bfd70 | /predict.py | 3a781070190775ab4d7ab85cabf0b6a3f4912cfa | [] | no_license | Schnei1811/InsectClassifier | 5b8d90e21dd23857af82aa26d048591bb70a2cf5 | b8c22a103b7f2099058f4994681a8b2babc147a2 | refs/heads/master | 2023-04-18T08:51:07.753666 | 2021-03-14T03:07:49 | 2021-03-14T03:07:49 | 347,531,957 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | import argparse
import cv2
from glob import glob
from tqdm import tqdm
import numpy as np
import os
import torch, torchvision
import torch.nn as nn
from torchvision import models, transforms
import json
import csv
# Side length (pixels) of the square network input; images are letterboxed
# to img_size x img_size by buildImageAspectRatio().
# (The original comment incorrectly said "number of classes".)
img_size = 224
class GlobalAvgPool2d(nn.Module):
    """Global average pooling: mean over all spatial positions, keeping
    the (batch, channel) dimensions."""

    def forward(self, x):
        flat = x.view(x.size(0), x.size(1), -1)
        return flat.mean(dim=2)
def initialize_model(arch, num_classes):
    """Build a torchvision backbone ('resnet', 'mobilenet' or 'densenet')
    and attach a GlobalAvgPool2d + MLP classifier head ending in
    num_classes logits.  Unknown arch names print an error and quit().
    """
    # Initialize these variables which will be set in this if statement. Each of these
    # variables is model specific.
    model_ft = None
    if arch == "resnet":
        """ Resnet101 """
        model_ft = models.resnet101()
        num_ftrs = model_ft.fc.in_features
        # NOTE(review): ResNet classifies through `fc`, so the `classifier`
        # head attached below is unused for this arch -- confirm against
        # the checkpoints being loaded before changing anything here.
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
    elif arch == "mobilenet":
        """ Mobilenet """
        model_ft = models.mobilenet_v2()
        num_ftrs = model_ft.classifier[1].in_features
    elif arch == "densenet":
        """ Densenet """
        model_ft = models.densenet201() #DenseNet201
        num_ftrs = model_ft.classifier.in_features
    else:
        print(f"Unknown model name {arch}. Choose from resnet, mobilenet, or densenet")
        quit()
    # Shared classifier head; the exact module layout must stay untouched so
    # that saved state_dicts keep loading.
    model_ft.classifier = nn.Sequential(
        GlobalAvgPool2d(), #Equivalent to GlobalAvgPooling in Keras
        # nn.Linear(1920, 1024),
        nn.Linear(num_ftrs, 1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        nn.ReLU(),
        nn.Linear(1024, 512),
        nn.ReLU(),
        nn.Linear(512, num_classes))
    return model_ft
class CustomDataset(torch.utils.data.Dataset):
    """Dataset over pre-loaded images plus their file paths.

    Each item is (normalized CHW float tensor, original file path).
    """

    def __init__(self, X_images, X_paths):
        self.X_images = X_images
        self.X_paths = X_paths

    def __len__(self):
        return len(self.X_images)

    def __getitem__(self, idx):
        # Scale uint8 pixels to [0, 1] floats, then apply the standard
        # ImageNet normalization via torchvision transforms.
        img = self.X_images[idx].astype("float32") / 255.0
        pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        return (pipeline(img), self.X_paths[idx])
def buildImageAspectRatio(X_path):
    """Read an image from disk and letterbox it, aspect ratio preserved,
    onto a white img_size x img_size uint8 RGB canvas."""
    img = cv2.cvtColor(cv2.imread(X_path), cv2.COLOR_BGR2RGB)
    longest = max(img.shape)
    new_w = int(img.shape[1] * img_size / longest)
    new_h = int(img.shape[0] * img_size / longest)
    off_x = (img_size - new_w) // 2
    off_y = (img_size - new_h) // 2
    scaled = cv2.resize(img, (new_w, new_h))
    canvas = np.full((img_size, img_size, 3), 255, dtype="uint8")
    canvas[off_y:scaled.shape[0] + off_y, off_x:scaled.shape[1] + off_x, :] = scaled
    return canvas
def createData(data_name, X_paths):
    """Letterbox every image in X_paths and cache the result as .npy files.

    Images are written in batches of up to 1000 under Arrays_Batches/, then
    re-loaded and stacked into a single Arrays_Data/<name>_Input_<N>.npy.
    NOTE(review): repeated np.vstack grows quadratically with batch size;
    acceptable here because batches are capped at 1000 rows.
    """
    if not os.path.exists("Arrays_Batches"):
        os.makedirs("Arrays_Batches")
    if not os.path.exists("Arrays_Data"):
        os.makedirs("Arrays_Data")
    reset = True        # start a fresh batch on the next image
    data_batch = 0      # number of batch files written so far
    for i, X_path in enumerate(tqdm(X_paths)):
        if reset == True:
            reset = False
            X = np.expand_dims(buildImageAspectRatio(X_path), axis=0)
        else:
            X = np.vstack((X, np.expand_dims(buildImageAspectRatio(X_path), axis=0)))
        # Flush a full batch every 999 images (and also at the end).
        if not i == 0 and i % 999 == 0:
            reset = True
            np.save(f"Arrays_Batches/{data_name}_Input_{data_batch}_{len(X)}.npy", X)
            data_batch += 1
        if i == len(X_paths) - 1:
            np.save(f"Arrays_Batches/{data_name}_Input_{data_batch}_{len(X)}.npy", X)
            data_batch += 1
    # Reload all batch files (glob resolves the per-batch length suffix)
    # and stack them into a single array.
    data_paths = []
    for batch in range(data_batch):
        data_paths.append(glob(f'Arrays_Batches/{data_name}_Input_{batch}_*')[0])
    for i, data_path in enumerate(tqdm(data_paths)):
        data = np.load(data_path)
        if i == 0:
            X = data
        else:
            X = np.vstack((X, data))
    np.save(f'Arrays_Data/{data_name}_Input_{len(X)}.npy', X)
def test_model(model, dataloader, device, num_to_class, report_csv):
    """Run inference over `dataloader` and write predictions to pred.csv.

    Appends [image_name, class_label] rows to `report_csv` (mutated in
    place); the CSV is written once after the whole loader is consumed.
    """
    model.eval()
    preds_array = np.array([])  # NOTE(review): accumulated but never read
    for inputs, paths in tqdm(dataloader):
        inputs = inputs.to(device)
        outputs = model(inputs)
        # argmax over the class dimension -> predicted class indices
        _, preds = torch.max(outputs, 1)
        preds_cpu = preds.cpu().numpy()
        preds_array = np.append(preds_array, preds_cpu)
        for i, pred in enumerate(preds_cpu):
            img_name = paths[i].split("/")[-1]
            report_csv.append([img_name, num_to_class[pred]])
    csv_path = f"pred.csv"
    with open(csv_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(report_csv)
def main(data_name, arch, model_name, batch_size):
    """Load the cached image array and label map, rebuild the network,
    load trained weights from models/<arch>/<model_name>, and write
    predictions for every file under extracted/."""
    report_csv = [["file_path", "prediction (Order_Family)"]]
    with open(f"metadata/{data_name}_num_to_class.json") as f:
        num_to_class = json.load(f)
    # JSON keys are strings; convert back to integer class indices.
    num_to_class = {int(k):v for k,v in num_to_class.items()}
    num_classes = len(num_to_class)
    X_paths = glob("extracted/*")
    # Cached array filename encodes the image count; rebuild if missing.
    input_file_path = f"Arrays_Data/{data_name}_Input_{len(X_paths)}.npy"
    if not os.path.exists(input_file_path):
        createData(data_name, X_paths)
    X = np.load(input_file_path)
    image_dataset = CustomDataset(X, X_paths)
    dataloader = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    model_ft = initialize_model(arch, num_classes)
    # Detect if we have a GPU available
    # if torch.cuda.device_count() > 1:
    #     print("Let's use", torch.cuda.device_count(), "GPUs!")
    # Wrapping in DataParallel before load_state_dict suggests the
    # checkpoints were saved with "module."-prefixed keys -- TODO confirm.
    model_ft = nn.DataParallel(model_ft)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_ft = model_ft.to(device)
    model_path = os.path.join("models", arch, model_name)
    model_ft.load_state_dict(torch.load(model_path))
    test_model(model_ft, dataloader, device, num_to_class, report_csv)
if __name__ == "__main__":
    # CLI entry point; defaults match the shipped example checkpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_name", default="Alus")
    parser.add_argument("--arch", default="mobilenet") #densenet, resnet, mobilenet
    parser.add_argument("--model_name", default="0_0.9765853658536585_450.pt")
    parser.add_argument("--batch_size", default=32, type=int)
    args = parser.parse_args()
main(args.data_name, args.arch, args.model_name, args.batch_size) | [
"stefan871@gmail.com"
] | stefan871@gmail.com |
36bd450f476d6d992245f98f6ee62e8f0459c471 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 10082 - WERTYU/main.py | f4cf71c10e1e6bc76858ecb5779421a0e7b80c6f | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | '''
Created on Jun 18, 2013
@author: Yubin Bai
All rights reserved.
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 30
def solve(par):
    """UVa 10082 (WERTYU): translate each typed character one key to the
    LEFT on a QWERTY keyboard; spaces map to themselves.

    Bug fix: the home row contained a typo ("asdfghjhkl"/"sdfghjhkl;"),
    which made H map to J and K map to H instead of H->G, K->J.  The rows
    are also extended with the ']'->'[' neighbour pair for '\\' and the
    ';' neighbour pair for "'", which the problem's character set includes.
    """
    # r2[i] is a key, r1[i] is the key immediately to its left.
    r1 = '`1234567890-' + 'qwertyuiop[]' + "asdfghjkl;" + 'zxcvbnm,.'
    r2 = '1234567890-=' + 'wertyuiop[]\\' + "sdfghjkl;'" + 'xcvbnm,./'
    d = {' ': ' '}
    for k, v in zip(r2, r1):
        d[k.upper()] = v.upper()
    word = par
    result = []
    for c in word:
        result.append(d[c])
    return ''.join(result)
class Solver:
    """Contest harness: reads input.txt, runs solve() on each case
    (optionally via a process pool), writes results to output.txt."""

    def getInput(self):
        # Reads a single line as the only test case.
        # NOTE(review): WERTYU inputs may span many lines -- confirm.
        self.numOfTests = 1
        self.input = []
        word = self.fIn.readline().strip()
        self.input.append((word))

    def __init__(self):
        self.fIn = open('input.txt')
        self.fOut = open('output.txt', 'w')
        self.results = []

    def parallel(self):
        # Fan the cases out over 4 worker processes and time the run.
        self.getInput()
        p = Pool(4)
        millis1 = int(round(time.time() * 1000))
        self.results = p.map(solve, self.input)
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def sequential(self):
        # Same as parallel() but runs the cases in-process, one by one.
        self.getInput()
        millis1 = int(round(time.time() * 1000))
        for i in self.input:
            self.results.append(solve(i))
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def makeOutput(self):
        # NOTE(review): "Case #N:" prefix is Code-Jam style, not UVa's
        # expected raw output -- verify against the judge's format.
        for test in range(self.numOfTests):
            self.fOut.write("Case #%d: %s\n" % (test + 1, self.results[test]))
        self.fIn.close()
        self.fOut.close()
if __name__ == '__main__':
    # Entry point: the module-level parallelSolve flag selects the driver.
    solver = Solver()
    if parallelSolve:
        solver.parallel()
    else:
        solver.sequential()
| [
"xenron@outlook.com"
] | xenron@outlook.com |
14da669856411f17a43c79936abfc07ed1dc2c1c | d9e0406c275417791024f97abc0600c96910633f | /question/migrations/0005_auto_20210510_1025.py | 549c6daf667227fa8927f506dd141be48c18de81 | [] | no_license | lesage20/vuejs | 46c75e7528ae6e9834f351ed4f814869fae417ac | da0522280dd1e6cf858c90758f38c4da963785a1 | refs/heads/main | 2023-04-19T12:20:54.672778 | 2021-05-12T08:42:34 | 2021-05-12T08:42:34 | 366,649,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 3.1.7 on 2021-05-10 10:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes Question.prop (adds blank=True /
    # related_name).  NOTE(review): null=True has no effect on a
    # ManyToManyField -- Django ignores it and emits check warning W340.

    dependencies = [
        ('question', '0004_question_titre'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='prop',
            field=models.ManyToManyField(blank=True, null=True, related_name='question', to='question.Proposition'),
        ),
    ]
| [
"angezanou00@gmail.com"
] | angezanou00@gmail.com |
b6b1f4eed9917b484d8c00356460fcc6a66f2a3b | 04e080a00f37a3501c5060380d65c5a6cd669d90 | /thonnycontrib/m5stack/esp8266_api_stubs/uhashlib.py | 02e34e1294866de8ac040af1dc3d2d19287c2acc | [
"MIT"
] | permissive | thonny/thonny-m5stack | 473a2876e72b88d283d8b9d64189028ef7fea111 | a502579ad5e264342ae0bc2c554c78527053693b | refs/heads/master | 2020-04-20T14:57:15.605699 | 2019-11-18T22:28:36 | 2019-11-18T22:28:36 | 168,914,658 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py |
# API stub for MicroPython's uhashlib sha1 on the ESP8266.  Used only for
# editor completion/analysis, never executed -- hence the empty docstring
# and the methods deliberately declared without `self`.
class sha1:
    ''
    def digest():
        pass
    def update():
        pass
# API stub for MicroPython's uhashlib sha256 (editor completion only;
# stub convention: methods without `self`, empty docstring).
class sha256:
    ''
    def digest():
        pass
    def update():
        pass
| [
"aivar.annamaa@gmail.com"
] | aivar.annamaa@gmail.com |
199dc7283ca9dfcf42146c61abe183d9955ad52c | 84dbd7dfdc2c63433b2088dd3fe711a07cf8b3b8 | /week13/day1/daily/phonedir/views.py | e34357f2c7c7e8e170399766febe68bd94355371 | [] | no_license | jfrance00/di-exercises | 623bebeddd3ff3ed062e1ad5097f15f7ed002362 | bbc97714c26b41ed76dfed35df5780e3aa482b5e | refs/heads/master | 2022-11-27T23:31:59.742231 | 2020-07-29T12:20:01 | 2020-07-29T12:20:01 | 257,882,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import flask
import wtforms as wtf
from . import forms, models
from . import app, db
@app.route('/', methods=['GET', 'POST'])
def index():
    # Landing page: renders the search form.  The POST/search handling
    # itself is not implemented here.
    form = forms.SearchNameOrNumber()
    return flask.render_template('index.html', form=form)
@app.route('/add-input', methods=['GET', 'POST'])
def add_input():
    """Show the add-person form; on POST, insert the submitted Person.

    NOTE(review): form data is trusted as-is (no form.validate() call),
    and a duplicate primary key would surface as a DB-layer error.
    """
    form = forms.AddPersonToDatabase()
    if flask.request.method == "POST":
        id = form.id.data  # shadows the `id` builtin (local scope only)
        name = form.name.data
        phone = form.phone.data
        email = form.email.data
        address = form.address.data
        entry = models.Person(id=id, name=name, phone=phone, email=email, address=address)
        db.session.add(entry)
        db.session.commit()
        flask.flash(f'{name} added successfully')
return flask.render_template('add-input.html', form=form) | [
"jfrance00@gmail.com"
] | jfrance00@gmail.com |
f8eb219a525fd3d56b4f5fae1875cccf536032f1 | ee7ca0fed1620c3426fdfd22e5a82bba2a515983 | /dsn_purchase_order/models/purchase.py | 843a5a966c61fcaa6cf586e164ee2d06c78cc085 | [] | no_license | disna-sistemas/odoo | 318d0e38d9b43bea56978fe85fc72850d597f033 | 0826091462cc10c9edc3cc29ea59c417f8e66c33 | refs/heads/8.0 | 2022-03-08T19:01:21.162717 | 2022-02-15T13:06:26 | 2022-02-15T13:06:26 | 99,210,381 | 0 | 5 | null | 2019-07-24T08:49:58 | 2017-08-03T08:36:55 | Python | UTF-8 | Python | false | false | 5,912 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp import tools, _
class dsnPurchaseOrder(models.Model):
    """Re-sort purchase orders: newest order date first, then by name."""
    _inherit = "purchase.order"
    _order = "date_order desc, name"
class dsnPurchaseOrderLine(models.Model):
    """Purchase order line extension: id ordering + obsolete-product warning."""
    _inherit = "purchase.order.line"
    # _order = "date_planned desc, name"
    _order = "id"

    @api.multi
    @api.onchange('product_id')
    def dsn_warning_obsolete(self):
        """Onchange helper: warn when the selected product is obsolete.

        Returns an onchange dict with a 'warning' entry when the product's
        state is 'obsolete', otherwise an empty dict.
        """
        self.ensure_one()
        res = {}
        # Bug fix: the original read `self.product.state`, but this model
        # has no `product` field (it is `product_id`), so selecting any
        # product raised an AttributeError before the state check ran.
        # The unused `_obsolete` local has also been removed.
        if self.product_id and self.product_id.state == 'obsolete':
            res = {'warning': {'title': _('Obsolete Product'), 'message': _(
                'This product is obsolete')}}
        return res
class dsnPurchasereport(models.Model):
    """Extend the purchase analysis view with the template's second
    category (dsncat2_id) and rebuild the backing SQL view."""
    _inherit = "purchase.report"

    # Second product category, read straight from product_template in the
    # view below (read-only: the view computes it).
    dsncat2_id = fields.Many2one(comodel_name='product.category',
                                 string='Cat2',
                                 readonly=True)

    def init(self, cr):
        # Drop and recreate the purchase_report view so it carries
        # t.dsncat2_id in both the select list and the group by.
        tools.sql.drop_view_if_exists(cr, 'purchase_report')
        cr.execute("""
            create or replace view purchase_report as (
                WITH currency_rate (currency_id, rate, date_start, date_end) AS (
                    SELECT r.currency_id, r.rate, r.name AS date_start,
                        (SELECT name FROM res_currency_rate r2
                         WHERE r2.name > r.name AND
                               r2.currency_id = r.currency_id
                         ORDER BY r2.name ASC
                         LIMIT 1) AS date_end
                    FROM res_currency_rate r
                )
                select
                    min(l.id) as id,
                    s.date_order as date,
                    l.state,
                    s.date_approve,
                    s.minimum_planned_date as expected_date,
                    s.dest_address_id,
                    s.pricelist_id,
                    s.validator,
                    spt.warehouse_id as picking_type_id,
                    s.partner_id as partner_id,
                    s.create_uid as user_id,
                    s.company_id as company_id,
                    l.product_id,
                    t.categ_id as category_id,
                    t.dsncat2_id,
                    t.uom_id as product_uom,
                    s.location_id as location_id,
                    sum(l.product_qty/u.factor*u2.factor) as quantity,
                    extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
                    extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
                    count(*) as nbr,
                    sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
                    avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
                    sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
                    (sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
                from purchase_order_line l
                    join purchase_order s on (l.order_id=s.id)
                        left join product_product p on (l.product_id=p.id)
                        left join product_template t on (p.product_tmpl_id=t.id)
                        LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
                        left join product_uom u on (u.id=l.product_uom)
                        left join product_uom u2 on (u2.id=t.uom_id)
                        left join stock_picking_type spt on (spt.id=s.picking_type_id)
                    join currency_rate cr on (cr.currency_id = s.currency_id and
                        cr.date_start <= coalesce(s.date_order, now()) and
                        (cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
                group by
                    s.company_id,
                    s.create_uid,
                    s.partner_id,
                    u.factor,
                    s.location_id,
                    l.price_unit,
                    s.date_approve,
                    l.date_planned,
                    l.product_uom,
                    s.minimum_planned_date,
                    s.pricelist_id,
                    s.validator,
                    s.dest_address_id,
                    l.product_id,
                    t.categ_id,
                    t.dsncat2_id,
                    s.date_order,
                    l.state,
                    spt.warehouse_id,
                    u.uom_type,
                    u.category_id,
                    t.uom_id,
                    u.id,
                    u2.factor
            )
        """)
| [
"sistemas@disna.com"
] | sistemas@disna.com |
b93664d963b69f1fac7d321eef6d9a3d5390debd | ad357cfbec64afb8f4cc4043b212996768f9755c | /api/barriers/migrations/0038_auto_20200224_1622.py | 4bd3709007de448df0b1ecfc265d0b5a0b6953ae | [
"MIT"
] | permissive | uktrade/market-access-api | 6b4680e6455eb5c25480ccd3e3d9445654269f36 | 4da26d1be53843d22411577409d9489010bdda09 | refs/heads/master | 2023-08-30T14:47:10.373148 | 2023-08-29T13:58:08 | 2023-08-29T13:58:08 | 131,856,014 | 2 | 3 | MIT | 2023-09-14T08:04:42 | 2018-05-02T13:38:37 | Python | UTF-8 | Python | false | false | 968 | py | # Generated by Django 2.2.8 on 2020-02-24 16:22
from django.db import migrations
def populate_archived_reason(apps, schema_editor):
BarrierInstance = apps.get_model("barriers", "BarrierInstance")
BarrierInstance.objects.filter(
archived=True,
archived_reason__isnull=True,
).update(archived_reason="OTHER", archived_explanation="Archive reason unknown")
def unpopulate_archived_reason(apps, schema_editor):
BarrierInstance = apps.get_model("barriers", "BarrierInstance")
BarrierInstance.objects.filter(
archived=True,
archived_reason="OTHER",
archived_explanation="Archive reason unknown",
).update(
archived_reason=None,
archived_explanation=None,
)
class Migration(migrations.Migration):
dependencies = [
("barriers", "0037_auto_20200224_1552"),
]
operations = [
migrations.RunPython(populate_archived_reason, unpopulate_archived_reason),
]
| [
"noreply@github.com"
] | uktrade.noreply@github.com |
d38479e4f3d5d36e535a5c308876ea91eff7adfb | aac11cb909c13b0f24e90e18bca098d0f52c048d | /makewiki/settings.py | e58696ce2984e1f9b308be7eea134585c125558a | [
"MIT"
] | permissive | LukazDane/makewiki_v2 | 7add2002bc9c9813a66461305b56b3b92ffe3c36 | d71790c99951ed47d202e5a00d1eb7480b8552bd | refs/heads/master | 2022-04-30T00:08:06.716424 | 2020-01-08T01:11:26 | 2020-01-08T01:11:26 | 221,078,482 | 0 | 0 | MIT | 2022-04-22T22:44:16 | 2019-11-11T22:04:27 | Python | UTF-8 | Python | false | false | 4,100 | py | """
Django settings for makewiki project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '1yct-t!2bnkgc7j59z+9cdd2k)@y+ftqor$!aya()3if^cnlo-'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['localhost', 'makewiki-lh.herokuapp.com']

# Application definition
INSTALLED_APPS = [
    'rest_framework',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accounts.apps.AccountsConfig', # new
    'wiki',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django_currentuser.middleware.ThreadLocalUserMiddleware',
]

ROOT_URLCONF = 'makewiki.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
            # Always use forward slashes, even on Windows.
            # Don't forget to use absolute paths, not relative paths.
            os.path.join(BASE_DIR, 'templates').replace('\\', '/'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'makewiki.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'wiki.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

# wiki app settings
WIKI_PAGE_TITLE_MAX_LENGTH = 600

# Where to redirect during authentication
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
DEFAULT_LOGOUT_URL = '/'

# Required for Heroku
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# PROTIP:
# Need to override settings? Create a local_settings.py file
# in this directory, and add settings there.
try:
    # Bug fix: the original did `from makewiki.settings import *`, i.e. a
    # self-import of this very module, so local_settings.py was never
    # loaded and machine-local overrides silently had no effect.
    from makewiki.local_settings import *  # noqa: F401,F403
except ImportError:
    pass
| [
"deandrevidal@aol.com"
] | deandrevidal@aol.com |
51eb6471c303f10aa5f7d41241c0f542184c8c79 | 5d0e76e3c741adc120ce753bacda1e723550f7ac | /724. Find Pivot Index.py | d578d962c4b85d3f422ade4922f5502c890f4700 | [] | no_license | GoldF15h/LeetCode | d8d9d5dedca3cce59f068b94e2edf986424efdbf | 56fcbede20e12473eaf09c9d170c86fdfefe7f87 | refs/heads/main | 2023-08-25T12:31:08.436640 | 2021-10-20T04:36:23 | 2021-10-20T04:36:23 | 392,336,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | class Solution:
def pivotIndex(self, nums: List[int]) -> int:
right_sum = sum(nums)
left_sum = 0
prev = 0
for i in range(len(nums)) :
left_sum += prev
prev = nums[i]
right_sum -= nums[i]
if left_sum == right_sum :
return i
return -1 | [
"todsapon.singsunjit@gmail.com"
] | todsapon.singsunjit@gmail.com |
e6361dfa82714822273013df5ab2d96aacb6a6a4 | f366c19ce822a3e8f3cd5f670b25c6fa54322d0b | /python_udemy/introducao-python/iterando-strings-while.py | 7f9c554e14042ea18c66b656f267db6dbad27279 | [] | no_license | marcelomatz/py-studiesRepo | b83875a366010c9a60bc15d853fcf81c31cee260 | ce99014228f00d8c73cc548dd6c4d5fedc3f1b68 | refs/heads/main | 2023-09-05T00:03:47.712289 | 2021-06-15T09:43:27 | 2021-06-15T09:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # iterar é passar por cada um dos elementos de uma string
# If it has an index, it is iterable.  (Teaching example, comments
# translated from Portuguese: walk the string with a while loop and
# upper-case every 'r'.)
frase = 'o rato roeu a roupa do rei de roma'
tamanho_frase = len(frase)
contador = 0        # current index into frase
nova_string = ''    # result accumulator
while contador < tamanho_frase:
    letra = frase[contador]
    if letra == 'r':
        nova_string += 'R'
    else:
        nova_string += letra
    contador += 1
print(nova_string)
| [
"agenciahipster@gmail.com"
] | agenciahipster@gmail.com |
dc910c5e544db2555849a7d275f3d49ddc8c3178 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/nckkem001/question1.py | db30afcae4f531091f93b33fe973fe4a0f450d70 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | """Program to analyse student marks from source file and determine which students
are advised to consult an advisor.
Kemeshan Naicker
11 May 2014"""
#Prompt user for name of source file.
# NOTE(review): the variable shadows the builtin name `file` (harmless in
# Python 3, where that builtin no longer exists).
file = input("Enter the marks filename:\n")
#Open file for processing
txtfile = open(file, "r")
#Read file into a string, and replace newline characters with spaces in order to
#read string into a list.
markslist = txtfile.read()
txtfile.close()
markslist = markslist.split("\n")
markslist = " ".join(markslist)
markslist = markslist.split(",")
markslist = " ".join(markslist)
#Read string into a list.
markslist = markslist.split()
# Alternating tokens: student name, then that student's mark.
marks = []
students = []
for i in range (0, len(markslist), 2):
    students.append(markslist[i])
    # NOTE(review): eval() on file contents executes arbitrary expressions;
    # float(markslist[i+1]) would be the safe equivalent here.
    marks.append(eval(markslist[i+1]))
#Calculate standard deviation.
# (Population standard deviation: divides by N, not N-1.)
total = 0
N = len(marks)
for i in marks:
    total += i
avrg = total/N
sdsum = 0
for i in marks:
    sdsum += (i - avrg)**2
sd = (sdsum/N)**(1/2)
#Find students who are below one standard deviation of the mean and append them
#to a new list.
fail_list = []
for i in range(N):
    if marks[i] < (avrg - sd):
        fail_list.append(students[i])
#Print output.
print("The average is: {0:0.2f}".format(avrg))
print("The std deviation is: {0:0.2f}".format(sd))
if len(fail_list) > 0:
    print("List of students who need to see an advisor:")
    for i in fail_list:
        print(i)
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
70ddfb5469533e9612ff63f5df784bca6e0d927f | 27a31ec197f5603fe6fb438171a78bb381bf43b1 | /examples/cifar10_cnn.py | f296605e6cfb4b38459aefcfd189cdd36da0de7b | [
"MIT"
] | permissive | seba-1511/gsoc15-demo | 42152c335e6eb8e91479dee4ab0db5376ba55ec4 | 7fa542f33fdb39d73e2b11318c046ecf35fb9bcf | refs/heads/master | 2021-01-18T14:34:28.686048 | 2015-04-20T02:26:10 | 2015-04-20T02:26:10 | 33,458,769 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
'''
Train a (fairly simple) deep CNN on the CIFAR10 small images dataset.
GPU run command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
# Python 2 / early-Keras (0.x, Theano backend) training script.
batch_size = 32
nb_classes = 10
nb_epoch = 25
data_augmentation = True

# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data(test_split=0.1)
print X_train.shape[0], 'train samples'
print X_test.shape[0], 'test samples'

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Two conv blocks (32 then 64 filters), each: conv -> relu -> conv ->
# relu -> maxpool -> dropout, followed by a dense classifier head.
model = Sequential()

model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

# 32x32 input halved twice by pooling -> 8x8 spatial map with 64 channels.
model.add(Flatten(64*8*8))
model.add(Dense(64*8*8, 512, init='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='normal'))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

if not data_augmentation:
    print "Not using data augmentation or normalization"
    X_train = X_train.astype("float32")
    X_test = X_test.astype("float32")
    X_train /= 255
    X_test /= 255
    # NOTE(review): trains for 10 epochs here, not nb_epoch -- confirm.
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print 'Test score:', score
else:
    print "Using real time data augmentation"
    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=True, # set input mean to 0 over the dataset
        samplewise_center=False, # set each sample mean to 0
        featurewise_std_normalization=True, # divide inputs by std of the dataset
        samplewise_std_normalization=False, # divide each input by its std
        zca_whitening=False, # apply ZCA whitening
        rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
        horizontal_flip=True, # randomly flip images
        vertical_flip=False) # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    for e in range(nb_epoch):
        print '-'*40
        print 'Epoch', e
        print '-'*40
        print "Training..."
        # batch train with realtime data augmentation
        # (in Keras 0.x, datagen.flow yields exactly one pass over the data)
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])
        print "Testing..."
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
| [
"seba-1511@hotmail.com"
] | seba-1511@hotmail.com |
b0343599369edefd5045f582b653e85406f9da25 | 5ce2e7ac259fa4482a9b5cb668346cbf14bc9a2d | /src/plt_roc.py | 223fee4ae7c5666b1d79a3ea41deda5ae39b1a20 | [] | no_license | Sapphirine/Analysis-on-Children-Learning-Performance | 708e65d1a0330fec6c873a5b0a96b9198b9fe7a4 | da522fc9019238c8cc332045b40541578ffc6ba0 | refs/heads/master | 2020-11-26T17:44:55.074527 | 2019-12-20T00:55:30 | 2019-12-20T00:55:30 | 229,163,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import os
from src.model import model
def clear_temp():
    """Delete every generated .png image from the three result folders
    static/temp/result_1 .. static/temp/result_3."""
    for idx in range(1, 4):
        folder = "static/temp/result_" + str(idx) + "/"
        pngs = [entry for entry in os.listdir(folder) if entry.endswith(".png")]
        for entry in pngs:
            os.remove(folder + entry)
def create_pic(test_num, names, model_name):
    """Run the model selected by *model_name* and record its output.

    The output image name is one greater than the largest name already in
    names[model_name] (or '1' for the first run).  Returns (name, score).
    """
    used_names = names[model_name]
    name = str(max(used_names) + 1) if used_names else '1'
    digit = model_name[-1]
    folder_name = 'result_' + digit
    if digit == '1':
        score = model.predict(test_num, 1, folder_name, name)
    elif digit == '2':
        score = model.predict(test_num, 2, folder_name, name)
    elif digit == '3':
        score = model.predict(test_num, 3, folder_name, name)
    return name, score
| [
"noreply@github.com"
] | Sapphirine.noreply@github.com |
2435312c8826faf2062cab3201f18f92c06d0d66 | 1561c6b62982c33c2b9b028af1369832d7c190c3 | /synaptor/seg_utils/relabel.py | 91343bf2a92925a627505864733973550b225989 | [
"MIT"
] | permissive | nkemnitz/Synaptor | b60f33e51ed045e7cdaf18465af3d80edaca1cf3 | 40618786d5b762eb3877ecac49ff310f3e6f892d | refs/heads/master | 2020-05-06T15:43:38.584607 | 2019-04-07T17:31:57 | 2019-04-07T17:31:57 | 180,205,457 | 1 | 0 | NOASSERTION | 2019-04-08T18:05:03 | 2019-04-08T18:05:03 | null | UTF-8 | Python | false | false | 2,654 | py | import numpy as np
from . import describe
from . import _relabel
def relabel_data(d, mapping, copy=True):
    """Relabel ``d`` according to a mapping dict.

    Entries of ``d`` whose value appears as a key in ``mapping`` are replaced
    by the mapped value; every other entry is left unchanged.

    Args:
        d (3darray): A data volume.
        mapping (dict): Maps data values in ``d`` to new desired values.
        copy (bool): When True (the default) relabel a fresh copy; when
            False, modify ``d`` in place.

    Returns:
        3darray: The (possibly newly created) relabeled volume.
    """
    target = np.copy(d) if copy else d
    return _relabel.relabel_data(target, mapping)
def relabel_data_1N(d, copy=True):
    """Relabel the nonzero segment ids of ``d`` to the dense range 1..N.

    Args:
        d (3darray): A segmentation.
        copy (bool): When True (the default) relabel a fresh copy; when
            False, modify ``d`` in place.

    Returns:
        3darray: Volume with segment ids renumbered from 1.
    """
    old_ids = describe.nonzero_unique_ids(d)
    id_map = {old: new for new, old in enumerate(old_ids, start=1)}
    return relabel_data(d, id_map, copy=copy)
def relabel_data_iterative(d, mapping):
    """Python-based iterative relabeling.

    Remaps ``d`` value-by-value; best when only a few ids change.  Values
    of ``d`` without an entry in ``mapping`` are left unchanged.

    Args:
        d (3darray): A segmentation.
        mapping (dict): Maps data values in ``d`` to new desired values.

    Returns:
        3darray: A new volume with the desired modifications.
    """
    result = np.copy(d)
    present = set(np.unique(d))
    for old_id, new_id in mapping.items():
        # Skip ids that never occur in the data to avoid useless full scans.
        if old_id in present:
            result[d == old_id] = new_id
    return result
def relabel_data_lookup_arr(d, mapping):
    """Python-based lookup array relabeling.

    Remaps ``d`` through a lookup array; best when several ids change at
    once and ids are approximately dense within 1..max.  Values of ``d``
    without an entry in ``mapping`` are left unchanged.

    Args:
        d (3darray): A segmentation.
        mapping (dict): Maps data values in ``d`` to new desired values.

    Returns:
        3darray: A new volume with the desired modifications (``d`` itself
        when ``mapping`` is empty).
    """
    if len(mapping) == 0:
        return d

    map_keys = np.array(list(mapping.keys()))
    map_vals = np.array(list(mapping.values()))
    # Size the lookup table by BOTH the data's max value and the largest
    # mapping key: previously a key absent from d but larger than d.max()
    # raised IndexError on the fancy assignment below.
    table_size = max(int(d.max()), int(map_keys.max())) + 1
    map_arr = np.arange(0, table_size)
    map_arr[map_keys] = map_vals
    return map_arr[d]
| [
"nturner.stanford@gmail.com"
] | nturner.stanford@gmail.com |
2f847646f43a261924fc84f50fb8e1f46ebf1b26 | 5b01236940cb3b1bb2e987797a0e07868133a85b | /app/error.py | bd23e12d9065af85513cc0e57d88c9b0ff65e2c4 | [] | no_license | dicksonkariuki/Watchlist | 47cf68c45d1ecd810c986a12cb8934ab8453e09c | 2089a577ff6b8bf07d14232658ce9671b6ebb899 | refs/heads/master | 2020-08-08T19:03:24.766702 | 2019-10-17T07:28:38 | 2019-10-17T07:28:38 | 213,634,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from flask import render_template
from app import app
@app.errorhandler(404)
def four_Ow_four(error):
    """
    Flask 404 handler: render the custom fourOwfour.html template with a
    404 status code.  *error* is the exception Flask passes in (unused).
    """
return render_template ('fourOwfour.html'),404 | [
"dicksonkariuki4@gmail.com"
] | dicksonkariuki4@gmail.com |
d9b3012794241a6b430ddc7807eaaf0d74e8c56f | d8ea695288010f7496c8661bfc3a7675477dcba0 | /examples/raspberry_pi/relay.py | 01f0a68e794f40467c91592b842f2802038c96ef | [] | no_license | dabolau/demo | de9c593dabca26144ef8098c437369492797edd6 | 212f4c2ec6b49baef0ef5fcdee6f178fa21c5713 | refs/heads/master | 2021-01-17T16:09:48.381642 | 2018-10-08T10:12:45 | 2018-10-08T10:12:45 | 90,009,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import RPi.GPIO as GPIO
import time
def relay(i=0):
    """Drive the relay on physical pin 40: switch it on (i=1) or off (i=0),
    hold that state for five seconds, then release the GPIO pins."""
    # Use physical-board pin numbering
    GPIO.setmode(GPIO.BOARD)
    # Silence pin re-use warnings
    GPIO.setwarnings(False)
    # Relay control pin
    PIN = 40
    # Configure the pin as an output
    GPIO.setup(PIN, GPIO.OUT)
    # Switch flag (0/1): 0 means off, 1 means on
    INT = i
    # On (contact closed)
    if INT == 1:
        GPIO.output(PIN, GPIO.HIGH)  # drive the pin high
        print('power on')
    # Off (contact open)
    if INT == 0:
        GPIO.output(PIN, GPIO.LOW)  # drive the pin low
        print('power off')
    # Hold the state for 5 seconds
    time.sleep(5)
    # Release the pins
    GPIO.cleanup()
if __name__ == '__main__':
    # Demo: pulse the relay on, off, then on again (5 s each, see relay()).
    relay(1) # on
    relay(0) # off
    relay(1) # on
| [
"dabolau@qq.com"
] | dabolau@qq.com |
c6eb011206a6832c3dd908dc6cb075ac850cb450 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/binary-tree-right-side-view.py | c8b32518fe78924faa153d9767c0efaaf96c2cc5 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 1,306 | py | # Time: O(n)
# Space: O(h)
class TreeNode(object):
    """Binary tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution(object):
    """DFS solution: walk the tree right-subtree-first and record the first
    node reached at each depth -- that node is the one visible from the
    right side."""
    # @param root, a tree node
    # @return a list of integers
    def rightSideView(self, root):
        seen = []
        self.rightSideViewDFS(root, 1, seen)
        return seen

    def rightSideViewDFS(self, node, depth, result):
        """Right-first preorder walk; append the first value per depth."""
        if node is None:
            return
        # result already holds one value per shallower depth, so a longer
        # result means this depth was visited before.
        if depth > len(result):
            result.append(node.val)
        self.rightSideViewDFS(node.right, depth + 1, result)
        self.rightSideViewDFS(node.left, depth + 1, result)
# BFS solution
# Time: O(n)
# Space: O(n)
class Solution2(object):
    """BFS solution: traverse level by level and keep only the rightmost
    node of each level (matches the DFS Solution above)."""
    # @param root, a tree node
    # @return a list of integers
    def rightSideView(self, root):
        if root is None:
            return []

        result, current = [], [root]
        while current:
            # Only the rightmost node of the level is visible from the
            # right side -- append exactly one value per level, never one
            # per node.
            result.append(current[-1].val)
            next_level = []
            for node in current:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            current = next_level
        return result
| [
"noreply@github.com"
] | black-shadows.noreply@github.com |
be894f7257499ff10f4f259ce795d3033f32e04e | b1f748d761751e89f62cf5b8a2b13adac5bf3a29 | /metermaster/urls.py | dcdb41d64262561d4761bce0daa60a73a5e3078d | [] | no_license | sangeeth-subramoniam/buildingmanagementheroku | 7b77be693fa73dbd2dff9c816bf50daf1e501029 | db26de549f7088d2ff80a303abeeaaa548d43e0b | refs/heads/master | 2023-07-08T13:46:06.384694 | 2021-08-10T06:50:14 | 2021-08-10T06:50:14 | 392,492,925 | 0 | 0 | null | 2021-08-04T02:46:57 | 2021-08-04T00:14:10 | Python | UTF-8 | Python | false | false | 409 | py | from django.urls import path,include
from . import views
app_name = 'metermaster'  # URL namespace: reverse as 'metermaster:<name>'

# Routes for the metermaster app: home page, update/delete forms keyed by
# primary key, and an AJAX endpoint (presumably used to populate store
# choices dynamically -- verify against the template that calls it).
urlpatterns = [
    path('', views.home , name = "home"),
    path('metermaster_update_form/<int:pk>', views.updatemeterForm , name = 'updateMeterForm'),
    path('metermaster_delete_form/<int:pk>', views.deletemeterForm , name = 'deleteMeterForm'),
    path('ajax/load-stores/', views.load_store, name='ajax_load_stores'),
]
| [
"s-sangeeth-k@sicis.co.jp"
] | s-sangeeth-k@sicis.co.jp |
1999c84509f04a543cf1c61c698ae75b971dd835 | f3ed1631f5cfb10ec3c03974a04f73e1e8dd5829 | /handofcats/middlewares/__init__.py | 2a4c39ee259addb6db2d9c2c2ba965c6c9a45062 | [] | no_license | tell-k/handofcats | 9839e20eb3731890a16dcb6d864b7fc13ee80032 | 135e9abac83db318a7b07337191a1d4f699f7ef2 | refs/heads/master | 2020-12-25T22:29:35.495296 | 2016-01-10T00:29:30 | 2016-01-10T00:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | # -*- coding:utf-8 -*-
from functools import wraps
class MiddlewareApplicator(object):
    """Compose registered middlewares around a target function.

    Each registered callable has the shape ``fn(context, closure)`` and is
    adapted through ``middlewarefy``.  Calling an instance on a function
    returns a wrapper that packs the call arguments into a context dict,
    threads that dict through the middleware chain, and finally invokes the
    target function with the (possibly modified) arguments.
    """

    def __init__(self, fns):
        self.middlewares = [middlewarefy(fn) for fn in fns]

    def register(self, fn):
        """Append one more middleware to the end of the chain."""
        self.middlewares.append(middlewarefy(fn))

    def __call__(self, fn):
        def call(*args, **kwargs):
            # Bookkeeping entries first, then the kwargs themselves are
            # overlaid (same order as before: a kwarg literally named
            # "_args"/"_keys" would shadow the bookkeeping values).
            context = {}
            context["_args"] = args
            context["_keys"] = list(kwargs.keys())
            context.update(kwargs)

            def create_result(ctx):
                positional = ctx["_args"]
                keywords = {key: ctx[key] for key in ctx["_keys"]}
                return fn(*positional, **keywords)

            # Wrap innermost-out so middlewares run in registration order.
            chain = create_result
            for middleware in reversed(self.middlewares):
                chain = middleware(chain)
            return chain(context)

        return call
def middlewarefy(fn):
    """Adapt ``fn(context, closure)`` into a middleware factory: given the
    next ``closure`` in the chain, it returns a one-argument callable of
    the context.  ``functools.wraps`` preserves ``fn``'s metadata."""
    @wraps(fn)
    def middleware(closure):
        def bound(context):
            return fn(context, closure)
        return bound
    return middleware
from .verbosity_adjustment import middleware_verbosity_adjustment
# Middlewares applied when the caller does not supply an explicit list.
DEFAULT_MIDDLEWARES = [
    middleware_verbosity_adjustment,
]
| [
"podhmo+altair@beproud.jp"
] | podhmo+altair@beproud.jp |
3588b3df70f9fbd1b7167ef3bfa267d162441634 | a487691662edb19792007571fc084e68f180af0a | /2020/mapreduceInPython/mapper.py | d3bd22bddd279da6ca99da7d3585b8bd1619f2ba | [] | no_license | eiahb3838ya/PHBS_BigData_2019 | a74231817b1114079961c7d4dba8b7adc2794cad | 91b71d229188cf750e4acf093615bfba5e27ca96 | refs/heads/master | 2021-07-15T06:23:43.505842 | 2020-11-12T03:32:29 | 2020-11-12T03:32:29 | 225,119,161 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 19:31:24 2020
@author: eiahb
"""
import sys
from multiprocessing import Pool
import time
def main():
    """Hadoop-streaming mapper: read CSV records from stdin and emit
    tab-separated key/value pairs."""
    # Read every input line
    for raw_line in sys.stdin:
        record = raw_line.split(",")
        # Key: "<symbol>_<timestamp truncated to 12 characters>"
        stock_minute = "{}_{}".format(record[0], record[1][:12])
        print("%s\t%s" % (stock_minute, record[2]))
if __name__ =="__main__":
    tic = time.time()
    main()
    # Elapsed wall time; NOTE(review): computed but never printed or logged.
    toc = time.time() - tic
| [
"eiahb3838ya@gmail.com"
] | eiahb3838ya@gmail.com |
c4a51811da26e90ea2d38213de6bbed6a36e762f | fd60c2370bf5fb2355c4b30a30ad5ce9c62bc10d | /orc/arp.py | 6e98cd52efe755a4ea35da057dff2e7af733f3f8 | [] | no_license | hecanjog/hcj.py | 08e43edf62330e1b9e0448edf549c7d18e2e9699 | e42538cd48499bb9e9c11321b2f9db56f15486b4 | refs/heads/master | 2021-01-21T04:54:46.693980 | 2020-01-24T02:03:06 | 2020-01-24T02:03:06 | 19,010,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | from pippi import dsp
from pippi import tune
from hcj import fx
# MIDI device map; presumably consumed by the hosting sequencer -- play()
# reads the matching controller via ctl.get('midi').get('pc').
midi = {'pc': 3}
def play(ctl):
    """Generate one four-note pulsar burst, reading live settings from the
    'pc' midi controller and the shared param store.

    NOTE(review): several locals (beat, wf, modr, amp) are assigned more
    than once and only the last assignment is live; together with the
    commented-out lines this reads like live-coding scratch kept on
    purpose -- confirm before cleaning up.
    """
    param = ctl.get('param')
    # Controller handle; knob/pad indices are read below with lpd.get/geti.
    lpd = ctl.get('midi').get('pc')
    lpd.setOffset(111)
    key = 'g'  # unused in this function
    #bd = dsp.read('/home/hecanjog/sounds/drums/Tinyrim2.wav').data
    #bd = dsp.read('/home/hecanjog/sounds/drums/Jngletam.wav').data
    #bd = dsp.read('/home/hecanjog/sounds/drums/78oh.wav').data
    #bd = dsp.amp(bd, 1)
    #bd = dsp.transpose(bd, dsp.rand(0.65, 0.72) / 1)
    #bd = dsp.transpose(bd, dsp.rand(0.3, 0.32) / 1)
    # Chord from scale degrees 1 and 8 in G, reversed, then rotated by knob 4.
    chord = tune.fromdegrees([1,8], root='g', octave=dsp.randint(0,2))
    chord.reverse()
    chord = dsp.rotate(chord, lpd.geti(4, low=0, high=len(chord)-1))
    #chord = dsp.randshuffle(chord)
    # 'rep'/'reps' persist across calls through the param store.
    reps = param.get('reps', default=16)
    rep = param.get('rep', default=0)
    beat = dsp.bpm2frames(130) / 4
    beat = dsp.mstf(4100) / 32
    #length = beat
    out = ''
    for n in range(4):
        # Step through the chord; 'rep' indexes the next note.
        freq = chord[int(rep) % len(chord)]
        # Half the time, jump up by a random number of octaves (cap: knob 7).
        if dsp.rand() > 0.5:
            freq *= 2**dsp.randint(0, lpd.geti(7, low=0, high=8, default=0))
        pw = lpd.get(8, low=0.1, high=1, default=1)
        #length = dsp.mstf(lpd.get(2, low=50, high=2500, default=500) * dsp.rand(0.5, 2))
        length = dsp.mstf(lpd.get(14, low=50, high=5000, default=500))
        wf = dsp.wavetable('tri', 512)
        wf = dsp.wavetable('impulse', 512)
        wf = dsp.wavetable('sine2pi', 512)
        # Live waveform: random breakpoint curve, point count from knob 15.
        wf = dsp.breakpoint([0] + [ dsp.rand(-1,1) for w in range(lpd.geti(15, low=4, high=200, default=4)) ] + [0], 512)
        win = dsp.wavetable('sine', 512)
        mod = [ dsp.rand(0, 1) for m in range(512) ]
        modr = dsp.rand(0.01, 0.02)
        modr = lpd.get(16, low=0.01, high=1, default=1)
        modf = dsp.rand(0.5, 2)
        amp = lpd.get(6, low=0, high=2, default=0)
        amp = dsp.rand(0, 2)
        o = dsp.pulsar(freq, length, pw, wf, win, mod, modr, modf, amp)
        o = dsp.env(o, 'random')
        o = dsp.taper(o, dsp.mstf(10))
        o = dsp.pan(o, dsp.rand())
        rep = rep + 1
        out += o
    #out = dsp.mix([ dsp.fill(bd, dsp.flen(out), silence=True), out ])
    param.set('rep', (rep + 1) % reps)
    return out
| [
"erik@hecanjog.com"
] | erik@hecanjog.com |
ba436d0fe6e4b79670c1531daca1fcb18e165398 | 48fcd5b9203c5f34dcad9483259c0f3d46f5d48b | /codeacademy-python3/base_exponent.py | 17a5f5046f4816dd6ad60e798125ea7c861562f1 | [] | no_license | ssaulrj/codes-python | 438dd691815d0a688d264928eb07187ba30c2138 | 04b75b001de60a5e202ad373f3379864753ce203 | refs/heads/master | 2022-11-17T11:40:18.883096 | 2020-07-06T00:57:58 | 2020-07-06T00:57:58 | 234,440,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | # Write your large_power function here:
def large_power(base, exponent):
    """Return True when base**exponent exceeds 5000, else False."""
    # The comparison already yields a bool -- no if/else ladder needed.
    return base ** exponent > 5000

# Uncomment these function calls to test your large_power function:
print(large_power(2, 13))
# should print True
print(large_power(2, 12))
# should print False
| [
"noreply@github.com"
] | ssaulrj.noreply@github.com |
1032cddc93ebd229103c6e5a1e6c3da61571fa57 | 4c6cb7019d06a1c0c588bb98fb359a536c8ae8ea | /04-03/todolist/buy/models.py | 05fdbd3c8532d624dc018c762eb55337dd07c3ec | [] | no_license | hanifmisbah/tugas_bersama | 2be54f4b386a470b04ca29aa293246985b44707a | 4bd4e195b56090ca9256b9e319bb34b92a86d032 | refs/heads/master | 2022-12-19T03:15:33.085665 | 2020-09-10T09:07:56 | 2020-09-10T09:07:56 | 294,304,050 | 0 | 0 | null | 2020-09-10T04:37:28 | 2020-09-10T04:37:28 | null | UTF-8 | Python | false | false | 229 | py | from django.db import models
# Create your models here.
class Buy(models.Model):
    """One row of the shared to-buy list (all fields are free text)."""
    name = models.TextField(default='')   # person's name -- TODO confirm
    brg = models.TextField(default='')    # item ("barang"?) -- TODO confirm meaning
    jmlh = models.TextField(default='')   # quantity ("jumlah"?) -- TODO confirm meaning
price = models.TextField(default='') | [
"hanifmisbah97@gmail.com"
] | hanifmisbah97@gmail.com |
d6a4beafdd972f88c046bce2fb861e95ccfb9b20 | ea767918d1391d950714d3fafabf65330bade863 | /odin/bay/distributions/quantized.py | bf60b13f432ab1ab8ac1ca5af1b42c52e44d16aa | [
"MIT"
] | permissive | tirkarthi/odin-ai | f5bb33d02047025029891e1282b9bd389eb4eb07 | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | refs/heads/master | 2023-06-02T20:15:11.233665 | 2020-09-25T09:57:28 | 2020-09-25T09:57:28 | 298,744,248 | 0 | 0 | MIT | 2020-09-26T05:29:11 | 2020-09-26T05:29:10 | null | UTF-8 | Python | false | false | 1,797 | py | from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import exp as exp_bijector
from tensorflow_probability.python.distributions import (
NegativeBinomial, Normal, QuantizedDistribution, TransformedDistribution,
Uniform)
from tensorflow_probability.python.internal import dtype_util
__all__ = ["qUniform", "qNormal"]
class qNormal(QuantizedDistribution):
    """Quantized Normal: wraps Normal(loc, scale) in tfp's
    QuantizedDistribution, with optional low/high cutoffs taken from
    min_value/max_value."""

    def __init__(self,
                 loc=0.,
                 scale=1.,
                 min_value=None,
                 max_value=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="qNormal"):
        super(qNormal,
              self).__init__(distribution=Normal(loc=loc,
                                                 scale=scale,
                                                 validate_args=validate_args,
                                                 allow_nan_stats=allow_nan_stats),
                             low=min_value,
                             high=max_value,
                             name=name)
class qUniform(QuantizedDistribution):
    """Quantized Uniform: wraps Uniform(low, high) in tfp's
    QuantizedDistribution, with optional low/high cutoffs taken from
    min_value/max_value."""

    def __init__(self,
                 low=0.,
                 high=1.,
                 min_value=None,
                 max_value=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="qUniform"):
        super(qUniform,
              self).__init__(distribution=Uniform(low=low,
                                                  high=high,
                                                  validate_args=validate_args,
                                                  allow_nan_stats=allow_nan_stats),
                             low=min_value,
                             high=max_value,
                             name=name)
| [
"nickartin13@gmail.com"
] | nickartin13@gmail.com |
c6281301f2104fda3c8e84f6c963abd6f8f8925d | fb84fa89744e25a6842e5a22cc9aa35f17cb9c79 | /pyquant/marketdata/spot.py | 845f68291cb39b90413809921767447a73b176ad | [] | no_license | masa4u/pyquant-xmlrpc | dbcf92d257cb89d033f9c7811799126412bca9f8 | 54565f0e71fa819a69ba3e3b92a012dbf5a8046f | refs/heads/master | 2016-09-06T10:47:01.093006 | 2015-03-30T02:00:16 | 2015-03-30T02:00:16 | 30,795,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from pyquant.marketdata.marketdata import MarketDataType
from pyquant.marketdata.single import MarketDataSingle
class MarketDataSpot(MarketDataSingle):
    """Spot market data point; all behaviour is inherited from
    MarketDataSingle."""
    def __init__(self):
        super(MarketDataSpot, self).__init__()
# NOTE(review): Python 2 print statements. The next two lines execute at
# *import* time, not only under the __main__ guard below.
print MarketDataSpot().data_type
print MarketDataSpot().value
if __name__ == '__main__':
    from pyquant.marketdata.libor import MarketDataLibor
    from pyquant.marketdata.cmt import MarketDataCMT
    from pyquant.marketdata.cms import MarketDataCMS
    from pyquant.marketdata.curve import MarketDataCurve
    if issubclass(MarketDataSpot, MarketDataSingle):
        print 'yes'
    # Report which of the listed classes subclass MarketDataSingle.
    single_data_list = [MarketDataSpot, MarketDataLibor, MarketDataCMT, MarketDataCurve]
    for c in single_data_list:
        print c.__name__, issubclass(c, MarketDataSingle)
| [
"masa4u@gmail.com"
] | masa4u@gmail.com |
37919b373e29fe85008749e2a8c1d126d697b1f8 | 6c677098c78b3f410019ac26f116cd8539949d35 | /utils/money.py | b1e72ef7c897fae87de3c441e35f2689b539d670 | [
"MIT"
] | permissive | Pythonian/bsawf | eb05dcf7eeb3fab10dad269f9018fc3aa56c967e | 3e422a81cfb1b157119473c20b94a9a01f8b9672 | refs/heads/master | 2023-05-27T20:32:25.965703 | 2022-03-16T14:57:26 | 2022-03-16T14:57:26 | 253,907,876 | 0 | 0 | MIT | 2023-05-02T20:53:12 | 2020-04-07T20:44:53 | Python | UTF-8 | Python | false | false | 375 | py | def cents_to_dollars(cents):
"""
Convert cents to dollars.
:param cents: Amount in cents
:type cents: int
:return: float
"""
return round(cents / 100.0, 2)
def dollars_to_cents(dollars):
    """
    Convert dollars to cents.

    :param dollars: Amount in dollars
    :type dollars: float
    :return: int
    """
    # round() before truncating: int(19.99 * 100) == 1998 because of float
    # representation error; rounding yields the intended 1999.
    return int(round(dollars * 100))
| [
"prontomaster@gmail.com"
] | prontomaster@gmail.com |
f280c39f7214cc27bd841b0e53dbfb11c472c4c1 | a9672f0eb530d0d550070b48fe9d324063ace40b | /dataset/extend_existing_dataset.py | 483a1e4831792d7f6b9b1a2af81868d98beb345d | [
"BSD-3-Clause"
] | permissive | SandUhrGucker/Voice-Cloning-App | 7e025e5493ec0db723e057478e4a11080ed327a3 | 58488aa5690fcb94c778fb6f4d4d909b9f223c72 | refs/heads/main | 2023-07-31T13:10:53.383959 | 2021-09-20T18:53:59 | 2021-09-20T18:53:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | import argparse
import logging
import os
from os.path import dirname, abspath
import sys
sys.path.append(dirname(dirname(abspath(__file__))))
from dataset.audio_processing import convert_audio
from dataset.clip_generator import extend_dataset, MIN_LENGTH, MAX_LENGTH
from dataset.analysis import save_dataset_info
def extend_existing_dataset(
    text_path,
    audio_path,
    transcription_model,
    forced_alignment_path,
    output_path,
    label_path,
    suffix,
    info_path,
    logging=logging,
    min_length=MIN_LENGTH,
    max_length=MAX_LENGTH,
    min_confidence=0.85,
    combine_clips=True,
):
    """
    Extends an existing dataset.
    Converts audio to required format, generates clips & produces required files.

    Parameters
    ----------
    text_path : str
        Path to source text
    audio_path : str
        Path to source audio
    transcription_model : TranscriptionModel
        Transcription model
    forced_alignment_path : str
        Path to save alignment JSON to
    output_path : str
        Path to save audio clips to
    label_path : str
        Path to save label file to
    suffix : str
        String suffix to append to filenames
    info_path : str
        Path to save info JSON to
    logging : logging (optional)
        Logging object to write logs to
    min_length : optional
        Lower clip-length bound forwarded to extend_dataset
    max_length : optional
        Upper clip-length bound forwarded to extend_dataset
    min_confidence : float (optional)
        Minimum confidence score to generate a clip for
    combine_clips : bool (optional)
        Forwarded to extend_dataset

    Raises
    -------
    AssertionError
        If given paths are invalid or clips could not be produced
    """
    # Fail fast when the dataset being extended is not actually there.
    assert os.path.isdir(output_path), "Missing existing dataset clips folder"
    assert os.path.isfile(label_path), "Missing existing dataset metadata file"

    # Fixed typo in the user-visible log line ("Coverting" -> "Converting").
    logging.info(f"Converting {audio_path}...")
    converted_audio = convert_audio(audio_path)
    extend_dataset(
        converted_audio,
        text_path,
        transcription_model,
        forced_alignment_path,
        output_path,
        label_path,
        suffix,
        logging=logging,
        min_length=min_length,
        max_length=max_length,
        min_confidence=min_confidence,
        combine_clips=combine_clips,
    )
    logging.info("Getting dataset info...")
    # Do not pass clip lengths from extend_dataset as we need to get size of
    # the entire dataset (not just the new clips)
    save_dataset_info(label_path, output_path, info_path)
if __name__ == "__main__":
    """Extend existing dataset"""
    # CLI wrapper: every flag maps 1:1 onto an extend_existing_dataset
    # parameter (see **vars(args) below).  min/max length, confidence and
    # combine_clips keep their function defaults.
    parser = argparse.ArgumentParser(description="Extend existing dataset")
    parser.add_argument("-t", "--text_path", help="Path to text file", type=str, required=True)
    parser.add_argument("-a", "--audio_path", help="Path to audio file", type=str, required=True)
    parser.add_argument(
        "-f", "--forced_alignment_path", help="Path to forced alignment JSON", type=str, default="align.json"
    )
    parser.add_argument("-o", "--output_path", help="Path to save snippets", type=str, default="wavs")
    parser.add_argument(
        "-l", "--label_path", help="Path to save snippet labelling text file", type=str, default="metadata.csv"
    )
    parser.add_argument("-s", "--suffix", help="String suffix for added files", type=str, required=True)
    parser.add_argument("-i", "--info_path", help="Path to save info file", type=str, default="info.json")
    args = parser.parse_args()
    extend_existing_dataset(**vars(args))
| [
"bandrew01@qub.ac.uk"
] | bandrew01@qub.ac.uk |
c8266c779bd15012980580dab2a2b0f598c212e9 | 38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a | /nibbler/trading/collectors/AlgoTrader/utils/__init__.py | 35df5938672fc9ea34ff2f1b55ef71e5816f2d1b | [] | no_license | JizzFactoryEmployee/nibblerppman | 0fbc1ce662cf8b4868b41a97291250fae29dc41d | 160e557578a3e8a614450354f6ade233d32b052f | refs/heads/master | 2022-11-14T01:10:31.743000 | 2020-07-04T01:21:52 | 2020-07-04T01:21:52 | 273,835,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | time_frames = {
'1m': 60*1000,
'5m': 60*1000,
'15m': 60*1000,
'1h': 60*60*1000,
'2h': 2*60*60*1000,
'4h': 4*60*60*1000,
'12h': 12*60*60*1000,
'd': 24*60*60*1000,
'w': 7*24*60*60*1000,
'M': 30*24*60*60*1000,
}
from .function_time_frame_multiplier import (
time_frame_mex, time_frame_multiplier
) | [
"52958901+JizzFactoryEmployee@users.noreply.github.com"
] | 52958901+JizzFactoryEmployee@users.noreply.github.com |
c7486e10f1985261033d2b69fb7b594037405208 | 8d3dddecd11126f51440a4aebe8913d90b6d4e0e | /attractions_qunar/attractions_qunar/pipelines.py | d8ef0ed2a585de97f717b11bada2a590c8da4982 | [] | no_license | ivanliu1989/routes-scraper | 108168c4225df70172df4a41869e650efd0ff0dc | 251e03a68d09fd311f0e49545001b777eb8460df | refs/heads/master | 2020-04-22T16:05:11.328359 | 2019-03-03T08:24:38 | 2019-03-03T08:24:38 | 170,497,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class AttractionsQunarPipeline(object):
    """Scrapy item pipeline for the Qunar attractions spider; currently a
    pass-through stage."""
    def process_item(self, item, spider):
        # No processing yet: return the item unchanged so the next pipeline
        # stage (or the feed exporter) receives it.
        return item
| [
"ivan.liuyanfeng@gmail.com"
] | ivan.liuyanfeng@gmail.com |
647ee5c0365253201ebc228f53866ed68e2dac87 | f55d730de1f9740aa8cc56b5d404b454dc560963 | /todo_app/todo/apps.py | 26ae8991b56b54f8890c2dd09e5b7dc38b2cd723 | [] | no_license | momchilantonov/ToDoApp | 4857e5d1c7f9d5ae8b2051f0114d1e59666d9a54 | 546032b977658ef1b5767abc049e4cced1840def | refs/heads/main | 2023-06-01T03:02:37.347426 | 2021-06-23T10:07:11 | 2021-06-23T10:07:11 | 374,223,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.apps import AppConfig
class TodoConfig(AppConfig):
    """Django application configuration for the todo app."""
    default_auto_field = 'django.db.models.BigAutoField'  # pk type for models without an explicit id
    name = 'todo_app.todo'  # dotted path Django uses to locate this app
| [
"eng.antonov@gmail.com"
] | eng.antonov@gmail.com |
866d59ff1f0d2711412caf59b1629bc830a0a8ba | 7486b3af4d4413a96b3e0bf76f776cd8605d7c05 | /WonyJeong/programmers/level2/124.py | 3c76022eaa94e98f79a36ece320078fa5e57430b | [] | no_license | WonyJeong/algorithm-study | 7146e18ec9a3d7f46910e31890768b2e37f8b9b4 | dd659bf75c902800bed226d392d144b691d8e059 | refs/heads/main | 2023-03-31T14:38:47.365622 | 2021-04-02T01:35:36 | 2021-04-02T01:35:36 | 334,309,434 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # def solution(n):
# answer = ""
# arr = ["4", "1", "2"]
# while n:
# answer = arr[n % 3] + answer
# n = n // 3 - (n % 3 == 0)
# return answer
# for i in range(1, 15):
# print(i, " : ", solution(i))
# 9494
import sys
input = sys.stdin.readline  # NOTE(review): shadows the input() builtin for faster reads
if __name__ == "__main__":
    N = int(input().strip())
    # Input comes in blocks, each headed by a count N; N == 0 terminates.
    while N != 0:
        text = []
        for _ in range(N):
            # Record the length of the first whitespace-separated token.
            text.append(len(input().strip().split()[0]))
        print(text)
        print(max(text) + 1)
        N = int(input().strip())
"59948675+WonyJeong@users.noreply.github.com"
] | 59948675+WonyJeong@users.noreply.github.com |
5454e587a38bd0c25fc6e81b25d9de677ba6d29e | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/wasabi/tables.py | a6c71603a2adcb97ef07d9afa29efc2944a607c1 | [
"Apache-2.0",
"MIT"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 3,941 | py | # coding: utf8
from __future__ import unicode_literals, print_function
from .util import to_string, zip_longest, basestring_
ALIGN_MAP = {"l": "<", "r": ">", "c": "^"}
def table(
    data,
    header=None,
    footer=None,
    divider=False,
    widths="auto",
    max_col=30,
    spacing=3,
    aligns=None,
    multiline=False,
    indent=0,
):
    """Format tabular data.

    data (iterable / dict): The data to render. Either a list of lists (one per
        row) or a dict for two-column tables.
    header (iterable): The header columns.
    footer (iterable): The footer columns.
    divider (bool): Show a divider line between header/footer and body.
    widths (iterable or 'auto'): Column widths in order. If "auto", widths
        will be calculated automatically based on the largest value.
    max_col (int): Maximum column width.
    spacing (int): Spacing between columns, in spaces.
    aligns (iterable / unicode): Column alignments in order. 'l' (left,
        default), 'r' (right) or 'c' (center). If a string, value is used
        for all columns.
    multiline (bool): If a cell value is a list or a tuple, render it on
        multiple lines, with one value per line.
    indent (int): Number of spaces to use for indentation.
    RETURNS (unicode): The formatted table.
    """
    if isinstance(data, dict):
        data = list(data.items())
    if multiline:
        zipped_data = []
        for i, item in enumerate(data):
            # Wrap scalars so every cell is a sequence, then zip column-wise
            # to spread one logical row over several physical rows.
            vals = [v if isinstance(v, (list, tuple)) else [v] for v in item]
            zipped_data.extend(list(zip_longest(*vals, fillvalue="")))
            # Blank spacer row between logical rows (not after the last one).
            if i < len(data) - 1:
                zipped_data.append(["" for i in item])
        data = zipped_data
    if widths == "auto":
        widths = _get_max_widths(data, header, footer, max_col)
    settings = {
        "widths": widths,
        "spacing": spacing,
        "aligns": aligns,
        "indent": indent,
    }
    divider_row = row(["-" * width for width in widths], **settings)
    rows = []
    if header:
        rows.append(row(header, **settings))
    if divider:
        rows.append(divider_row)
    for i, item in enumerate(data):
        rows.append(row(item, **settings))
    if footer:
        if divider:
            rows.append(divider_row)
        rows.append(row(footer, **settings))
    # Surrounding newlines give the table breathing room in terminal output.
    return "\n{}\n".format("\n".join(rows))
def row(data, widths="auto", spacing=3, aligns=None, indent=0):
    """Format data as a table row.

    data (iterable): The individual columns to format.
    widths (iterable, int or 'auto'): Column widths, either one integer for all
        columns or an iterable of values. If "auto", each column is as wide
        as its own value.
    spacing (int): Spacing between columns, in spaces.
    aligns (iterable / unicode): Column alignments in order. 'l' (left,
        default), 'r' (right) or 'c' (center). If a string, value is used
        for all columns.
    indent (int): Number of spaces to use for indentation.
    RETURNS (unicode): The formatted row.
    """
    cols = []
    if isinstance(aligns, basestring_):  # single align value
        aligns = [aligns for _ in data]
    if not hasattr(widths, "__iter__"):  # single number
        widths = [widths for _ in range(len(data))]
    for i, col in enumerate(data):
        # Missing/short aligns fall back to left alignment.
        align = ALIGN_MAP.get(aligns[i] if aligns and i < len(aligns) else "l")
        col_width = len(col) if widths == "auto" else widths[i]
        # Build a str.format template like "{:<10}" per column.
        tpl = "{:%s%d}" % (align, col_width)
        cols.append(tpl.format(to_string(col)))
    return indent * " " + (" " * spacing).join(cols)
def _get_max_widths(data, header, footer, max_col):
    """Return per-column widths: the longest stringified cell in each column
    across body, header and footer, capped at *max_col*."""
    rows = list(data)
    for extra in (header, footer):
        if extra:
            rows.append(extra)
    cell_widths = [[len(to_string(cell)) for cell in item] for item in rows]
    return [min(max(column), max_col) for column in zip(*cell_widths)]
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
c890957d28cadac134e3484f1a486d85c08e3454 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_buffets.py | 235a40b7167bfe4459bba376990f1586441cf568 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._buffet import _BUFFET
# class header
class _BUFFETS(_BUFFET, ):
    """Generated word entry for "buffets"; all behaviour comes from the
    _BUFFET base class, only the identifying attributes are set here."""
    def __init__(self,):
        _BUFFET.__init__(self)
        self.name = "BUFFETS"
        self.specie = 'verbs'
        self.basic = "buffet"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1be6d2a87f5e4bc6b2fb71d1525a1f78a6dadd41 | e04dbc32247accf073e3089ed4013427ad182c7c | /sumitb2019/C.py | 3653705f97554a609bfbf09a858d5d307f6c71a6 | [] | no_license | twobooks/atcoder_training | 9deb237aed7d9de573c1134a858e96243fb73ca0 | aa81799ec87cc9c9d76de85c55e99ad5fa7676b5 | refs/heads/master | 2021-10-28T06:33:19.459975 | 2021-10-20T14:16:57 | 2021-10-20T14:16:57 | 233,233,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # from math import factorial,sqrt,ceil,gcd
# from itertools import permutations as permus
from collections import deque,Counter
# import re
# from functools import lru_cache # 簡単メモ化 @lru_cache(maxsize=1000)
# from decimal import Decimal, getcontext
# # getcontext().prec = 1000
# # eps = Decimal(10) ** (-100)
# import numpy as np
# import networkx as nx
# from scipy.sparse.csgraph import shortest_path, dijkstra, floyd_warshall, bellman_ford, johnson
# from scipy.sparse import csr_matrix
# from scipy.special import comb
# slist = "abcdefghijklmnopqrstuvwxyz"
X = int(input())
# BFS over every total reachable by summing items priced 100..105; after the
# loop, dp's keys are exactly the reachable totals (capped at 100000, which
# is presumably the problem's input bound -- verify against the statement).
dp = {100:1,101:1,102:1,103:1,104:1,105:1}
lis = [100,101,102,103,104,105]
que = deque([100,101,102,103,104,105])
while len(que)>0:
    num = que.popleft()
    for i in lis:
        dp[num+i] = 1
        # Skip totals over the cap and totals already queued.
        if num+i <= 100000 and not(num+i in que):
            que.append(num + i)
if X in dp:
    ans = 1
else:
    ans = 0
print(ans)
# print(*ans) # unpackして出力。間にスペースが入る
# for row in board:
# print(*row,sep="") #unpackして間にスペース入れずに出力する
# print("{:.10f}".format(ans))
# print("{:0=10d}".format(ans))
| [
"twobookscom@gmail.com"
] | twobookscom@gmail.com |
9be1a32eae5acfc9bd5b8570c0052eb586a1891e | 956fd28ea7a7ec83b62cd85691c512e735e60b3a | /bin/azure/mgmt/datamigration/models/project_task_properties_py3.py | cbd010514ded3c5031944e6c904da5df706b5e3a | [
"MIT"
] | permissive | zdmc23/bash-lambda-layer | 5517a27809d33801c65504c11f867d0d511b2e1c | e762df0189cfb894dab2d96bae1655b8857d5efb | refs/heads/master | 2021-01-05T02:32:20.765963 | 2020-02-16T09:41:47 | 2020-02-16T09:41:47 | 240,846,840 | 0 | 0 | MIT | 2020-02-16T06:59:55 | 2020-02-16T06:59:54 | null | UTF-8 | Python | false | false | 2,820 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProjectTaskProperties(Model):
    """Common base for the properties bag of every DMS task type.

    The service discriminates concrete task shapes through the ``taskType``
    field; ``_subtype_map`` below tells the deserializer which subclass to
    build.  If the advertised task type is unknown to this client, this
    base class itself is returned.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: ValidateMigrationInputSqlServerSqlMITaskProperties,
    MigrateSqlServerSqlDbTaskProperties, MigrateSqlServerSqlMITaskProperties,
    GetUserTablesSqlTaskProperties, ConnectToTargetSqlDbTaskProperties,
    ConnectToTargetSqlMITaskProperties, ConnectToSourceSqlServerTaskProperties

    Variables are only populated by the server, and will be ignored when
    sending a request.  All required parameters must be populated in order
    to send to Azure.

    :ivar errors: Array of errors. This is ignored if submitted.
    :vartype errors: list[~azure.mgmt.datamigration.models.ODataError]
    :ivar state: The state of the task. This is ignored if submitted.
        Possible values include: 'Unknown', 'Queued', 'Running', 'Canceled',
        'Succeeded', 'Failed', 'FailedInputValidation', 'Faulted'
    :vartype state: str or ~azure.mgmt.datamigration.models.TaskState
    :param task_type: Required. Constant filled by server.
    :type task_type: str
    """

    # errors/state are server-populated (read-only); task_type is the
    # required polymorphic discriminator.
    _validation = {
        'errors': {'readonly': True},
        'state': {'readonly': True},
        'task_type': {'required': True},
    }

    # Python attribute name -> wire name / serialized type.
    _attribute_map = {
        'errors': {'key': 'errors', 'type': '[ODataError]'},
        'state': {'key': 'state', 'type': 'str'},
        'task_type': {'key': 'taskType', 'type': 'str'},
    }

    # Wire-level taskType constant -> concrete subclass name.
    _subtype_map = {
        'task_type': {'ValidateMigrationInput.SqlServer.AzureSqlDbMI': 'ValidateMigrationInputSqlServerSqlMITaskProperties', 'Migrate.SqlServer.SqlDb': 'MigrateSqlServerSqlDbTaskProperties', 'Migrate.SqlServer.AzureSqlDbMI': 'MigrateSqlServerSqlMITaskProperties', 'GetUserTables.Sql': 'GetUserTablesSqlTaskProperties', 'ConnectToTarget.SqlDb': 'ConnectToTargetSqlDbTaskProperties', 'ConnectToTarget.AzureSqlDbMI': 'ConnectToTargetSqlMITaskProperties', 'ConnectToSource.SqlServer': 'ConnectToSourceSqlServerTaskProperties'}
    }

    def __init__(self, **kwargs) -> None:
        super(ProjectTaskProperties, self).__init__(**kwargs)
        # Read-only fields start unset; subclasses overwrite task_type
        # with their discriminator constant.
        self.errors = None
        self.state = None
        self.task_type = None
| [
"191707+zdmc23@users.noreply.github.com"
] | 191707+zdmc23@users.noreply.github.com |
381379527748a48ff699ba2f1009df2440fa6a78 | 906b969c383a440940af12f0e1cc01daedc475aa | /data_store/mongo_paginator.py | 07c4ecb439f02eb6a36d85a57a4e659afb6101f2 | [] | no_license | ok-water-survey/api | 6f11a1ac2bb0f4b6822c26fae684447a726bc24b | 3e39910ae9c09d208ce2a855a8920d659ed7049b | refs/heads/master | 2021-01-22T03:54:27.868634 | 2015-01-14T16:53:16 | 2015-01-14T16:53:16 | 25,709,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | __author__ = 'mstacy'
import ast
import math
import collections
from ordereddict import OrderedDict
from rest_framework.templatetags.rest_framework import replace_query_param
def MongoDataPagination(DB_MongoClient, database, collection, query=None, page=1, nPerPage=None, uri=''):
    """Run a (optionally filtered) find() against a Mongo collection and
    return one page of results plus pagination metadata.

    :param DB_MongoClient: pymongo client (or any ``client[db][coll]``-style mapping)
    :param database: database name
    :param collection: collection name
    :param query: optional *string* repr of keyword arguments for ``find()``,
        e.g. ``"{'spec': {'field': 'value'}}"`` — parsed with
        ``ast.literal_eval`` (safe: literals only, no code execution)
    :param page: 1-based page number; clamped into [1, last page with data]
    :param nPerPage: page size; 0 or None means "no paging" (everything on
        one page — mongo's ``limit(0)`` imposes no limit)
    :param uri: request URI used to build next/previous page links
    :returns: OrderedDict with keys count, meta, next, previous, results

    Fixes vs. the original: removed leftover debug ``print`` statements
    (Python-2 syntax), deduplicated the two identical pagination branches,
    dropped the no-op rebuild of ``query['spec']``, handled the default
    ``nPerPage=None`` (previously a TypeError), and narrowed the bare
    ``except`` around the OrderedDict fallback.
    """
    db = DB_MongoClient
    if query:
        # query arrives as a string; literal_eval turns it into the
        # kwargs dict passed straight through to find().
        query = ast.literal_eval(query)
    find_kwargs = query if query else {}

    count = db[database][collection].find(**find_kwargs).count()

    if not nPerPage:  # 0 or None -> single page, no limit applied
        page = 1
        offset = 0
        max_page = 1
        nPerPage = 0  # limit(0) == "no limit" in mongo
    else:
        max_page = math.ceil(float(count) / nPerPage)
        # Page numbers start at 1.
        if page < 1:
            page = 1
        # Clamp to the last page that actually has data.
        if page * nPerPage > count:
            page = int(max_page)
        # count == 0 makes max_page 0; fall back to page 1.
        if page < 1:
            page = 1
        offset = (page - 1) * nPerPage

    data = list(db[database][collection].find(**find_kwargs).skip(offset).limit(nPerPage))

    next_url = replace_query_param(uri, 'page', page + 1) if page < max_page else None
    previous_url = replace_query_param(uri, 'page', page - 1) if page > 1 else None

    result = {'count': count,
              'meta': {'page': page, 'page_size': nPerPage, 'pages': int(max_page)},
              'next': next_url,
              'previous': previous_url,
              'results': data}
    try:
        od = collections.OrderedDict(sorted(result.items()))
    except AttributeError:
        # Interpreters older than 2.7: use the backported ordereddict package.
        od = OrderedDict(sorted(result.items()))
    return od
"mbstacy@gmail.com"
] | mbstacy@gmail.com |
805c58d57c9fad852f9e5bb34ff321d60b1010a5 | 11f5853044bdfe25c85951b5c540bf759478c7d0 | /test/test_sequence_context.py | 76aa96db3e0a72c7c035b926cc88ed29fd55fb41 | [
"Apache-2.0"
] | permissive | alexanderwhatley/pepnet | 2dbe894d31cfeef4b7404092ad6034640a33e791 | 82a3087262917d4780ed8facbd49b766f2ff9200 | refs/heads/master | 2020-04-21T06:03:12.297328 | 2019-02-06T20:55:50 | 2019-02-06T20:55:54 | 169,356,393 | 0 | 0 | Apache-2.0 | 2019-02-06T04:49:07 | 2019-02-06T04:49:06 | null | UTF-8 | Python | false | false | 745 | py | from pepnet import Predictor, SequenceInput, Output
import numpy as np
def test_model_with_fixed_length_context():
    """Fit a Predictor that combines fixed-length flanking residues with a
    variable-length peptide and check it separates the training labels."""
    predictor = Predictor(
        inputs={
            "upstream": SequenceInput(length=1, variable_length=False),
            "downstream": SequenceInput(length=1, variable_length=False),
            "peptide": SequenceInput(length=3, variable_length=True)},
        outputs=Output(1, activation="sigmoid"))
    labels = np.array([True, False, True, False])
    features = {
        "upstream": ["Q", "A", "L", "I"],
        "downstream": ["S", "S", "S", "S"],
        "peptide": ["SYF", "QQ", "C", "GLL"],
    }
    predictor.fit(features, labels, epochs=20)
    predictions = predictor.predict(features)
    # Threshold the sigmoid outputs and require perfect recovery of labels.
    assert (labels == (predictions > 0.5)).all(), (labels, predictions)
| [
"alex.rubinsteyn@gmail.com"
] | alex.rubinsteyn@gmail.com |
c2cd7ebdf774bed98d83547aca4237ab5a6368de | e76fda1fba459456c4bc105e7a6dcc6277a1a26c | /django_cv/blog/views.py | 8c57056ca229de5098c6a2de02c156e4b20facf7 | [] | no_license | lafabo/i-love-tutorials | 6bb2a684a201975ab523d9721b02761a6269853c | eafcd47fd62e770107c7e1f08e0d6d60a539f1ec | refs/heads/master | 2021-01-21T04:46:56.365199 | 2016-07-20T17:38:03 | 2016-07-20T17:38:03 | 47,709,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
# Create your views here.
def post_list(request):
    """Render every post published up to now, oldest first."""
    published = (Post.objects
                 .filter(published_date__lte=timezone.now())
                 .order_by('published_date'))
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
    """Render one post by primary key, responding 404 when it is missing."""
    post = get_object_or_404(Post, pk=pk)
    context = {'post': post}
    return render(request, 'blog/post_detail.html', context)
| [
"lazyfatboy@ya.ru"
] | lazyfatboy@ya.ru |
f71826cc4a17768511a502866746130a64bd50c5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/145.py | fdf3695e27e276ca2a4e819ce28982205a858fd0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,398 | py | """ imports """
from __future__ import division
import glob, pickle, os, time, sys, argparse
from copy import copy
from numpy import array, sin, cos
import numpy as np
from pylab import *
from pprint import pprint
""" global variables """
""" classes """
""" functions """
def solve(C, F, X):
    """Cookie-clicker greedy (GCJ 2014 "Cookie Clicker Alpha").

    Starting at 2 cookies/s, a farm costs C cookies and adds F cookies/s;
    the goal is X cookies.  Buy farms while a farm purchase still shortens
    the total time, then finish.  Returns the minimum total time formatted
    with 7 decimal places.
    """
    rate = 2.
    # Pre-charge one farm's cost: the "finish now" time below is measured
    # relative to having already banked C cookies at the current rate.
    elapsed = C / rate
    while True:
        finish_now = (X - C) / rate
        finish_after_farm = X / (rate + F)
        if finish_now < finish_after_farm:
            # Buying another farm no longer pays off; run out the clock.
            return "{:.7f}".format(elapsed + finish_now)
        # Wait for the next farm's cost at the boosted rate, then buy it.
        elapsed += C / (rate + F)
        rate += F
""" parse input """
## parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("filename", default="default.in", nargs='?')
parser.add_argument("-t", "--test", action="store_true")
parser.add_argument("-l", "--lazytest", action="store_true")
args = parser.parse_args()
output = ""
TIC = time.time()
## read input lines
input_lines = open(args.filename).readlines()
def read_line():
return input_lines.pop(0).strip()
def read_ints():
return [int(x) for x in read_line().split(' ')]
def read_floats():
return [float(x) for x in read_line().split(' ')]
(numquestions,) = read_ints()
for questionindex in xrange(numquestions):
### parse input ###
C, F, X = read_floats()
### calculate answer ###
answer = solve(C, F, X)
assert answer != None
### output ###
#print "Calculating case #{}...".format(questionindex+1)
answer_str = "Case #{}: {}".format(questionindex+1, answer)
output += answer_str + '\n'
print answer_str
## write output
ofile = open('output', 'w').write(output)
TOC = time.time()
#print "done in {} s".format(TOC-TIC)
""" test """
if args.test:
def filter_extension(filename):
filename_parts = filename.split('.')
if len(filename_parts) > 1:
filename_parts = filename_parts[:-1]
return '.'.join(filename_parts)
print
print "== TESTING VALIDITY =="
try:
# check if all input was used
assert not len([l for l in input_lines if l.strip()]), "Not all input was used"
# filter extension of filename
filename_without_extension = filter_extension(args.filename)
# get calculated and correct lines
calculated_lines = [l.strip() for l in output.split('\n') if l.strip()]
correct_lines = [l.strip() for l in open("{}.out".format(filename_without_extension)).readlines() if l.strip()]
# check if number of lines match
assert len(correct_lines) == len(calculated_lines), "calculated {} lines but expected {}".format(len(calculated_lines), \
len(correct_lines))
# apply lazytest: filter away test numer
unfiltered_calculated_lines = calculated_lines
unfiltered_correct_lines = correct_lines
if args.lazytest:
def filter_test_number(l):
if l.startswith("Case #"):
parts = l.split('#')
parts[1] = parts[1][parts[1].index(':'):]
return '#'.join(parts)
else:
return l
calculated_lines = [filter_test_number(l) for l in calculated_lines]
correct_lines = [filter_test_number(l) for l in correct_lines]
# get lines that don't match
incorrect_line_numbers = []
for line_number, (correct_line, calculated_line) in enumerate(zip(correct_lines, calculated_lines)):
if correct_line != calculated_line:
incorrect_line_numbers.append(line_number)
if len(incorrect_line_numbers):
error_msg = "\n"
for line_number in incorrect_line_numbers:
error_msg += ' "{}" should be "{}"\n'.format(unfiltered_calculated_lines[line_number],
unfiltered_correct_lines[line_number])
raise AssertionError(error_msg)
print "SUCCESS"
except AssertionError as e:
print "\nFAILED:"
print str(e)
print
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
fd357ce22203e6ff0b435b0b66e4f227c52cbb08 | 4c0cfe74b972b6f758d479920118185f07b3ae66 | /lab/lab01/tests/q3_1_2.py | fbb2f30f45921e7bc06228e82c457a74816bf068 | [
"BSD-3-Clause"
] | permissive | ds-modules/Colab-data-8 | 20a72aee6b7d051d2aff50a49f02c89891201971 | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | refs/heads/main | 2023-05-29T04:05:47.976935 | 2021-06-02T23:15:06 | 2021-06-02T23:15:06 | 333,593,562 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | test = { 'name': 'q3_1_2',
       # NOTE(review): auto-generated OK (okpy) autograder config for
       # question 3.1.2; each 'code' string is a doctest snippet run
       # against the student's notebook namespace.  Regenerate via the
       # grading tooling rather than editing by hand.
       'points': 1,
       'suites': [ { 'cases': [ { 'code': ">>> #It looks like you didn't give anything the name;\n"
                                          ">>> # seconds_in_a_decade. Maybe there's a typo, or maybe you ;\n"
                                          '>>> # just need to run the cell below Question 3.2 where you defined ;\n'
                                          '>>> # seconds_in_a_decade. Click that cell and then click the "run;\n'
                                          '>>> # cell" button in the menu bar above.);\n'
                                          ">>> 'seconds_in_a_decade' in vars()\n"
                                          'True',
                                  'hidden': False,
                                  'locked': False},
                                { 'code': ">>> # It looks like you didn't change the cell to define;\n"
                                          '>>> # seconds_in_a_decade appropriately. It should be a number,;\n'
                                          ">>> # computed using Python's arithmetic. For example, this is;\n"
                                          '>>> # almost right:;\n'
                                          '>>> # seconds_in_a_decade = 10*365*24*60*60;\n'
                                          '>>> seconds_in_a_decade != ...\n'
                                          'True',
                                  'hidden': False,
                                  'locked': False},
                                { 'code': ">>> # It looks like you didn't account for leap years.;\n"
                                          '>>> # There were 2 leap years and 8 non-leap years in this period.;\n'
                                          '>>> # Leap years have 366 days instead of 365.;\n'
                                          '>>> seconds_in_a_decade != 315360000\n'
                                          'True',
                                  'hidden': False,
                                  'locked': False}],
                     'scored': True,
                     'setup': '',
                     'teardown': '',
                     'type': 'doctest'}]}
| [
"cheungclj108@berkeley.edu"
] | cheungclj108@berkeley.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.