blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e446b4b4c3699733ad5922d435466930f6cfb35b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02573/s534378621.py
|
53efd867886d93f3b9f58471c95fd08487dbc066
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# import sys
# input = sys.stdin.readline()
# Read n vertices and m edges; each edge is a 1-based pair (a, b).
n,m = map(int, input().split())
ab = []
for i in range(m):
    a,b = map(int, input().split())
    ab.append([a,b])
class UnionFind:
    """Disjoint-set (union-find) with path compression and union by size."""

    def __init__(self, N):
        # parent[i] == i marks a root; _size is meaningful only at roots.
        self.parent = [i for i in range(N)]
        self._size = [1] * N
        # Number of unite() calls that actually merged two components.
        self.count = 0

    def root(self, a):
        """Return the representative of a's component (iterative, with
        full path compression — same result as the recursive version)."""
        r = a
        while self.parent[r] != r:
            r = self.parent[r]
        while self.parent[a] != r:
            self.parent[a], a = r, self.parent[a]
        return r

    def is_same(self, a, b):
        """True when a and b are in the same component."""
        return self.root(a) == self.root(b)

    def unite(self, a, b):
        """Merge the components of a and b; no-op if already joined."""
        ra, rb = self.root(a), self.root(b)
        if ra == rb:
            return
        # Attach the smaller tree beneath the larger one.
        if self._size[ra] < self._size[rb]:
            ra, rb = rb, ra
        self._size[ra] += self._size[rb]
        self.parent[rb] = ra
        self.count += 1

    def size(self, a):
        """Size of the component containing a."""
        return self._size[self.root(a)]
# Merge every edge into the union-find structure.
uf = UnionFind(n)
for i in range(m):
    a, b = ab[i][0],ab[i][1]
    # Convert 1-based vertex labels to 0-based indices.
    a -= 1
    b -= 1
    if uf.is_same(a,b):
        continue
    uf.unite(a,b)
# Answer: size of the largest connected component.
x = 0
for i in range(n):
    x = max(x, uf._size[i])
print (x)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2fe6904da2931f0c0af9091b946ef4de9424f574
|
377ec156e459f70ad32e625de2dde2672736dd06
|
/Exercises/CorePythonExercises/ForMathModel.py
|
527020f502bff6dbe72d50c9e9bb9988e7b05e69
|
[] |
no_license
|
tsonglew/learn-python
|
b657cc34d3e27993ec0dcce152796bea43224d4f
|
edbf0b5d24bf0e2d9ad7aa5811c7d3aa0a66b57c
|
refs/heads/master
| 2021-06-13T13:46:35.199562
| 2017-04-14T16:57:38
| 2017-04-14T16:57:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
# -*- coding: utf-8 -*-
"""
例1 混合泳接力队的选拔
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5名候选人的百米成绩
甲 乙 丙 丁 戊
蝶泳 1'06"8 57"2 1'18" 1'10" 1'07"4
仰泳 1'15"6 1'06" 1'07"8 1'14"2 1'11"
蛙泳 1'27" 1'06"4 1'24"6 1'09"6 1'23"8
自由泳 58"6 53" 59"4 57"2 1'02"4
~~~~~~~~~~~~~~~~~~穷举所有的组队方案~~~~~~~~~~~~~~~~
选择语言: Python
"""
# Per-stroke 100 m times for the five candidates, in order 甲,乙,丙,丁,戊
# (unit: seconds).
Butterfly = [66.8, 57.2, 78, 70, 67.4]
Backstrock = [75.6, 66, 67.8, 74.2, 71]
Frog = [87, 66.4, 84.6, 69.6, 83.8]
Free = [58.6, 53, 59.4, 57.2, 62.4]
# Accumulates the total time of every valid team assignment.
Result = []


def func():
    # Python 2 script: brute-force every assignment of four DISTINCT
    # swimmers to the four strokes and report the fastest team.
    # NOTE(review): swimmers are identified via list.index(value) — safe
    # only because no list contains duplicate times; verify if data changes.
    print "甲记作1,乙记作2,丙记作3,丁记作4,戊记作5"
    flag = 1
    for a in Butterfly:
        for b in Backstrock:
            if Backstrock.index(b) == Butterfly.index(a):
                continue
            for c in Frog:
                if Frog.index(c) == Backstrock.index(b) \
                        or Frog.index(c) == Butterfly.index(a):
                    continue
                for d in Free:
                    if Free.index(d) == Frog.index(c) \
                            or Free.index(d) == Backstrock.index(b) \
                            or Free.index(d) == Butterfly.index(a):
                        continue
                    time = a + b + c + d
                    Result.append(time)
                    print "第", flag, "种", ".蝶泳第", Butterfly.index(a)+1, "个人",
                    print "仰泳第", Backstrock.index(b)+1, "个人",
                    print "蛙泳第", Frog.index(c)+1, "个人",
                    print "自由泳第", Free.index(d)+1, "个人",
                    print "总时间:", time
                    flag = flag + 1
    # Total number of valid assignments.
    print "所有的情况共有:", len(Result), " 种"
    SortedResult = sorted(Result)
    # Fastest total time and the 1-based scheme number that produced it.
    print "最短时间: ", SortedResult[0], "秒"
    num = Result.index(SortedResult[0]) + 1
    print "该方法为第", num, "种"


if __name__ == '__main__':
    func()
|
[
"417879751@qq.com"
] |
417879751@qq.com
|
fc07e28592592465f34667c2510771d8580a76b1
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_167/ch19_2019_08_30_17_33_49_991320.py
|
255123fd942fa40a66a7196baefba809573f081f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
import math

# Gravitational acceleration (m/s^2).
g = 9.8


def calcula_distancia_do_projetil(v, θ, y0):
    """Return the horizontal distance of a projectile launched with speed
    ``v`` (m/s) at angle ``θ`` (radians) from initial height ``y0`` (m).

    Bug fixes over the original:
    - ``d == ...`` was a comparison, leaving ``d`` undefined (NameError on
      ``return d``); it is now an assignment.
    - ``v**2/2*g`` multiplied by g instead of dividing (now ``v**2/(2*g)``).
    - ``**1/2`` computed ``(...)**1 / 2`` instead of a square root (now
      ``** 0.5``), and the stray ``*1+`` is read as the intended ``*(1+``.

    NOTE(review): the formula shape follows the author's expression; for a
    ground-level launch (y0 == 0) it reduces to v**2/g.
    """
    d = (v**2 / (2 * g)) * (1 + (1 + 2 * g * y0 / (v**2 * math.sin(θ)**2)) ** 0.5)
    return d
|
[
"you@example.com"
] |
you@example.com
|
8ca3f112dba2ddf412dc8ed04db02bafdb9be374
|
78b42a602bdabbf28321b267714f0b29e89a669e
|
/3-2.队列.py
|
1d5c1807ba1b3d50d86169ad3decc5bb6aaa2c1c
|
[] |
no_license
|
michelleweii/DataStructure-Algorithms
|
3c95240b8ed48237f23ff97754d6cc15cadc94dd
|
84f5d6e2811f5e6c4578a5e0d381a86cbc414ce9
|
refs/heads/master
| 2020-03-29T13:07:06.910799
| 2018-11-29T12:57:57
| 2018-11-29T12:57:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
# The end elements are removed from is called the head; the end they are added to is the tail.
class Queue(object):
    """FIFO queue: items enter at the tail and leave from the head."""

    def __init__(self):
        self.__list = []

    def enqueue(self, item):
        """Append *item* at the tail of the queue."""
        self.__list.append(item)

    def dequeue(self):
        """Remove and return the head item (first in, first out)."""
        return self.__list.pop(0)

    def is_empty(self):
        """Return True when no items are queued."""
        return not self.__list

    def size(self):
        """Return the number of queued items."""
        return len(self.__list)
# Double-ended queue: items can enter and leave at both ends, like two stacks joined at the base.
class Deque(object):
    """Double-ended queue: items can be added and removed at both ends."""

    def __init__(self):
        self.__list = []

    def add_front(self, item):
        """Insert *item* at the front of the deque."""
        self.__list.insert(0, item)

    def add_rear(self, item):
        """Append *item* at the rear of the deque."""
        return self.__list.append(item)

    def pop_front(self):
        """Remove and return the front item.

        Bug fix: the original returned ``self.__list == self.__list.pop(0)``
        — comparing the remaining list against the popped element (almost
        always False) — instead of the popped element itself.
        """
        return self.__list.pop(0)

    def pop_rear(self):
        """Remove and return the rear item (same bug fix as pop_front)."""
        return self.__list.pop()

    def is_empty(self):
        """Return True when the deque holds no items."""
        return self.__list == []

    def size(self):
        """Return the number of stored items."""
        return len(self.__list)
if __name__ == "__main__":
    # Demo: FIFO order — prints 1, 2, 3, 4.
    s = Queue()
    s.enqueue(1)
    s.enqueue(2)
    s.enqueue(3)
    s.enqueue(4)
    print(s.dequeue())
    print(s.dequeue())
    print(s.dequeue())
    print(s.dequeue())
    # Double-ended queue demo.
    # NOTE(review): despite the comment this instantiates Queue again, not
    # Deque — the Deque class is never exercised; confirm Deque() was meant.
    s = Queue()
    s.enqueue(1)
    s.enqueue(2)
    s.enqueue(3)
    s.enqueue(4)
    print(s.dequeue())
    print(s.dequeue())
    print(s.dequeue())
    print(s.dequeue())
|
[
"641052383@qq.com"
] |
641052383@qq.com
|
44803dedada3fec966306568b761e601637bccc8
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/NasFPN/mmdet/models/detectors/cascade_rcnn.py
|
47cc7cef984123804c4f99900d496807cde3c0e6
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
    r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
    Detection <https://arxiv.org/abs/1906.09756>`_"""

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # Pure pass-through constructor: cascade behaviour is configured
        # entirely through the roi_head.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

    def show_result(self, data, result, **kwargs):
        """Show prediction results of the detector.

        When ``result`` arrives as a dict, only its 'ensemble' entry is
        displayed (presumably the fused multi-stage output — confirm).
        """
        if self.with_mask:
            bbox_res, segm_res = result
            if isinstance(bbox_res, dict):
                result = (bbox_res['ensemble'], segm_res['ensemble'])
        elif isinstance(result, dict):
            result = result['ensemble']
        return super().show_result(data, result, **kwargs)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
6ff102d1cea23a24786b8d1335ce9d535b54fdb3
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/base_gengo/models/res_company.py
|
82bdf8e87666085dc1a354e8cba4071dc6357e27
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601
| 2021-03-20T07:59:08
| 2021-03-20T07:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
# -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from harpiya import fields, models
class res_company(models.Model):
    # Extends res.company with the Gengo translation-service credentials
    # and options used by the base_gengo addon.
    _inherit = "res.company"

    # API credentials; the private key is visible to system admins only.
    gengo_private_key = fields.Char(string="Gengo Private Key", copy=False, groups="base.group_system")
    gengo_public_key = fields.Text(string="Gengo Public Key", copy=False, groups="base.group_user")
    # Free-form comment attached to every request sent to Gengo.
    gengo_comment = fields.Text(string="Comments", groups="base.group_user",
        help="This comment will be automatically be enclosed in each an every request sent to Gengo")
    gengo_auto_approve = fields.Boolean(string="Auto Approve Translation ?", groups="base.group_user", default=True,
        help="Jobs are Automatically Approved by Gengo.")
    # Route requests to Gengo's sandbox environment instead of production.
    gengo_sandbox = fields.Boolean(string="Sandbox Mode",
        help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose.")
|
[
"yasir@harpiya.com"
] |
yasir@harpiya.com
|
66e2c5329e9521e3252d6ead99b95654d8cdaed4
|
b7c51f5e564d1f0e622fbe6e144f996d693af8cf
|
/Bins/MakeSparseMatrixSVD.py
|
c24629f3f51015d698bb93ed7c430d0c9260a6cd
|
[] |
no_license
|
GINK03/job-recommender-api
|
a223fb225a1231eaf1b56abd92d9aa8f20ff241b
|
6afdfa915918184debe96f5ac6932dfa30f7d4a5
|
refs/heads/master
| 2022-11-19T16:48:38.776963
| 2020-07-24T15:49:23
| 2020-07-24T15:49:23
| 263,545,893
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,429
|
py
|
import pickle
import gzip
import glob
from scipy.sparse import lil_matrix
from sklearn.decomposition import TruncatedSVD
# import faiss
import numpy as np
from pathlib import Path
from tqdm import tqdm
import sys
from concurrent.futures import ProcessPoolExecutor
import joblib
import pandas as pd
from os import environ as E
import psutil
import time
import bz2
from loguru import logger
HOME = Path.home()
FILE = Path(__file__).name
TOP_DIR = Path(__file__).resolve().parent.parent
def wight_tune(w):
    """Damp a raw term weight by applying log1p five times in a row."""
    tuned = w
    for _ in range(5):
        tuned = np.log1p(tuned)
    return tuned
# Document frequencies (loaded but not used in this script section —
# NOTE(review): confirm idf is needed here).
idf = pd.read_csv(f'{TOP_DIR}/var/doc_freq.csv')
# Vocabulary cap: term indices >= WORD_SIZE are dropped.
WORD_SIZE = 1000000

if "--create_transformer" in sys.argv:
    # Fit a TruncatedSVD on up to SAMPLE_SIZE user vectors and persist it.
    SAMPLE_SIZE = 1000000
    logger.info(f"total word size is = {WORD_SIZE}")
    start_time = time.time()

    def load(arg):
        # Load one pickled sparse user vector; users with fewer than 100
        # samples are skipped. Unreadable files are deleted on the spot.
        filename = arg
        try:
            with bz2.open(filename, "rb") as fp:
                vec = pickle.load(fp)
            # NOTE(review): this local SAMPLE_SIZE shadows the outer one —
            # here it is the per-user sample count stored in the pickle.
            SAMPLE_SIZE = vec["__SAMPLE_SIZE__"]
            del vec["__SAMPLE_SIZE__"]
            if SAMPLE_SIZE < 100:
                return None
            return (vec)
        except Exception as exc:
            logger.error(f"{exc}, (unknown)")
            Path(filename).unlink()
            return None

    args = []
    for idx, filename in tqdm(enumerate(glob.glob(f"{TOP_DIR}/var/user_vectors/*")[:SAMPLE_SIZE]), desc="load example users..."):
        args.append(filename)

    # Sparse (users x terms) matrix, filled row by row below.
    mtx = lil_matrix((SAMPLE_SIZE, WORD_SIZE))
    counter = 0
    with ProcessPoolExecutor(max_workers=psutil.cpu_count()) as exe:
        for ret in tqdm(exe.map(load, args), total=len(args), desc="load example users..."):
            if ret is None:
                continue
            vec = ret
            for term_idx, weight in vec.items():
                if term_idx >= WORD_SIZE:
                    continue
                # Damp raw weights before fitting (see wight_tune above).
                mtx[counter, term_idx] = wight_tune(weight)
            counter += 1
    logger.info(mtx.shape)
    # Trim unused rows (skipped users).
    mtx = mtx[:counter]
    logger.info(mtx.shape)
    # exit()
    logger.info(f"[{FILE}] start to train TruncatedSVD...")
    transformer = TruncatedSVD(n_components=500, n_iter=10, random_state=0)
    transformer.fit(mtx)
    elapsed_time = time.time() - start_time
    logger.info(f"[{FILE}] elapsed_time = {elapsed_time}")
    logger.info(f"[{FILE}] start to transform matrix...")
    # Sanity-check transform on the first 5000 rows only.
    X_transformed = transformer.transform(mtx[:5000])
    logger.info(X_transformed)
    logger.info(X_transformed.shape)
    logger.info(type(X_transformed))
    joblib.dump(transformer, f"{TOP_DIR}/var/transformer.joblib")
if "--transform" in sys.argv:
transformer = joblib.load(f"{TOP_DIR}/var/transformer.joblib")
""" 1000個づつ分割 """
filenames = glob.glob(f"{TOP_DIR}/var/user_vectors/*")
args = []
STEP = 4000
for i in range(0, len(filenames), STEP):
args.append((i, filenames[i:i+STEP]))
Path(f"{TOP_DIR}/var/transformed").mkdir(exist_ok=True, parents=True)
def load(arg):
key, filenames = arg
mtx = lil_matrix((STEP, WORD_SIZE))
usernames = []
counter = 0
for idx, filename in enumerate(filenames):
try:
with bz2.open(filename, "rb") as fp:
vec = pickle.load(fp)
except Exception as exc:
tb_lineno = sys.exc_info()[2].tb_lineno
logger.error(f"[{FILE}] exc = {exc}, tb_lineno = {tb_lineno}")
continue
SAMPLE_SIZE = vec["__SAMPLE_SIZE__"]
del vec["__SAMPLE_SIZE__"]
if SAMPLE_SIZE < 100:
continue
for term_idx, weight in vec.items():
if term_idx >= 1000000:
continue
mtx[counter, term_idx] = weight
usernames.append(Path(filename).name)
counter += 1
mtx = mtx[:counter]
X_transformed = transformer.transform(mtx)
data = (usernames, X_transformed)
logger.info(f"{len(usernames)}, {X_transformed.shape}")
if len(usernames) != X_transformed.shape[0]:
raise Exception("size not match!")
with bz2.open(f"{TOP_DIR}/var/transformed/{key:09d}.pkl.bz2", "wb") as fp:
fp.write(pickle.dumps(data))
with ProcessPoolExecutor(max_workers=psutil.cpu_count()) as exe:
for _ in tqdm(exe.map(load, args), total=len(args), desc="transforming..."):
_
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
5c2128c48f6cca29296f7fb6db51371bf51bddfe
|
e9e083aa75398a015e55ec5de655c262eb1496c6
|
/mod5-adv/threads/simple-non-daemon.py
|
c84fa8b3f9455600fa262c7ffd6e93d45bccacc2
|
[] |
no_license
|
michaelconst/csuf-pythonprog
|
54d98a878b34038a067c07c649a6025b8380b971
|
017ec2004482bbd20ce24d6c5ec8f0ae2a6cdb78
|
refs/heads/master
| 2021-01-21T10:00:30.268732
| 2017-03-14T01:29:44
| 2017-03-14T01:29:44
| 83,357,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
import threading
import time
import random
def do_work(t):
    """Sleep for *t* seconds, logging entry and exit with the thread name."""
    worker = threading.current_thread().name
    print('[{}] sleeping {}s'.format(worker, t))
    time.sleep(t)
    print('[{}] exiting'.format(worker))
# Spawn five non-daemon worker threads with random 1-5 s sleeps; the
# interpreter waits for all of them to finish before exiting.
for i in range(5):
    threading.Thread(target=do_work, args=(random.randint(1, 5),)).start()
|
[
"constantinm@sharplabs.com"
] |
constantinm@sharplabs.com
|
3d8e087c32269d3024415ff947d05fb54bc4b5ae
|
826085daea311de883ad1e8dfcc8ef5569f087bf
|
/broca/similarity/term/wikipedia.py
|
33c38f57abeaea4f2b39766fd32b32d37969b214
|
[
"MIT"
] |
permissive
|
parksebastien/broca
|
f2d10cfd6a7dcc6c069ee2e69d5faeb2e1004b67
|
7236dcf54edc0a4a54a55eb93be30800910667e7
|
refs/heads/master
| 2020-05-22T09:48:19.417396
| 2015-09-10T12:35:56
| 2015-09-10T12:35:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
from scipy.spatial.distance import pdist, squareform
from broca.similarity.term import TermSimilarity
from broca.knowledge.wikipedia import Wikipedia
from broca.vectorize.bow import BoWVectorizer
class WikipediaSimilarity(Wikipedia, TermSimilarity):
    def __init__(self, terms, wiki_conn=None):
        """Build a term-similarity lookup backed by Wikipedia pages.

        Fetches the Wikipedia page for each unique term (where available)
        and precomputes a pairwise similarity matrix from their
        bag-of-words vectors.
        """
        super().__init__(wiki_conn=wiki_conn)

        # Map each term to its row/column in the similarity matrix.
        unique_terms = set(terms)
        self.term_map = {term: idx for idx, term in enumerate(unique_terms)}

        # Vectorize fetched pages, then convert cosine distances into
        # similarities in (0, 1]: sim = 1 / (1 + distance).
        pages = [self.fetch_wikipage(term) for term in unique_terms]
        vectorizer = BoWVectorizer()
        vectors = vectorizer.vectorize(pages)
        distances = squareform(pdist(vectors.todense(), metric='cosine'))
        self.sim_mat = 1/(1 + distances)

    def __getitem__(self, terms):
        """Return the similarity for a ``(term1, term2)`` pair, or 0. when
        either term was not in the original term list."""
        t1, t2 = terms
        try:
            return self.sim_mat[self.term_map[t1], self.term_map[t2]]
        except KeyError:
            return 0.
|
[
"f+accounts@frnsys.com"
] |
f+accounts@frnsys.com
|
52fc0a314b8c7900c41339efe131dd5f2bc66806
|
8c69736d7ac2541be761d728284d315cefa90d28
|
/nodes/1.x/python/Element.Category+.py
|
1d9538abcbb641afdd57cccc319aef7a91d65b65
|
[
"MIT"
] |
permissive
|
ptrklk/ClockworkForDynamo
|
21ae4ab4ab70d02b6d706f16f312865cd73c4ace
|
90293d8fb74e6b3339acd6ca4ff69f695b6a02ac
|
refs/heads/master
| 2020-03-26T07:50:47.053264
| 2018-07-28T07:55:10
| 2018-07-28T07:55:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetCategory(item):
    """Resolve the Dynamo category wrapper for a Revit item, special-casing
    the Revit types whose category lives on a nested property."""
    type_name = item.GetType().ToString()
    if type_name == "Autodesk.Revit.DB.ViewSchedule":
        return Revit.Elements.Category.ById(item.Definition.CategoryId.IntegerValue)
    if type_name == "Autodesk.Revit.DB.Family":
        return Revit.Elements.Category.ById(item.FamilyCategoryId.IntegerValue)
    if type_name == "Autodesk.Revit.DB.GraphicsStyle":
        return Revit.Elements.Category.ById(item.GraphicsStyleCategory.Id.IntegerValue)
    if type_name == "Autodesk.Revit.DB.Category":
        # A category maps to its parent category, if it has one.
        if item.Parent:
            return Revit.Elements.Category.ById(item.Parent.Id.IntegerValue)
        return None
    # Generic fallback for elements exposing a Category property.
    if hasattr(item, "Category"):
        return Revit.Elements.Category.ById(item.Category.Id.IntegerValue)
    return None
# Dynamo node I/O: IN[0] is the input element or list of elements; OUT
# receives the matching category (or list of categories).
items = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetCategory(x) for x in items]
else: OUT = GetCategory(items)
|
[
"dieckmann@caad.arch.rwth-aachen.de"
] |
dieckmann@caad.arch.rwth-aachen.de
|
f63cba311274c50550ab3646cb08e9203bacea0f
|
9d961bd6a590cc96db0c1f9c72d84e3a66636edf
|
/심심풀이땅콩/[백준]2920.py
|
66cc611eced9e2f94957f850a848db3dff36e886
|
[] |
no_license
|
0equal2/Python_Programming
|
bae65338929e8e1a88247b8d23de805caa026702
|
2ac1d0262320220f49cbdb45e787e55e994d0b0f
|
refs/heads/master
| 2023-05-14T22:13:41.583214
| 2021-06-09T03:04:51
| 2021-06-09T03:04:51
| 304,628,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
# [BOJ] 2920 — read eight scale notes and classify the sequence.
notes = "".join(input().split())
if notes == "12345678":
    print("ascending")
elif notes == "87654321":
    print("descending")
else:
    print("mixed")
|
[
"duddms0115@gmail.com"
] |
duddms0115@gmail.com
|
6a726761b329c956b3768f904b22bebfcb704b34
|
2359121ebcebba9db2cee20b4e8f8261c5b5116b
|
/configs_pytorch/f30_pt.py
|
ab367f0af0238b8e8079026ca000938067363cfa
|
[] |
no_license
|
EliasVansteenkiste/plnt
|
79840bbc9f1518c6831705d5a363dcb3e2d2e5c2
|
e15ea384fd0f798aabef04d036103fe7af3654e0
|
refs/heads/master
| 2021-01-20T00:34:37.275041
| 2017-07-20T18:03:08
| 2017-07-20T18:03:08
| 89,153,531
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,549
|
py
|
#copy of j25
import numpy as np
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
restart_from_save = None
# Fixed seed so augmentation and splits are reproducible.
rng = np.random.RandomState(42)

# transformations
# Input patch geometry and label count used by the data iterators.
p_transform = {'patch_size': (256, 256),
               'channels': 3,
               'n_labels': 17}

# only lossless augmentations: 90-degree rotations and flips.
p_augmentation = {
    'rot90_values': [0,1,2,3],
    'flip': [0, 1]
}
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
    """Convert an image to a float32 CHW array in [0, 1] and apply the
    lossless training augmentations (module-level rng)."""
    arr = np.array(x.convert('RGB'))
    arr = np.swapaxes(arr, 0, 2)
    arr = (arr / 255.).astype(np.float32)
    return data_transforms.lossless(arr, p_augmentation, rng)
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
    """Convert an image to a float32 CHW array scaled to [0, 1] — no
    augmentation (validation/test path)."""
    arr = np.array(x.convert('RGB'))
    arr = np.swapaxes(arr, 0, 2)
    arr = arr / 255.
    return arr.astype(np.float32)
def label_prep_function(x):
    """Identity — labels are already in the target format."""
    return x
# data iterators
batch_size = 16
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk

# 5-fold stratified split: folds 0-3 train, fold 4 validation.
folds = app.make_stratified_split(no_folds=5)
print len(folds)
train_ids = folds[0] + folds[1] + folds[2] + folds[3]
valid_ids = folds[4]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4]
# Known-bad samples to exclude (currently none).
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
# NOTE(review): hard-coded test-set sizes — confirm they still match the
# 'test-jpg' and 'test2-jpg' datasets.
test_ids = np.arange(40669)
test2_ids = np.arange(20522)

# Training iterator: augmented, shuffled, loops forever.
train_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
                                                   batch_size=chunk_size,
                                                   img_ids = train_ids,
                                                   p_transform=p_transform,
                                                   data_prep_fun = data_prep_function_train,
                                                   label_prep_fun = label_prep_function,
                                                   rng=rng,
                                                   full_batch=True, random=True, infinite=True)
# Feature-extraction iterator over all labelled images (no augmentation).
feat_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
                                                  batch_size=chunk_size,
                                                  img_ids = all_ids,
                                                  p_transform=p_transform,
                                                  data_prep_fun = data_prep_function_valid,
                                                  label_prep_fun = label_prep_function,
                                                  rng=rng,
                                                  full_batch=False, random=False, infinite=False)
valid_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
                                                   batch_size=chunk_size,
                                                   img_ids = valid_ids,
                                                   p_transform=p_transform,
                                                   data_prep_fun = data_prep_function_valid,
                                                   label_prep_fun = label_prep_function,
                                                   rng=rng,
                                                   full_batch=False, random=False, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test-jpg',
                                                  batch_size=chunk_size,
                                                  img_ids = test_ids,
                                                  p_transform=p_transform,
                                                  data_prep_fun = data_prep_function_valid,
                                                  label_prep_fun = label_prep_function,
                                                  rng=rng,
                                                  full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2-jpg',
                                                   batch_size=chunk_size,
                                                   img_ids = test2_ids,
                                                   p_transform=p_transform,
                                                   data_prep_fun = data_prep_function_valid,
                                                   label_prep_fun = label_prep_function,
                                                   rng=rng,
                                                   full_batch=False, random=False, infinite=False)

# Training length: 40 epochs; validate twice and checkpoint once per epoch.
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 40
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(1 * nchunks_per_epoch)

# Step-decayed Adam learning-rate schedule keyed by chunk index.
learning_rate_schedule = {
    0: 1e-4,
    int(max_nchunks * 0.4): 3e-5,
    int(max_nchunks * 0.6): 1e-5,
    int(max_nchunks * 0.7): 5e-6,
    int(max_nchunks * 0.8): 2e-6,
    int(max_nchunks * 0.9): 1e-6
}
# model
class Net(nn.Module):
    # ImageNet-pretrained ResNet-34 whose final FC layer is replaced to
    # emit one logit per label.
    def __init__(self):
        super(Net, self).__init__()
        self.resnet = torchvision.models.resnet34(pretrained=True)
        self.resnet.fc = nn.Linear(self.resnet.fc.in_features, p_transform["n_labels"])
    def forward(self, x):
        x = self.resnet(x)
        # First 4 outputs form a softmax group (mutually exclusive); the
        # remaining labels get independent sigmoids.
        # NOTE(review): F.softmax/F.sigmoid without an explicit dim rely on
        # old PyTorch implicit-dim behaviour — confirm before upgrading.
        x_softmax = F.softmax(x.narrow(1,0,4))
        x_sigmoid = F.sigmoid(x.narrow(1,4,p_transform['n_labels']-4))
        x = torch.cat([x_softmax,x_sigmoid],1)
        return x
def build_model():
    """Wrap the network in the single-field Model namedtuple expected by
    the training harness."""
    Model = namedtuple('Model', ['l_out'])
    return Model(Net())
# loss
class MultiLoss(torch.nn.modules.loss._Loss):
    # Combined loss matching Net's output layout: cross-entropy over the
    # first 4 (softmaxed) outputs plus summed BCE over the remaining
    # labels, normalised by the total label count.
    def forward(self, input, target):
        # NOTE(review): _assert_no_grad is a private helper removed in later
        # PyTorch releases — this code targets an old torch version.
        torch.nn.modules.loss._assert_no_grad(target)
        # 1e-7 guards against log(0).
        softmax_loss = torch.sum(-torch.log(input.narrow(1,0,4)+1e-7)*target.narrow(1,0,4))
        binary_loss = F.binary_cross_entropy(input.narrow(1,4,p_transform['n_labels']-4),target.narrow(1,4,p_transform['n_labels']-4),weight=None,size_average=False)
        return (binary_loss+softmax_loss)/p_transform["n_labels"]
def build_objective():
    """Instantiate the combined softmax + BCE training loss."""
    objective = MultiLoss()
    return objective
def score(gts, preds):
    # Evaluate predictions with the competition F2 metric (app helper).
    return app.f2_score_arr(gts, preds)
# updates
def build_updates(model, learning_rate):
    """Create the Adam optimizer over all model parameters."""
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    return optimizer
|
[
"frederic.godin@ugent.be"
] |
frederic.godin@ugent.be
|
4e3dbf1d5e5f1322b18d30e741a4ecefa93b1978
|
e7795082c0131682803a09e929a86b2deddeab74
|
/common/PackageRequest.py
|
55551a9dd599fd01e2d21b48e965acf577cdcab7
|
[
"MIT"
] |
permissive
|
liwanlei/FXTest
|
01de3ad55849b16c49d93b58d1aae21fd0fdafa0
|
aeda58d01c14194290ca149d411c3a8596cca82d
|
refs/heads/master
| 2023-04-01T15:45:26.668688
| 2023-03-19T05:19:54
| 2023-03-19T05:19:54
| 97,098,845
| 807
| 419
|
MIT
| 2022-04-23T06:52:16
| 2017-07-13T08:27:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,324
|
py
|
# -*- coding: utf-8 -*-
# @Date : 2017-07-20 15:13:25
# @Author : lileilei
'''
requets模块的简单的封装
'''
import requests, json
from config import Interface_Time_Out
from requests import exceptions
from common.systemlog import logger
class reques():
    """Thin wrapper around `requests`.

    On success every method returns ``(parsed_json, elapsed_seconds)``;
    on failure it returns a single dict like ``{'get请求出错': reason}``.
    NOTE(review): callers must check the shape before unpacking — error
    paths return a dict, not a 2-tuple.
    """

    def get(self, url, headers, parms):  # GET request
        try:
            self.r = requests.get(url, headers=headers, params=parms, timeout=Interface_Time_Out)
            self.r.encoding = 'UTF-8'
            # Wall-clock duration of the request, in seconds.
            spend = self.r.elapsed.total_seconds()
            json_response = json.loads(self.r.text)
            return json_response, spend
        except exceptions.Timeout:
            logger.exception(exceptions.Timeout)
            return {'get请求出错': "请求超时"}
        except exceptions.InvalidURL:
            logger.exception(exceptions.InvalidURL)
            return {'get请求出错': "非法url"}
        except exceptions.HTTPError:
            logger.exception(exceptions.HTTPError)
            return {'get请求出错': "http请求错误"}
        except Exception as e:
            logger.exception(e)
            return {'get请求出错': "错误原因:%s" % e}

    def post(self, url, params, headers):  # POST request (JSON body)
        data = json.dumps(params)
        try:
            reponse = requests.post(url,
                                    data=data,
                                    timeout=Interface_Time_Out,
                                    headers=headers)
            # Unlike get(), non-200 responses are rejected explicitly here.
            if reponse.status_code != 200:
                return {'post请求出错': "状态码返回不是200"}
            json_response = json.loads(reponse.text)
            spend = reponse.elapsed.total_seconds()
            return json_response, spend
        except exceptions.Timeout:
            logger.info(exceptions.Timeout)
            return {'post请求出错': "请求超时"}
        except exceptions.InvalidURL:
            logger.info(exceptions.InvalidURL)
            return {'post请求出错': "非法url"}
        except exceptions.HTTPError:
            logger.info(exceptions.HTTPError)
            return {'post请求出错': "http请求错误"}
        except Exception as e:
            logger.info(e)
            return {'post请求出错': "错误原因:%s" % e}

    def put(self, url, params, headers):  # PUT request (JSON body)
        data = json.dumps(params)
        try:
            reponse = requests.put(url,
                                   data=data,
                                   timeout=Interface_Time_Out,
                                   headers=headers)
            if reponse.status_code != 200:
                logger.info(reponse.status_code)
                return {'put请求出错': "状态码返回不是200"}
            json_response = json.loads(reponse.text)
            spend = reponse.elapsed.total_seconds()
            return json_response, spend
        except exceptions.Timeout:
            logger.info(exceptions.Timeout)
            return {'put请求出错': "请求超时"}
        except exceptions.InvalidURL:
            logger.info(exceptions.InvalidURL)
            return {'put请求出错': "非法url"}
        except exceptions.HTTPError:
            logger.info(exceptions.HTTPError)
            return {'put请求出错': "http请求错误"}
        except Exception as e:
            logger.info(e)
            return {'put请求出错': "错误原因:%s" % e}

    def delfile(self, url, params, headers):  # DELETE request
        try:
            self.rdel_word = requests.delete(url, data=params,
                                             headers=headers,
                                             timeout=Interface_Time_Out)
            json_response = json.loads(self.rdel_word.text)
            spend = self.rdel_word.elapsed.total_seconds()
            return json_response, spend
        except exceptions.Timeout:
            logger.info(exceptions.Timeout)
            return {'delete请求出错': "请求超时"}
        except exceptions.InvalidURL:
            logger.info(exceptions.InvalidURL)
            return {'delete请求出错': "非法url"}
        except exceptions.HTTPError:
            logger.info(exceptions.HTTPError)
            return {'delete请求出错': "http请求错误"}
        except Exception as e:
            logger.info(e)
            return {'delete请求出错': "错误原因:%s" % e}

    def putfile(self, url, params, headers):  # PUT request variant
        # NOTE(review): near-duplicate of put() without the status-code
        # check — confirm both are still needed.
        try:
            self.rdata = json.dumps(params)
            me = requests.put(url, self.rdata, headers=headers,
                              timeout=Interface_Time_Out)
            json_response = json.loads(me.text)
            spend = me.elapsed.total_seconds()
            return json_response, spend
        except exceptions.Timeout:
            logger.exception(exceptions.Timeout)
            return {'put请求出错': "请求超时"}
        except exceptions.InvalidURL:
            logger.exception(exceptions.InvalidURL)
            return {'put请求出错': "非法url"}
        except exceptions.HTTPError:
            logger.exception(exceptions.HTTPError)
            return {'put请求出错': "http请求错误"}
        except Exception as e:
            logger.exception(e)
            return {'put请求出错': "错误原因:%s" % e}
|
[
"leileili126@163.com"
] |
leileili126@163.com
|
bc02bd5d06610caebebf2ffa034ff85777eb4b1d
|
d51e67abc7709b17664ee08eb9d68ecb938f00a9
|
/utils/farmbot.py
|
1ace2bcc577c12cf3db231e381d3c13f6f25e1c8
|
[] |
no_license
|
averdier/farmware_watering_farmdesign
|
bb6d3d2615c8ac5294e3bbd61cea97cf0950e05c
|
d474d78629504907971c7f7b2ee6b88954f6f4cf
|
refs/heads/master
| 2020-03-30T09:26:40.012409
| 2018-10-02T09:16:04
| 2018-10-02T09:16:04
| 151,075,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
# -*- coding: utf-8 -*-
import uuid
from time import time, sleep
from .api import send_celery_script, log, get_resource
from .geometry import Point3D
def prepare_move_absolute_script(position, speed):
    """Build the CeleryScript RPC payload for an absolute move.

    *position* must expose x/y/z attributes; *speed* is passed through
    unchanged. Each call gets a unique request label.
    """
    coordinate = {
        'kind': 'coordinate',
        'args': {'x': position.x, 'y': position.y, 'z': position.z}
    }
    zero_offset = {
        'kind': 'coordinate',
        'args': {'x': 0, 'y': 0, 'z': 0}
    }
    move = {
        'kind': 'move_absolute',
        'args': {
            'location': coordinate,
            'offset': zero_offset,
            'speed': speed
        }
    }
    return {
        'kind': 'rpc_request',
        'args': {'label': 'farmware_circle_' + str(uuid.uuid4())},
        'body': [move]
    }
class FarmBot:
    """Minimal FarmBot client: read the position and perform verified moves."""

    @property
    def position(self):
        # Current position as reported by the bot state endpoint.
        response = get_resource('/api/v1/bot/state')
        if response.status_code != 200:
            raise RuntimeError('Unable to get position')
        data = response.json()['location_data']['position']
        return Point3D(data['x'], data['y'], data['z'])

    def move(self, position, speed, tolerance, timeout):
        """Send an absolute move and poll (every 0.5 s) until it completes.

        While the reported position keeps changing the stall timer resets;
        once it stops changing, the move succeeds if within *tolerance* of
        the target, and raises RuntimeError('Movement timeout') after
        *timeout* seconds of stillness outside the tolerance.
        """
        # Coordinates are truncated to ints to match Point3D equality below.
        target = Point3D(
            int(position.x),
            int(position.y),
            int(position.z)
        )
        log('target position: ' + str(target), 'debug')
        celery_move_script = prepare_move_absolute_script(target, speed)
        current_position = self.position
        send_celery_script(celery_move_script)
        t0 = time()
        while not target == current_position:
            new_position = self.position
            if new_position == current_position:
                # Bot has stopped moving since the last poll.
                if new_position.is_near(target, tolerance):
                    break
                else:
                    t1 = time()
                    if t1 - t0 > timeout:
                        if not new_position.is_near(target, tolerance):
                            raise RuntimeError('Movement timeout')
                        else:
                            break
            else:
                # Still moving: record progress and restart the stall timer.
                current_position = new_position
                t0 = time()
            sleep(0.5)
|
[
"a.verdier@outlook.fr"
] |
a.verdier@outlook.fr
|
2b9bacb7050fe1eb24ece4e8eb572e2926d53cb4
|
4f340ed313a67bddf077a9d8da2205b9c2feb29a
|
/emv/util.py
|
dd9f8ac0b99ed0b904b87cec4e819cd092561dd2
|
[
"MIT"
] |
permissive
|
russss/python-emv
|
49b645be9ec60acc8cd23e95e5423a1b2c90a175
|
ce316ba05165e9b82026f72b55329b9bc287c0b8
|
refs/heads/main
| 2023-06-21T19:09:10.124472
| 2023-06-01T18:44:34
| 2023-06-11T15:26:11
| 76,971,486
| 105
| 30
|
MIT
| 2023-06-11T15:26:12
| 2016-12-20T16:10:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
import re
def hex_int(val):
    """Convert an integer into a decimal-encoded hex integer as bytes,
    which the EMV spec seems awfully keen on.

    >>> hex_int(123456)
    [0x12, 0x34, 0x56]
    >>> hex_int(65432)
    [0x06, 0x54, 0x32]
    """
    digits = str(val)
    # Left-pad to an even number of decimal digits.
    if len(digits) % 2:
        digits = "0" + digits
    out = []
    for pos in range(0, len(digits), 2):
        out.append(int(digits[pos:pos + 2], 16))
    return out
def from_hex_int(val):
    """Convert hex digits to decimal.

    >>> from_hex_int([0x12, 0x34])
    1234
    """
    hex_str = "".join("%02x" % byte for byte in val)
    # Trailing 0xF nibbles are padding in EMV numeric fields.
    return int(hex_str.rstrip("f"))
def from_hex_date(val):
    """Render the first three bytes of *val* as 'xx/xx/xx' hex fields."""
    parts = ["%02x" % byte for byte in val[:3]]
    return "/".join(parts)
def decode_int(val):
    """Fold a big-endian sequence of bytes into a single integer."""
    result = val[0]
    for byte in val[1:]:
        result = (result << 8) + byte
    return result
def format_bytes(data):
    """Render *data* as bracketed upper-case hex: ``255 -> '[FF]'``,
    ``[1, 2] -> '[01 02]'``.

    Fix: use ``isinstance`` instead of ``type(data) == int`` so int
    subclasses (e.g. IntEnum members, bool) take the scalar path rather
    than failing on iteration.
    """
    if isinstance(data, int):
        return "[%02X]" % data
    return "[" + " ".join(["%02X" % i for i in data]) + "]"
def unformat_bytes(data):
    """Parse whitespace- or colon-separated hex byte text into a list of ints."""
    tokens = re.split(r"(?:\s+|:)", data)
    return [int(token, 16) for token in tokens]
def bit_set(value, bit):
    """Return True when the given bit (0-based from the LSB) is set."""
    return bool((value >> bit) & 1)
|
[
"russ@garrett.co.uk"
] |
russ@garrett.co.uk
|
1b392e96dea3bc4c7baebfd90006596440762416
|
30ba09b0a746b1cef4c817fd87f1f310904a6d88
|
/test/test_dialogs/test_title_screen.py
|
770f143c244c1bfcf20b45cf482ac9f12c0d5667
|
[
"MIT",
"Artistic-1.0-Perl"
] |
permissive
|
krother/tilegamelib
|
69e610959294ed950f49fefd8e7d50ceb1ba09ed
|
a8165f27b166acca37f81e8432a70e0b2e028338
|
refs/heads/master
| 2022-07-21T20:07:35.031330
| 2022-07-10T20:18:19
| 2022-07-10T20:18:19
| 1,045,043
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from unittest import TestCase, main
from conftest import MENU_RECT, TITLE_IMAGE, TITLE_RECT
from tilegamelib.dialogs.title_screen import TitleScreen
from tilegamelib.events import QUIT_EVENT, EventGenerator
from tilegamelib.menu import VERTICAL_MOVES
class TitleScreenTests(TestCase):
    """Drives a TitleScreen (title image + menu) with scripted key events
    and checks that the expected menu entry's callback fires."""
    def setUp(self):
        # Fresh event generator and an empty result string that the menu
        # callbacks below append to.
        self.events = EventGenerator()
        self.result = ''
    def one(self):
        # Callback for menu entry 'One'.
        self.result += '1'
    def two(self):
        # Callback for menu entry 'Two'.
        self.result += '2'
    def three(self):
        # Callback for menu entry 'Three'.
        self.result += '3'
    def test_show_title(self, screen):
        """Displays a main menu."""
        # NOTE(review): unittest.TestCase does not inject `screen`; this
        # signature looks pytest-fixture-style — confirm how the suite
        # supplies it.
        menu = [
            ('One', self.one),
            ('Two', self.two),
            ('Three', self.three),
        ]
        title = TitleScreen(screen, self.events,
                            TITLE_RECT, TITLE_IMAGE, menu, MENU_RECT, VERTICAL_MOVES)
        # NOTE(review): K_UP/K_RETURN/K_DOWN/K_SPACE are not imported in
        # this module (pygame key constants) — as written this raises
        # NameError; confirm the intended `from pygame.locals import ...`.
        self.events.add_scripted_keys([K_UP, K_RETURN, K_DOWN, K_DOWN,
                                       K_SPACE, K_RETURN, K_UP, K_RETURN, K_UP], converter=int)
        self.events.add_scripted_event(QUIT_EVENT)
        title.run()
        # The scripted sequence should end on entry 'Three'.
        self.assertEqual(self.result, '3')
# Allow running this test module directly via unittest's CLI entry point.
if __name__ == '__main__':
    main()
|
[
"krother@academis.eu"
] |
krother@academis.eu
|
3742ff611dc02b777b4f83d39a8aade1e7dc7cfc
|
a2cd609a52eb5be16a248c054fb014394f12d344
|
/build/globalplanner/catkin_generated/pkg.installspace.context.pc.py
|
a04cf5b45702ff3cf2cd28d70712504d7506b0de
|
[] |
no_license
|
rfzeg/simon_thesis_ws
|
c5e6d6b20ee63010ffede91d17ba144527e5f6c5
|
dc79635f628dade14cab1a631cc4eb24aee1762c
|
refs/heads/master
| 2021-09-16T12:43:41.270235
| 2018-06-20T12:40:57
| 2018-06-20T12:40:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated pkg-config context for the `globalplanner` catkin
# package (install space); values are baked in at configure time.
CATKIN_PACKAGE_PREFIX = ""
# Header search paths; the guard avoids split('') yielding [''].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/simoneforno/simon_ws/install/include".split(';') if "/home/simoneforno/simon_ws/install/include" != "" else []
# Run-time catkin dependencies, converted to a space-separated string.
PROJECT_CATKIN_DEPENDS = "roscpp;pluginlib;costmap_2d;base_local_planner;nav_core".replace(';', ' ')
# Linker flags exported by this package.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lglobalplanner".split(';') if "-lglobalplanner" != "" else []
PROJECT_NAME = "globalplanner"
PROJECT_SPACE_DIR = "/home/simoneforno/simon_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"s.forno@student.tue.nl"
] |
s.forno@student.tue.nl
|
6ba857f45be0a377186287819331c0f00d600dc2
|
411eff94020c192d5e5f657fa6012232ab1d051c
|
/game/src/coginvasion/attack/LinearProjectile.py
|
f5e3003258b6aada6e2dd3d56c10f9ceea59b384
|
[] |
no_license
|
xMakerx/cio-src
|
48c9efe7f9a1bbf619a4c95a4198aaace78b8491
|
60b2bdf2c4a24d506101fdab1f51752d0d1861f8
|
refs/heads/master
| 2023-02-14T03:12:51.042106
| 2021-01-15T14:02:10
| 2021-01-15T14:02:10
| 328,268,776
| 1
| 0
| null | 2021-01-15T15:15:35
| 2021-01-09T23:51:37
|
Python
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
from src.coginvasion.attack.BaseProjectile import BaseProjectile
from src.coginvasion.attack.LinearProjectileShared import LinearProjectileShared
class LinearProjectile(BaseProjectile, LinearProjectileShared):
    """Client-side projectile combining the generic projectile base with
    the shared linear-motion mixin."""
    def __init__(self, cr):
        # Initialize both bases explicitly (no cooperative super() chain
        # in this multiple-inheritance layout).
        BaseProjectile.__init__(self, cr)
        LinearProjectileShared.__init__(self)
    def onSpawn(self):
        # Kick off the motion as soon as the projectile appears
        # (playLinear is presumably provided by the mixin — confirm).
        self.playLinear()
    def disable(self):
        # Tear down in reverse order of construction.
        LinearProjectileShared.cleanup(self)
        BaseProjectile.disable(self)
|
[
"maverick.liberty29@gmail.com"
] |
maverick.liberty29@gmail.com
|
a6cac7d77145dd89deb708f5be1407ffe096d320
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_suffocated.py
|
8b356fe7d1d8ac09890ee20e1e5dc5a7719a80ba
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from xai.brain.wordbase.verbs._suffocate import _SUFFOCATE
# class header
class _SUFFOCATED(_SUFFOCATE, ):
    """Word-bank entry for the inflected form 'suffocated'; inherits all
    behaviour from _SUFFOCATE and overrides only identifying fields."""
    def __init__(self,):
        _SUFFOCATE.__init__(self)
        self.name = "SUFFOCATED"
        self.specie = 'verbs'
        self.basic = "suffocate"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
280b60d0e3b097e45c49ec6db0db8f323a43d3c5
|
9568dee77459304ad0f7e01c9dea9432c11377d0
|
/maxlike_sim_filehandler.py
|
c91d4799c888691d93117cefde6a1784acc6f66d
|
[
"MIT"
] |
permissive
|
lbaumo/wtgpipeline
|
c101c7e7ec1491a1c40cbe14102662770641bb9a
|
73de01736e33769c09c4467e3c040545d7070407
|
refs/heads/master
| 2021-06-20T14:40:38.263891
| 2017-08-14T21:08:24
| 2017-08-14T21:08:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
#############################
# Handles loading files for a simulation run
#############################
import ldac, cPickle, numpy as np
import astropy.io.fits as pyfits
import pdzfile_utils, nfwutils, varcontainer
#############################
__cvs_id__ = "$Id$"
#############################
class SimFilehandler(object):
    """Loads the input files for a simulation run (object catalog, BPZ
    redshift table and PDZ file) and attaches the combined products to a
    manager object.  Python 2 era code (cPickle/pyfits API)."""
    ###############
    def addCLOps(self, parser):
        # Register the command-line options consumed by readData().
        parser.add_option('-i', '--inputcat', dest='inputCatFile',
                          help='Simulation format cat containing shape info')
        parser.add_option('-b', '--bpz', dest='inputBPZ',
                          help='BPZ file objects were drawn from')
        parser.add_option('-p', '--pdzfile', dest='inputPDZ',
                          help='Simulation PDZ file')
    #############################
    def createOptions(self, inputCatFile, inputBPZ, inputPDZ, options = None, args = None):
        # Build (or augment) an options container mirroring the CL options
        # above, for callers that bypass the command line.
        if options is None:
            options = varcontainer.VarContainer()
        options.inputCatFile = inputCatFile
        options.inputBPZ = inputBPZ
        options.inputPDZ = inputPDZ
        return options, args
    #############################
    def readData(self, manager):
        """Populate *manager* with the catalog, matched BPZ columns and
        the per-object PDZs."""
        options = manager.options
        # Object catalog; cluster metadata is read from the FITS header.
        manager.open('inputcat', options.inputCatFile, ldac.openObjectFile)
        manager.concentration = manager.inputcat.hdu.header['CONCEN']
        manager.zcluster = manager.inputcat.hdu.header['Z']
        # r500 derived from the NFW scale radius and concentration.
        manager.store('r500', nfwutils.rdelta, manager.inputcat.hdu.header['R_S'],
                      manager.concentration, 500)
        # The BPZ table may live under either of two extension names.
        bpz = ldac.openObjectFile(options.inputBPZ, 'STDTAB')
        if bpz is None:
            bpz = ldac.openObjectFile(options.inputBPZ, 'COS30PHOTZ')
        manager.matchedBPZ = bpz.matchById(manager.inputcat, 'z_id')
        bpz = manager.matchedBPZ
        # Graft the matched BPZ columns onto the input catalog.
        newcols = [pyfits.Column(name = 'z_b', format = 'E', array = bpz['BPZ_Z_B']),
                   pyfits.Column(name='odds', format = 'E', array = bpz['BPZ_ODDS']),
                   pyfits.Column(name='z_t', format = 'E', array = bpz['BPZ_T_B'])]
        inputcat = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs(newcols) + manager.inputcat.hdu.columns))
        manager.replace('inputcat', inputcat)
        # Per-object redshift probability distributions, promoted to float64.
        manager.open('pdzmanager', options.inputPDZ, pdzfile_utils.PDZManager.open)
        pdzrange, associatedPDZs = manager.pdzmanager.associatePDZ(manager.inputcat['z_id'])
        pdzrange = pdzrange.astype(np.float64)
        associatedPDZs = associatedPDZs.astype(np.float64)
        manager.pdzrange = pdzrange
        manager.pdz = associatedPDZs
        # The raw PDZ manager is no longer needed once extracted.
        manager.replace('pdzmanager', None)
|
[
"dapple@xoc7.stanford.edu"
] |
dapple@xoc7.stanford.edu
|
adeb59d010bdfae9169f837cf998303af2c9df2e
|
463c053bcf3f4a7337b634890720ea9467f14c87
|
/rllib/models/jax/misc.py
|
02ebd98fd261b684769013ef6b2f7fd3ba31bc62
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
pdames/ray
|
e8faddc4440976211a6bcead8f8b6e62c1dcda01
|
918d3601c6519d333f10910dc75eb549cbb82afa
|
refs/heads/master
| 2023-01-23T06:11:11.723212
| 2022-05-06T22:55:59
| 2022-05-06T22:55:59
| 245,515,407
| 1
| 1
|
Apache-2.0
| 2023-01-14T08:02:21
| 2020-03-06T20:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
import time
from typing import Callable, Optional
from ray.rllib.utils.framework import get_activation_fn, try_import_jax
jax, flax = try_import_jax()
nn = np = None
if flax:
import flax.linen as nn
import jax.numpy as np
class SlimFC:
    """Simple JAX version of a fully connected layer."""
    def __init__(
        self,
        in_size,
        out_size,
        initializer: Optional[Callable] = None,
        activation_fn: Optional[str] = None,
        use_bias: bool = True,
        prng_key: Optional[jax.random.PRNGKey] = None,
        name: Optional[str] = None,
    ):
        """Initializes a SlimFC instance.
        Args:
            in_size (int): The input size of the input data that will be passed
                into this layer.
            out_size (int): The number of nodes in this FC layer.
            initializer (Optional[Callable]): Kernel-weight initializer;
                defaults to flax Glorot (Xavier) uniform.
            activation_fn (str): An activation string specifier, e.g. "relu".
            use_bias (bool): Whether to add biases to the dot product or not.
            prng_key (Optional[jax.random.PRNGKey]): An optional PRNG key to
                use for initialization. If None, create a new random one.
            name (Optional[str]): An optional name for this layer.
        """
        # By default, use Glorot uniform initializer.
        if initializer is None:
            initializer = nn.initializers.xavier_uniform()
        # Seed from wall-clock time if no key was given, then split so the
        # stored key differs from the raw seed.
        self.prng_key = prng_key or jax.random.PRNGKey(int(time.time()))
        _, self.prng_key = jax.random.split(self.prng_key)
        # Create the flax dense layer.
        self._dense = nn.Dense(
            out_size,
            use_bias=use_bias,
            kernel_init=initializer,
            name=name,
        )
        # Initialize it: a dummy forward pass materializes the parameters.
        dummy_in = jax.random.normal(self.prng_key, (in_size,), dtype=np.float32)
        _, self.prng_key = jax.random.split(self.prng_key)
        self._params = self._dense.init(self.prng_key, dummy_in)
        # Activation function (if any; default=None (linear)).
        self.activation_fn = get_activation_fn(activation_fn, "jax")
    def __call__(self, x):
        # Forward pass: dense layer, then the optional activation.
        out = self._dense.apply(self._params, x)
        if self.activation_fn:
            out = self.activation_fn(out)
        return out
|
[
"noreply@github.com"
] |
pdames.noreply@github.com
|
08b869e8d8559db4d477d360515bc7c627507ca5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03129/s302066927.py
|
0887597072b00c7f90e5a7d87138c543cb18fc6f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
import sys
# Read N and K, then report whether 1..N contains at least K odd numbers.
N, K = map(int, input().split())
# There are (N + 1) // 2 odd numbers in 1..N, so the answer is O(1); the
# original scanned range(1, N + 1, 2), counting one by one and exiting
# early via sys.exit() once K was reached.  Output is unchanged.
print("YES" if (N + 1) // 2 >= K else "NO")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
77ec1a307afebd5162f2ea1d4eaaece759d3fd15
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5753053697277952_0/Python/BIPUL/a.py
|
3b8209e6b92d12243001a0707948776e81fe2d81
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
############### Author: Bipul Ranjan @ranjanbipul ###############
import sys
import time
import os
import math
import operator
import random
from functools import lru_cache
from decimal import Decimal as D
from fractions import Fraction as F
#sys.setrecursionlimit(10000)
#@lru_cache(maxsize=None)
MOD = 1000000007
################################################################
# Code Jam boilerplate: read the downloaded input file via stdin
# redirection and write answers to "<q>.out.txt".
QNO = 'a' #SET QUESTION NUMBER
FIN,FOUT = QNO+'.in.txt',QNO+'.out.txt'
FIN = QNO.capitalize()+'-small-attempt0.in'
#FIN = QNO+'.sample.txt'
#FIN = QNO.capitalize()+'-large.in'
fin = open(FIN)
fout = open(FOUT,'w')
sys.stdin = fin
######################## PROGRAM START ##########################
# NOTE(review): solve() is defined but never called below (dead code).
def solve(a,n):
    return len(a)
# Per test case: n party sizes in `a`; repeatedly announce one or two
# departures per step until the room (t people) is empty.
for nu in range(int(input())):
    n = int(input())
    a = [int(i) for i in input().strip().split(" ")]
    t = 0  # total people remaining across all parties
    for i in a: t+=i
    print("Case #{0}:".format(nu+1),file=fout,end=" ")
    while t>0:
        #print(t)
        s = []  # party indices chosen this step
        if t==2:
            # Two people left: release one person from each size-1 party.
            for i in range(n):
                if a[i]==1:
                    s.append(i)
                    t-=1
                    a[i]-=1
                    if len(s)==2: break
        elif t==3:
            # Three left: release a single person from a size-1 party.
            for i in range(n):
                if a[i]==1:
                    s.append(i)
                    t-=1
                    a[i]-=1
                    break
        else:
            # General case: take one person from each of the two currently
            # largest parties (two linear scans for the max index).
            m = 0
            for i in range(1,n):
                if a[i]>a[m]: m =i
            s.append(m)
            t-=1
            a[m]-=1
            m = 0
            for i in range(1,n):
                if a[i]>a[m]: m =i
            s.append(m)
            t-=1
            a[m]-=1
        # Emit the chosen parties as letters ('A' + index) for this step.
        s = [chr(i+65) for i in s]
        print("{0}".format("".join(s)),file=fout,end=" ")
    print("",file=fout)
######################## PROGRAM END #############################
fin.close()
fout.close()
print("Program complete")
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
ac6d4db8c9c186c23b51f9690ba1d838c76936cc
|
b9662213e143acea87e9af6f41143c50f23bec95
|
/main.py
|
3bcc258a67bac8bb863e85ccfce47cd210cce045
|
[] |
no_license
|
elprofedotti/python-mvc
|
3442680dc20d7cb0ec9c3bf09442daecda4c8e4e
|
9beee06de2e7a04ad872e037157a08dd2bb3dcc6
|
refs/heads/main
| 2023-08-04T08:17:03.886448
| 2021-04-20T12:57:17
| 2021-04-20T12:58:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
"""Entry point."""
from app.app import Application
from app.views import MainPage
from app.models import Book
if __name__ == "__main__":
app = Application(MainPage(Book.list()))
app.run()
|
[
"mbriolet.ma@gmail.com"
] |
mbriolet.ma@gmail.com
|
235c672831f5d93373e32f38e38f5655cf5ab225
|
fd48fba90bb227017ac2da9786d59f9b9130aaf0
|
/digsby/src/contacts/BuddyListElement.py
|
f33b3380035eee14ea46a75e7f98d16ccdad3ccf
|
[
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
niterain/digsby
|
bb05b959c66b957237be68cd8576e3a7c0f7c693
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
refs/heads/master
| 2021-01-18T10:07:10.244382
| 2013-11-03T02:48:25
| 2013-11-03T02:48:25
| 5,991,568
| 1
| 0
| null | 2013-11-03T02:48:26
| 2012-09-28T02:24:50
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
from util.primitives.funcs import isiterable
from common.actions import ActionMeta
class BuddyListElement(object):
    """Mixin for buddy-list entries: leaves report their own online state,
    iterable containers aggregate over their children."""
    __metaclass__ = ActionMeta
    @property
    def num_online(self):
        # Local import — presumably to avoid a circular dependency with
        # Contact; confirm.
        from Contact import Contact
        # Containers (iterable, but not a single Contact) sum children;
        # leaves coerce their `online` flag to 0/1.
        if isiterable(self) and not isinstance(self, Contact):
            return sum(elt.num_online for elt in self)
        else:
            return int(self.online)
    def find(self, obj):
        # NOTE(review): Python lists have no find() method — this raises
        # AttributeError if ever called; list.index() was probably meant.
        assert isinstance(self, list)
        return list.find(self, obj)
    def chat(self):
        # Open an IM conversation with this element on the wx GUI thread.
        import gui.imwin, wx
        wx.CallAfter(lambda: gui.imwin.begin_conversation(self))
|
[
"mdougherty@tagged.com"
] |
mdougherty@tagged.com
|
0a67f283ef42dca44daf76bbc4a66abe4d8c48dd
|
951a84f6fafa763ba74dc0ad6847aaf90f76023c
|
/Solu86.py
|
e7e90402710c612a3e2d5f06db28be58e7c62afc
|
[] |
no_license
|
SakuraGo/leetcodepython3
|
37258531f1994336151f8b5c8aec5139f1ba79f8
|
8cedddb997f4fb6048b53384ac014d933b6967ac
|
refs/heads/master
| 2020-09-27T15:55:28.353433
| 2020-02-15T12:00:02
| 2020-02-15T12:00:02
| 226,550,406
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
# # 86. 分隔链表
# 给定一个链表和一个特定值 x,对链表进行分隔,使得所有小于 x 的节点都在大于或等于 x 的节点之前。
#
# 你应当保留两个分区中每个节点的初始相对位置。
#
# 输入: head = 1->4->3->2->5->2, x = 3
# 输出: 1->2->2->4->3->5
# Definition for singly-linked list.
class ListNode:
    """A node of a singly linked list: a value plus a next pointer."""
    def __init__(self, x):
        self.next = None
        self.val = x
class Solution:
    """LeetCode 86 -- Partition List.

    Rearrange a linked list so every node with value < x precedes all
    nodes with value >= x, preserving the relative order inside each
    group (stable partition).
    """

    def partition(self, head: "ListNode", x: int) -> "ListNode":
        """Stable-partition the list at *head* around *x*; return new head.

        One pass, O(n) time / O(1) extra space: nodes are appended to one
        of two sub-lists (< x and >= x), which are then linked together.
        Fixes over the original: leftover debug print() calls removed, and
        the in-place splicing replaced by the simpler two-list build.
        Annotations are quoted so the method no longer needs ListNode at
        definition time.
        """
        less_head = less_tail = None   # sub-list of nodes with val < x
        ge_head = ge_tail = None       # sub-list of nodes with val >= x

        node = head
        while node is not None:
            nxt = node.next
            node.next = None           # detach to avoid accidental cycles
            if node.val < x:
                if less_tail is None:
                    less_head = less_tail = node
                else:
                    less_tail.next = node
                    less_tail = node
            else:
                if ge_tail is None:
                    ge_head = ge_tail = node
                else:
                    ge_tail.next = node
                    ge_tail = node
            node = nxt

        if less_tail is None:
            return ge_head
        less_tail.next = ge_head
        return less_head
|
[
"452681917@qq.com"
] |
452681917@qq.com
|
782ce93a0a256fec41cbd6777146f5c3cd2c5695
|
b3586235dc1e1acbd49fab996f581269a808480b
|
/sistema/planeamento/migrations/0013_ordemproducao_num_paletes_stock_in.py
|
1fcbb090362b7154911b2ca6ccc20eb7e102cd64
|
[] |
no_license
|
gonfersilva/Sistema
|
37ad1cd03dfbb7889fa0b0367c6ebd9044712ae3
|
4c6d9ade22040972efbe892eae0130939d7b5c46
|
refs/heads/master
| 2021-10-23T23:21:51.262723
| 2021-10-13T19:45:49
| 2021-10-13T19:45:49
| 155,545,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Generated by Django 2.2.7 on 2020-08-06 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds `num_paletes_stock_in` (IntegerField, default 0) to the
    OrdemProducao model."""
    dependencies = [
        ('planeamento', '0012_auto_20200805_1401'),
    ]
    operations = [
        migrations.AddField(
            model_name='ordemproducao',
            name='num_paletes_stock_in',
            field=models.IntegerField(default=0, verbose_name='Nº de paletes de stock inseridas'),
        ),
    ]
|
[
"goncalo.silva@elastictek.com"
] |
goncalo.silva@elastictek.com
|
095c6844b796a1aa2773a69bc20447cb7d6c0cd5
|
efe67da7ca1092e033fba7b0a251a43d9a165022
|
/get_city_from_id/views.py
|
5d69da18fd281507a348b06021c952f6c6f4bd2e
|
[] |
no_license
|
poojapauskar/savmytime-api
|
0a019077720ab9ec9bd113379e77f0f4c83ef887
|
2bb48d60fce24888c2d4e4ba3b1da8947242a1fd
|
refs/heads/master
| 2021-01-21T13:29:41.086409
| 2016-05-09T06:38:54
| 2016-05-09T06:38:54
| 54,106,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
from services.models import Services
from category.models import Category
from sub_category.models import Sub_category
from cities.models import Cities
from get_details.serializers import Get_detailsSerializer
from rest_framework import generics
# from ticket.permissions import IsOwnerOrReadOnly
# from rest_framework import permissions
from django.shortcuts import get_object_or_404
from django.db.models import Count
from django.http import JsonResponse
# class Get_listList(generics.ListCreateAPIView):
# queryset = Ticket.objects.all()
# serializer_class = Get_listSerializer
# permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class StatusCode(object):
    """HTTP status codes used by the JSON response helper in this module."""
    OK = 200
    NOT_FOUND = 404
    # add more status codes according to your need
import json
from django.http import HttpResponse
def JSONResponse(data=None, status=StatusCode.OK):
    """Serialize *data* (a dict) into a JSON HttpResponse.

    - data is None   -> empty body carrying *status*
    - non-empty dict -> pretty-printed JSON with the given *status*
    - anything else  -> 404 response

    Fixes over the original:
    - `HttpResponse(status)` passed the status code as the response *body*
      (the HTTP status stayed 200); it is now the `status` keyword.
    - `mimetype=` was removed from Django (1.7+); `content_type=` is used.
    - `json.dumps(..., encoding='utf-8')` is Python-2-only; the keyword is
      dropped (utf-8 was the default there anyway).
    """
    if data is None:
        return HttpResponse(status=status)
    if data and type(data) is dict:
        return HttpResponse(json.dumps(data, indent=4, sort_keys=True),
                            content_type='application/json', status=status)
    return HttpResponse(status=StatusCode.NOT_FOUND)
from django.views import generic
from django.views.generic import ListView
class CustomListView(ListView):
    """Returns the city name for the city id captured in the URL, as a
    JSON array of {'city': ...} objects."""
    #paginate_by = 2
    def get(self, request, *args, **kwargs):
        import sys
        # print >> sys.stderr, service_id
        # `id` is supplied by the URLconf keyword arguments.
        id1=self.kwargs['id']
        objects=list(Cities.objects.filter(id=id1).values('city'))
        # safe=False permits serializing a top-level list.
        return JsonResponse(objects,safe=False)
|
[
"git.poojapauskar@gmail.com"
] |
git.poojapauskar@gmail.com
|
1b6e511387f944e0ce53157aa60363c6551647e9
|
2a4290c36832e7080faa4104d58083c29ed1ea09
|
/prepro.py
|
244c70dc13f9e7ef94ec033204e0662e264afae7
|
[] |
no_license
|
jerryli27/my_dtn
|
0380e22fb7892f5d46084339a5edb24c8ed5b8c8
|
54b16f403a480c35d5ae331dbbfd0efed53880b9
|
refs/heads/master
| 2021-01-19T17:17:19.787195
| 2017-03-05T21:48:06
| 2017-03-05T21:48:06
| 82,433,878
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
"""
The cat face data comes from https://sites.google.com/site/catdatacollection/data
"""
import numpy as np
import pickle
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
def resize_images(image_arrays, size=[32, 32]):
    """Resize a batch of 2-D image arrays to *size*; returns (N, H, W, 1).

    Assumes float pixel values in [0, 1] (they are scaled by 255 below) —
    TODO confirm against the MNIST caller.
    """
    # convert float type to integer
    image_arrays = (image_arrays * 255).astype('uint8')
    resized_image_arrays = np.zeros([image_arrays.shape[0]]+size)
    for i, image_array in enumerate(image_arrays):
        image = Image.fromarray(image_array)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
        # Image.LANCZOS is the modern equivalent.
        resized_image = image.resize(size=size, resample=Image.ANTIALIAS)
        resized_image_arrays[i] = np.asarray(resized_image)
    # NOTE: mutable default `size` is never mutated here, so it is safe.
    return np.expand_dims(resized_image_arrays, 3)
def save_pickle(data, path):
    """Pickle *data* to *path* with the highest available protocol."""
    with open(path, 'wb') as sink:
        pickle.dump(data, sink, pickle.HIGHEST_PROTOCOL)
        print('Saved %s..' %path)
def main():
    """Load MNIST, resize train/test images to 32x32 and pickle them under
    the mnist/ directory."""
    mnist = input_data.read_data_sets(train_dir='mnist')
    # Flat 784-vectors -> (N, 28, 28) -> resized to (N, 32, 32, 1).
    train = {'X': resize_images(mnist.train.images.reshape(-1, 28, 28)),
             'y': mnist.train.labels}
    test = {'X': resize_images(mnist.test.images.reshape(-1, 28, 28)),
            'y': mnist.test.labels}
    save_pickle(train, 'mnist/train.pkl')
    save_pickle(test, 'mnist/test.pkl')
if __name__ == "__main__":
main()
|
[
"jerrylijiaming@gmail.com"
] |
jerrylijiaming@gmail.com
|
d0ecd64d2fe16d6c50d3a51d49b2fca1bdfdb0cd
|
e70b6032024b290e2ba11fa5266ef912d9ac14a2
|
/crits/comments/urls.py
|
bdb50ae5c327a6f98d12bcca1f9b041ce9ffffc0
|
[
"MIT"
] |
permissive
|
ckane/crits
|
105b45c00b6ad85064b8e33ecd12e7474fc84fd6
|
e2f2b71927d08e6432a0e540a0f4634da675ce53
|
refs/heads/master
| 2021-01-17T07:57:01.495393
| 2016-04-11T04:29:35
| 2016-04-11T04:29:35
| 20,622,493
| 2
| 0
| null | 2016-04-11T04:29:36
| 2014-06-08T17:45:07
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 558
|
py
|
from django.conf.urls import patterns

# URL routes for the comments app, all resolving into crits.comments.views.
# NOTE(review): `patterns()` with string view names was removed in
# Django 1.10 — this only works on the legacy Django this project targets.
urlpatterns = patterns('crits.comments.views',
    (r'^remove/(?P<obj_id>\S+)/$', 'remove_comment'),
    (r'^(?P<method>\S+)/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', 'add_update_comment'),
    (r'^activity/$', 'activity'),
    (r'^activity/(?P<atype>\S+)/(?P<value>\S+)/$', 'activity'),
    (r'^activity/get_new_comments/$', 'get_new_comments'),
    (r'^search/(?P<stype>[A-Za-z0-9\-\._]+)/(?P<sterm>.+?)/$', 'comment_search'),
    (r'^list/$', 'comments_listing'),
    (r'^list/(?P<option>\S+)/$', 'comments_listing'),
)
|
[
"mgoffin@mitre.org"
] |
mgoffin@mitre.org
|
03d3d6b6062452e771dd0f53277a1ff0b3f6c1e9
|
1df7ba55c4b61772c1a31c503e6b8881f1456dc5
|
/untitled9/apps/courses/migrations/0010_auto_20170209_1147.py
|
dfa9d9ea221e6a9ea9b645c7a86fa3dfcace7f37
|
[] |
no_license
|
fzk466569/python-django-pro
|
35918756060fcae375d3c99ea1a6934949b6d605
|
9add086b7a910f255df5b192268f1e117057e053
|
refs/heads/master
| 2021-01-19T13:18:14.141880
| 2017-02-19T12:16:29
| 2017-02-19T12:16:29
| 82,374,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-09 11:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters Course.learn_about to a 200-char CharField with an escaped
    Chinese verbose name."""
    dependencies = [
        ('courses', '0009_auto_20170209_1146'),
    ]
    operations = [
        migrations.AlterField(
            model_name='course',
            name='learn_about',
            field=models.CharField(default='', max_length=200, verbose_name='\u901a\u8fc7\u672c\u8bfe\u7a0b\u540e\u80fd\u5b66\u5230\u7684'),
        ),
    ]
|
[
"fzk466569"
] |
fzk466569
|
637c9052ec6cfa09517634c6b68099e9a6470ff8
|
7a8fcae483d18e87481443f4476d56b5180459e6
|
/statemachine/registry.py
|
c1632ebc937664e2f22b410a9f4334a197abc83f
|
[
"MIT"
] |
permissive
|
Gariagi/python-statemachine
|
63c67696f57eeda75054df399999bfe7fb21c783
|
7dddc714752ef56dd9e54fe246c0050f40c0ad2d
|
refs/heads/develop
| 2020-03-09T01:04:15.124145
| 2018-04-29T17:53:36
| 2018-04-29T17:53:36
| 128,504,334
| 0
| 0
|
MIT
| 2018-04-29T17:53:37
| 2018-04-07T06:56:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
# coding: utf-8
# Maps state-machine class name -> class; filled by the @register decorator.
_REGISTRY = {}
# Guards the one-time module autodiscovery performed by init_registry().
_initialized = False
def register(cls):
    """Class decorator: record *cls* in the registry under its class name."""
    _REGISTRY[cls.__name__] = cls
    return cls
def get_machine_cls(name):
    """Look up a registered machine class by name (KeyError if absent),
    triggering autodiscovery on first use."""
    init_registry()
    return _REGISTRY[name]
def init_registry():
    """Import candidate modules once so their @register decorators run."""
    global _initialized
    if not _initialized:
        load_modules(['statemachine', 'statemachines'])
        _initialized = True
def load_modules(modules=None):
    """Import the given module name from every installed Django app so that
    their @register decorators execute.  No-op outside a Django project.

    NOTE(review): `modules` defaults to None but is iterated unconditionally
    at the bottom — callers must pass a list.
    """
    try:
        import django # noqa
    except ImportError:
        # Not a django project
        return
    try: # pragma: no cover
        from django.utils.module_loading import autodiscover_modules
    except ImportError: # pragma: no cover
        # Django 1.6 compat to provide `autodiscover_modules`
        def autodiscover_modules(module_name):
            from django.conf import settings
            from django.utils.importlib import import_module
            for app in settings.INSTALLED_APPS:
                # Attempt to import the app's `module_name`.
                try:
                    import_module('{app}.{module}'.format(app=app, module=module_name))
                except Exception:
                    # Autodiscover semantics: a missing or broken module in
                    # one app must not abort the scan.
                    pass
    for module in modules:
        autodiscover_modules(module)
|
[
"fgmacedo@gmail.com"
] |
fgmacedo@gmail.com
|
14f178cebd12871b2e29d5b54a2c6d71d47622a4
|
3a1bae5b4a49c8f3050f37c3875a5a1dfd6f8316
|
/bookzen_flask/bookzen.py
|
7c91da763601eb07dd5749b3d1277ba732dc04ae
|
[] |
no_license
|
meomap/bookzen
|
9e86f5d3e63bfbd1e87fae41465115d27ebdd321
|
e764dea45c7a5174b728d225ef2aaef3ed09bc9b
|
refs/heads/master
| 2021-01-19T13:25:23.704102
| 2017-02-17T01:52:26
| 2017-02-17T01:52:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,555
|
py
|
# -*- coding: iso-8859-15 -*-
import json
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr
import smtplib
from flask import Flask, render_template, redirect, url_for
from flask_mongoengine import MongoEngine
from flask_wtf import FlaskForm as Form
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Email
app = Flask(__name__)
app.config.from_pyfile('settings.py')
db = MongoEngine(app)
class Books(db.Document):
    """MongoEngine document for one scraped book offer."""
    name = db.StringField()
    name_unidecode = db.StringField()  # ASCII-folded variant of `name`
    author = db.StringField()
    description = db.StringField()
    image_uri = db.StringField()
    price = db.StringField()  # stored as a string, not a number
    url = db.StringField()
    spider = db.StringField()
    server = db.StringField()
    project = db.StringField()
    date = db.DateTimeField()
    # Full-text index spanning both name fields, used by search_text().
    meta = {'indexes': [
        {'fields': ['$name', "$name_unidecode"]}]}
class SearchForm(Form):
    """Single-field form for searching book titles."""
    flash_msg = "Please search something so we can serve you"
    search = StringField("Search book\'s title", validators=[DataRequired(flash_msg)])
    submit = SubmitField()
class ContactForm(Form):
    """Contact form: name, email, subject and message — all required."""
    flash_msg = "Oops, look like you forget to fill this field."
    name = StringField("Name", [DataRequired(flash_msg)])
    email = StringField("Email", [Email(flash_msg)])
    subject = StringField("Subject", [DataRequired(flash_msg)])
    message = TextAreaField("Message", [DataRequired(flash_msg)])
    submit = SubmitField()
def str_handler(string):
    """Wrap a str/unicode keyword in double quotes for text search.

    NOTE(review): the `unicode` branch makes this Python-2-only; under
    Python 3 that name is undefined.
    """
    if isinstance(string, str):
        return json.dumps(string)
    elif isinstance(string, unicode):
        return '''\"{0}\"'''.format(string.encode('utf-8'))
@app.route('/', methods=["GET", "POST"])
def index():
    """Landing page: show the search form; on submit, redirect to the
    search results for the entered keyword."""
    form = SearchForm()
    if form.validate_on_submit():
        keyword = form.search.data
        return redirect(url_for('search', keyword=keyword))
    else:
        return render_template('index.html', form=form)
@app.route('/search/<keyword>')
def search(keyword):
    """Full-text search over book names; renders results or a not-found
    page."""
    form = SearchForm()
    # The search box is shown on the results page too, so a new query can
    # be submitted from here.
    if form.validate_on_submit():
        keyword = form.search.data
        return redirect(url_for('search', keyword=keyword))
    query = Books.objects.search_text(str_handler(keyword))
    # Ascending by `price` (a string field — ordering is lexicographic);
    # each document is flattened to a plain dict for the template.
    books = [dict(json.loads(i.to_json())) for i in query.order_by('+price')]
    if books:
        return render_template('results.html', form=form, books=books)
    else:
        return render_template('not_found.html', form=form)
@app.route('/contact/', methods=["GET", "POST"])
def contact():
    """Contact page: on a valid submit, relay the message to the site
    owner via Gmail SMTP and render a thank-you page."""
    form = ContactForm()
    if form.validate_on_submit():
        msg = MIMEMultipart()
        fromaddr = form.email.data
        toaddr = app.config["MY_EMAIL_ADDRESS"]
        msg['subject'] = form.subject.data
        # Encode the sender's display name as UTF-8 (RFC 2047 header).
        msg['from'] = formataddr((str(Header(form.name.data, 'utf-8')), fromaddr))
        msg['to'] = toaddr
        msg['reply-to'] = fromaddr
        body = form.message.data
        msg.attach(MIMEText(body, 'plain', 'utf-8'))
        # STARTTLS session on Gmail's submission port.
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(app.config['EMAIL_ACCOUNT'], app.config["EMAIL_PASSWORD"])
        text = msg.as_string()
        server.sendmail(fromaddr, toaddr, text)
        server.quit()
        return render_template('thanks.html')
    else:
        return render_template('contact.html', form=form)
@app.route('/about/')
def about():
    """Static about page."""
    return render_template('about.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
|
[
"tu0703@gmail.com"
] |
tu0703@gmail.com
|
0f260f1dbdd72e3b8c0677de922da98c7967e15b
|
bb7ebdd990d3265a585700e1083b3e916842aec6
|
/scrapper/article.py
|
c5d9ffb192e8310d3615e63aaf29c76355f4090d
|
[] |
no_license
|
c24b/clean_crawtext
|
2c2a6d6292f5bb00afa45ebb28ba16bf2f0a229e
|
f1dc20298da418680b1b96dd0468846cbf09a112
|
refs/heads/master
| 2020-05-18T00:58:55.308025
| 2014-08-19T11:50:52
| 2014-08-19T11:50:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,889
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from copy import deepcopy
from parsers import Parser
from cleaners import StandardDocumentCleaner
from formatters import StandardOutputFormatter
from extractors import StandardContentExtractor
import datetime
from BeautifulSoup import BeautifulSoup as bs
class Extractor(object):
'''Generic Extractor'''
@staticmethod
def run( url, raw_html,type, lang="en"):
if type == "article":
content = Article(url, raw_html, lang)
elif type == "defaut":
raise NotImplementedError
else:
raise NotImplementedError
return content.get()
class Article(Extractor):
'''Article'''
def __init__(self, url, raw_html, lang):
self.status = True
self.url = url
self.lang = lang
# title of the article
self.title = None
#text
self.article = u""
self.cleaned_text = u""
# meta
self.meta_description = u""
self.meta_lang = u""
self.meta_favicon = u""
self.meta_keywords = u""
#link and domain
self.canonical_link = u""
self.domain = u""
# cleaned text
self.top_node = None
self.tags = set()
self.final_url = url
self.raw_html = raw_html
# the lxml Document object
self.parser = Parser()
self.raw_doc = u""
self.publish_date = None
self.additional_data = {}
self.links = []
self.outlinks = []
self.inlinks = []
self.start_date = datetime.datetime.today()
def get(self):
try:
self.doc = self.parser.fromstring(self.raw_html)
#init extractor method
extractor = StandardContentExtractor(self,"en")
# init the document cleaner
cleaner = StandardDocumentCleaner(self)
# init the output formatter
formatter = StandardOutputFormatter(self, stopwords_class="en")
#doc
#self.doc = doc
self.raw_doc = deepcopy(self.raw_html)
self.title = extractor.get_title()
#self.title = self.title
#meta
self.meta_lang = extractor.get_meta_lang()
#self.meta_favicon = extractor.get_favicon()
#self.meta_description = extractor.get_meta_description()
#self.meta_description = self.meta_description.decode("utf-8")
#self.meta_keywords = extractor.get_meta_keywords()
#domain and url
self.canonical_link = extractor.get_canonical_link()
self.domain = extractor.get_domain()
#~
#~ #tag
#self.tags = extractor.extract_tags()
#~ #text
self.doc = cleaner.clean()
self.top_node = extractor.calculate_best_node()
if self.top_node is not None:
# post cleanup
self.top_node = extractor.post_cleanup(self.top_node)
# clean_text
#self.cleaned_text = formatter.get_formatted_text()
#self.content = self.content.decode("utf-8")
self.links = extractor.get_links()
self.outlinks = extractor.get_outlinks()
try:
self.content = formatter.get_formatted_text()
except Exception as e:
self.content = bs(self.raw_html).text
#self.inlinks, self.inlinks_err = extractor.get_outlinks(self.links)
# TODO
# self.article.publish_date = self.extractor.get_pub_date(doc)
# self.article.additional_data = self.extractor.more(doc)
return self
except Exception as e:
self.status = False
self.logs = {
"url": self.url,
"scope": "article extraction",
"msg": e.args,
"status": False,
"code": -2
}
return self
def repr(self):
return {
"url": self.canonical_link,
"domain": self.domain,
"title": self.title,
"content": self.content,
"description": self.meta_description,
"outlinks": self.outlinks,
"crawl_date": self.start_date,
"raw_html": self.raw_html,
}
def is_relevant(self, query):
self.content = {"title":unicode(self.title), "content": unicode(self.content)}
if query.match(self.content) is False:
self.status = {"url":self.url, "code": -1, "msg": "Not Relevant","status": False, "title": self.title, "content": self.content}
return False
else:
return True
|
[
"4barbes@gmail.com"
] |
4barbes@gmail.com
|
61b4081b28ae6a8a0b66826389ed5b2bf8f6a8bd
|
82b728e805d887102c0b8c415731b353877690cd
|
/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py
|
ff63ecabec79d5b44b37a6226ced8498752c4070
|
[
"Apache-2.0"
] |
permissive
|
geraint0923/python-aiplatform
|
90c7742c9bdbde05b9688b117e8e59c0406d6f85
|
7ab05d5e127636d96365b7ea408974ccd6c2f0fe
|
refs/heads/main
| 2023-08-24T05:30:38.519239
| 2021-10-27T20:38:25
| 2021-10-27T20:38:25
| 370,803,114
| 0
| 0
|
Apache-2.0
| 2021-05-25T19:15:47
| 2021-05-25T19:15:46
| null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDataLabelingJobs
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async]
from google.cloud import aiplatform_v1
async def sample_list_data_labeling_jobs():
"""Snippet for list_data_labeling_jobs"""
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataLabelingJobsRequest(
parent="projects/{project}/locations/{location}",
)
# Make the request
page_result = client.list_data_labeling_jobs(request=request)
async for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async]
|
[
"noreply@github.com"
] |
geraint0923.noreply@github.com
|
90229bbbe4785a5b999ee5d8722e20f28827c56f
|
c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171
|
/daily-coding-problems/problem395.py
|
bf3a13cf875b08ed3da6182d02891d6d297eafa6
|
[] |
no_license
|
carlhinderer/python-exercises
|
c8367517fdf835fa1117f96dbfee3dccc596afa6
|
4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7
|
refs/heads/master
| 2021-06-01T16:17:00.389134
| 2021-02-09T18:21:01
| 2021-02-09T18:21:01
| 150,902,917
| 0
| 0
| null | 2021-04-20T20:33:11
| 2018-09-29T21:03:36
|
Python
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
# Problem 395
# Medium
# Asked by Robinhood
#
# Given an array of strings, group anagrams together.
#
# For example, given the following array:
#
# ['eat', 'ate', 'apt', 'pat', 'tea', 'now']
#
# Return:
#
# [['eat', 'ate', 'tea'],
# ['apt', 'pat'],
# ['now']]
#
|
[
"carl.hinderer4@gmail.com"
] |
carl.hinderer4@gmail.com
|
efec59e8370e4f56814a2820c890bc9dc2fff0bd
|
659836ef3a9ac558538b016dbf4e128aa975ae7c
|
/backend/ingredient/migrations/0001_initial.py
|
4f51e388822ee86c8a34ae068419dd993474fd70
|
[] |
no_license
|
zzerii/save_your_ingredients
|
fda1c769d158bca9dfd3c28ac9ff34ed7ae4e6a3
|
5ebde82255c1a6edf0c19d9032015d05c9d0abc9
|
refs/heads/master
| 2023-02-21T22:19:28.954594
| 2021-01-22T11:39:16
| 2021-01-22T11:39:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
# Generated by Django 3.1.3 on 2020-11-13 05:52
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('info', models.CharField(max_length=255)),
('trim', models.CharField(max_length=255)),
],
),
]
|
[
"jinsoo941010@naver.com"
] |
jinsoo941010@naver.com
|
c97da6f4cbe2fab9d85310007193f7f9c0e31396
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_guess_vectors1.py
|
dc797ddfb00b810b4621cc8d73f069c7cf32de02
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500
| 2021-11-06T06:31:20
| 2021-11-06T06:31:20
| 62,589,715
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
from xcp2k.inputsection import InputSection
from xcp2k.classes._each591 import _each591
class _guess_vectors1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each591()
self._name = "GUESS_VECTORS"
self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
65b28ee3513376fdfc19b0ce3e8b4f8543856013
|
d737fa49e2a7af29bdbe5a892bce2bc7807a567c
|
/software/qt_examples/src/pyqt-official/webkit/formextractor/ui_formextractor.py
|
518bb9cef3ef753de6499455d980501f40903ab0
|
[
"GPL-3.0-only",
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later"
] |
permissive
|
TG-Techie/CASPER
|
ec47dfbfd6c3a668739ff4d707572e0b853518b4
|
2575d3d35e7dbbd7f78110864e659e582c6f3c2e
|
refs/heads/master
| 2020-12-19T12:43:53.825964
| 2020-01-23T17:24:04
| 2020-01-23T17:24:04
| 235,736,872
| 0
| 1
|
MIT
| 2020-01-23T17:09:19
| 2020-01-23T06:29:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,813
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'formextractor.ui'
#
# Created: Tue May 14 17:59:08 2013
# by: PyQt5 UI code generator 5.0-snapshot-b0831183bf83
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(680, 218)
self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout.setObjectName("horizontalLayout")
self.webFormGroupBox = QtWidgets.QGroupBox(Form)
self.webFormGroupBox.setObjectName("webFormGroupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.webFormGroupBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.webView = QtWebKitWidgets.QWebView(self.webFormGroupBox)
self.webView.setMinimumSize(QtCore.QSize(200, 150))
self.webView.setMaximumSize(QtCore.QSize(400, 16777215))
self.webView.setUrl(QtCore.QUrl("about:blank"))
self.webView.setObjectName("webView")
self.verticalLayout.addWidget(self.webView)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.horizontalLayout.addWidget(self.webFormGroupBox)
spacerItem = QtWidgets.QSpacerItem(28, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.dataGroupBox = QtWidgets.QGroupBox(Form)
self.dataGroupBox.setObjectName("dataGroupBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.dataGroupBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.FieldsStayAtSizeHint)
self.formLayout.setObjectName("formLayout")
self.firstNameLabel = QtWidgets.QLabel(self.dataGroupBox)
self.firstNameLabel.setObjectName("firstNameLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.firstNameLabel)
self.firstNameEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.firstNameEdit.setReadOnly(True)
self.firstNameEdit.setObjectName("firstNameEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.firstNameEdit)
self.lastNameLabel = QtWidgets.QLabel(self.dataGroupBox)
self.lastNameLabel.setObjectName("lastNameLabel")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.lastNameLabel)
self.lastNameEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.lastNameEdit.setReadOnly(True)
self.lastNameEdit.setObjectName("lastNameEdit")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lastNameEdit)
self.genderLabel = QtWidgets.QLabel(self.dataGroupBox)
self.genderLabel.setObjectName("genderLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.genderLabel)
self.genderEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.genderEdit.setReadOnly(True)
self.genderEdit.setObjectName("genderEdit")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.genderEdit)
self.updatesLabel = QtWidgets.QLabel(self.dataGroupBox)
self.updatesLabel.setObjectName("updatesLabel")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.updatesLabel)
self.updatesEdit = QtWidgets.QLineEdit(self.dataGroupBox)
self.updatesEdit.setReadOnly(True)
self.updatesEdit.setObjectName("updatesEdit")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.updatesEdit)
self.verticalLayout_3.addLayout(self.formLayout)
spacerItem1 = QtWidgets.QSpacerItem(20, 24, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem1)
self.horizontalLayout.addWidget(self.dataGroupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.webFormGroupBox.setTitle(_translate("Form", "Web Form"))
self.dataGroupBox.setTitle(_translate("Form", "Extracted Data"))
self.firstNameLabel.setText(_translate("Form", "First Name"))
self.lastNameLabel.setText(_translate("Form", "Last Name"))
self.genderLabel.setText(_translate("Form", "Gender"))
self.updatesLabel.setText(_translate("Form", "Receive Updates"))
from PyQt5 import QtWebKitWidgets
|
[
"TGTechie01@gmail.com"
] |
TGTechie01@gmail.com
|
a151f15578260c6246fa532d91a39e1ae25d102d
|
f7bc9ff51518d11d0d21249e57cdbd7277091e18
|
/0x02-python-import_modules/2-args.py
|
7ed9bebdf1babd5e74b51714354869b8faf4b092
|
[] |
no_license
|
veeteeran/holbertonschool-low_level_programming
|
578fd521de625e47406b9141920a531c0483f042
|
ff2f79942eb282ae485deda7d9598eda50723c3f
|
refs/heads/master
| 2020-12-29T04:41:46.509414
| 2020-12-27T14:49:57
| 2020-12-27T14:49:57
| 238,458,078
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
#!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
length = len(argv)
if length == 1:
print("{:d} arguments.".format(length - 1))
elif length == 2:
print("{:d} argument:".format(length - 1))
else:
print("{:d} arguments:".format(length - 1))
for i in range(1, length):
print("{:d}: {}".format(i, argv[i]))
|
[
"vietnamtran@gmail.com"
] |
vietnamtran@gmail.com
|
6dd98b7831f5feee90d9df6b5301c0257eb2e665
|
b972faf032590c9722dc240c45fc60157d5a1bee
|
/(구현)주사위네개.py
|
a7b250ef8517e21ed6b5335a12dac1288a5381af
|
[] |
no_license
|
kih1024/codingStudy
|
3a91b628bc301d1777d954595e93bf1f9246aca3
|
3e8a6fe86d3861613a85d3e75991f4bc7cd1e716
|
refs/heads/master
| 2022-12-09T04:58:55.264433
| 2020-09-22T07:29:44
| 2020-09-22T07:29:44
| 269,874,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
# https://www.acmicpc.net/problem/2484
def dice():
li = sorted(list(map(int, input().split())))
temp = set(li)
if len(temp) == 1:
return li[0] * 5000 + 50000
elif len(temp) == 2 and li[1] == li[2]:
return li[1] * 1000 + 10000
elif len(temp) == 2 and li[1] != li[2]:
return (li[1] * 500) + (li[2] * 500) + 2000
elif len(temp) == 3:
for i in range(3):
if li[i] == li[i + 1]:
return li[i] * 100 + 1000
else:
return li[-1] * 100
n = int(input())
# money = []
# for i in range(n):
# li = sorted(list(map(int, input().split())))
# money.append(dice())
# print(max(money))
print(max(dice() for i in range(n)))
|
[
"rladlsgh654@naver.com"
] |
rladlsgh654@naver.com
|
a8140ac64131df0e94f7c4b4e3daa8d4fbc87dbf
|
6d60ac89ee9c14bfc62342f7b33da3932f4eb564
|
/mini_build.py
|
8b0881434026fcc7a4ba9e7ec6986b0b8a269d1d
|
[
"MIT"
] |
permissive
|
samuelcolvin/donkey-simple-old
|
dda5b3c41387231b755965fa982bbb4c845e24c1
|
765810076c01d3677819e4f5a03aefd05300fbda
|
refs/heads/master
| 2021-05-28T19:49:45.269978
| 2014-05-30T17:04:33
| 2014-05-30T17:04:33
| 14,808,774
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
from DonkeySimple.DS.download import download_libraries
import os, re
lib_static_dir = os.path.join('DonkeySimple', 'WebInterface', 'static', 'libs')
libs_json_path = 'static_libraries.json'
try:
download_libraries(libs_json_path, lib_static_dir)
except Exception, e:
print 'ERROR: %s' % str(e)
print 'Problem downloading libraries, you may have problems with internet connection.\n\n'
print 'generating long_descriptions docs for PyPi...'
import pandoc
pandoc.core.PANDOC_PATH = '/usr/bin/pandoc'
doc = pandoc.Document()
readme_file = 'README.md'
doc.markdown = open(readme_file, 'r').read()
docs_file = 'DonkeySimple/docs.txt'
open(docs_file,'w').write(doc.rst)
print '%s converted to rst and written to %s' % (readme_file, docs_file)
print 'changing version number'
setup_text = open('setup.py','r').read()
s=re.search("version='(.+?)'", setup_text)
v = s.groups()[0]
print 'setting version to: %s' % v
init_file = 'DonkeySimple/__init__.py'
init_text = "__version__ = 'v%s'\n" % v
open(init_file,'w').write(init_text)
|
[
"s@muelcolvin.com"
] |
s@muelcolvin.com
|
3b1d6dec03293efd9bdbed97ea34210432f2cbb3
|
228ebc9fb20f25dd3ed2a6959aac41fd31314e64
|
/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py
|
aac9e2bc91fc75d16de26dfd01bb98512f260f25
|
[
"Apache-2.0"
] |
permissive
|
orionnye/python-aiplatform
|
746e3df0c75025582af38223829faeb2656dc653
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
refs/heads/main
| 2023-08-03T06:14:50.689185
| 2021-09-24T03:24:14
| 2021-09-24T03:24:14
| 410,091,957
| 1
| 0
|
Apache-2.0
| 2021-09-24T20:21:01
| 2021-09-24T20:21:00
| null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
manifest={
'ImageClassificationPredictionInstance',
},
)
class ImageClassificationPredictionInstance(proto.Message):
r"""Prediction input format for Image Classification.
Attributes:
content (str):
The image bytes or Cloud Storage URI to make
the prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
supported. - image/jpeg
- image/gif
- image/png
- image/webp
- image/bmp
- image/tiff
- image/vnd.microsoft.icon
"""
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
orionnye.noreply@github.com
|
93576199922965b76e6aae451eccce45bfffccf8
|
47175228ce25812549eb5203fc8b86b76fec6eb9
|
/API_scripts/dfp/dfp_python3/v201408/custom_targeting_service/update_custom_targeting_values.py
|
ff879dd7d5d7d8f3c1653d851a0c2fe2b2a727d5
|
[] |
no_license
|
noelleli/documentation
|
c1efe9c2bdb169baa771e9c23d8f4e2683c2fe20
|
a375698b4cf0776d52d3a9d3c17d20143bd252e1
|
refs/heads/master
| 2021-01-10T05:41:30.648343
| 2016-02-13T05:46:31
| 2016-02-13T05:46:31
| 51,477,460
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of the custom targeting values
belonging to a custom targeting key.
To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201408')
values = [{
'key': 'keyId',
'value': {
'xsi_type': 'NumberValue',
'value': key_id
}
}]
query = 'WHERE customTargetingKeyId = :keyId'
statement = dfp.FilterStatement(query, values)
while True:
# Get custom targeting values by statement.
response = custom_targeting_service.getCustomTargetingValuesByStatement(
statement.ToStatement())
# Update each local custom targeting value object by changing its name.
if 'results' in response:
updated_values = []
for value in response['results']:
if not value['displayName']:
value['displayName'] = value['name']
value['displayName'] += ' (Deprecated)'
updated_values.append(value)
values = custom_targeting_service.updateCustomTargetingValues(
updated_values)
# Display results.
for value in values:
print(('Custom targeting value with id \'%s\', name \'%s\', and display'
' name \'%s\' was updated.'
% (value['id'], value['name'], value['displayName'])))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
if response['totalResultSetSize'] == 0:
print('No custom targeting values were updated.')
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CUSTOM_TARGETING_KEY_ID)
|
[
"noelle@makermedia.com"
] |
noelle@makermedia.com
|
4e68d6993b53a2ddd25f70df5669610a0df20cf0
|
952762035d7ffa7c46a2c7dc85063f08b1c4a975
|
/2019/23/intcode.py
|
be46113b6662b2f5f65c6203efea2182ca7f225a
|
[] |
no_license
|
pjot/advent-of-code
|
c8a59df25b3c1afa0e14fd22139b9ac3b789ff4d
|
4b8b5c55c44dc8325caa2aeea7aa064a98738fd7
|
refs/heads/master
| 2023-03-10T20:22:44.724300
| 2023-03-04T21:01:02
| 2023-03-04T21:01:02
| 225,183,593
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,708
|
py
|
def parse_file(filename):
with open(filename) as f:
code = f.readline()
ints = code.split(',')
program = [int(i) for i in ints]
return program
def parse_instruction(instruction):
full_instruction = '{:05d}'.format(instruction)
return (
instruction % 100,
int(full_instruction[2]),
int(full_instruction[1]),
int(full_instruction[0]),
)
class Computer:
def __init__(self, program, inputs):
self.position = 0
self.base = 0
self.inputs = inputs
self.tape = {k: v for k, v in enumerate(program)}
self.output = None
def iterate_once(self=None):
self.iterate()
return self.output
def run_to_output(self):
done = False
outputs = []
while not done:
done = self.iterate()
if not done:
outputs.append(self.output)
return outputs
def read(self, delta=0, mode=1):
if mode == 2:
return self.tape.get(
self.base + self.tape.get(self.position + delta, 0),
0
)
if mode == 1:
return self.tape.get(self.position + delta, 0)
if mode == 0:
return self.tape.get(
self.tape.get(self.position + delta, 0),
0
)
def write(self, delta, value, mode):
if mode == 2:
key = self.base + self.tape[self.position + delta]
if mode == 1:
key = self.position + delta
if mode == 0:
key = self.tape[self.position + delta]
self.tape[key] = value
def iterate(self):
while True:
instruction = self.read()
op_code, mode_a, mode_b, mode_c = parse_instruction(instruction)
if op_code == 99:
return True
if op_code == 1:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
self.write(3, a + b, mode_c)
self.position += 4
if op_code == 2:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
self.write(3, a * b, mode_c)
self.position += 4
if op_code == 3:
if self.inputs:
self.write(1, self.inputs.pop(0), mode_a)
self.position += 2
else:
return False
if op_code == 4:
self.output = self.read(1, mode_a)
self.position += 2
return False
if op_code == 5:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
if a != 0:
self.position = b
else:
self.position += 3
if op_code == 6:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
if a == 0:
self.position = b
else:
self.position += 3
if op_code == 7:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
value = 1 if a < b else 0
self.write(3, value, mode_c)
self.position += 4
if op_code == 8:
a = self.read(1, mode_a)
b = self.read(2, mode_b)
value = 1 if a == b else 0
self.write(3, value, mode_c)
self.position += 4
if op_code == 9:
a = self.read(1, mode_a)
self.base += a
self.position += 2
|
[
"peter.bergstrom@trioptima.com"
] |
peter.bergstrom@trioptima.com
|
3f263a2f1abc45d83649b00dfc604ebb900b4cbd
|
fa7deca280e1443d5ca79e9910f295a668be14b0
|
/compile.py
|
bc770262d10363c06f9b6fdd5e15420b7d681620
|
[
"Apache-2.0"
] |
permissive
|
tomas-cliqz/ichnaea
|
e210ba419eb2a69553594e3dd4dba2c56c88753a
|
1e49cc694b1e9c850417ac093e81849b1886b19e
|
refs/heads/master
| 2021-01-12T20:00:27.455686
| 2016-01-27T14:00:22
| 2016-01-27T14:00:22
| 49,965,385
| 0
| 0
| null | 2016-01-19T16:17:23
| 2016-01-19T16:17:22
| null |
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
"""
This script is used as part of the "make release" command used as part of
building an rpm of this entire virtualenv.
The rpm building process compiles all *.py files found anywhere in the
source tree, independent of whether or not these would actually be used.
It finds some Python files which aren't meant for the specific Python
version being build this way and would abort the build process.
We therefor specifically remove files from our site-packages directory,
which aren't meant for the current Python version and include incompatible
Python syntax.
"""
from compileall import compile_dir
from distutils.sysconfig import get_python_lib
import os
import os.path
import sys
# files excluded when run under Python 2.x
PYTHON_2_INCOMPATIBLE = [
'gunicorn/workers/_gaiohttp.py',
'linecache2/tests/inspect_fodder2.py',
]
# files excluded when run under Python 3.x
PYTHON_3_INCOMPATIBLE = [
'gevent/_util_py2.py',
]
def compile_files(path):
return compile_dir(path, maxlevels=50, quiet=True)
def remove_incompatible_files(path):
excludes = []
if sys.version_info < (3, 0):
excludes.extend(PYTHON_2_INCOMPATIBLE)
if sys.version_info >= (3, 0):
excludes.extend(PYTHON_3_INCOMPATIBLE)
for e in excludes:
fp = os.path.join(path, e)
for extension in ('', 'c', 'o'):
name = fp + extension
if os.path.exists(name):
print('Removing file %s with incompatible syntax.' % name)
os.remove(name)
def main():
sp = get_python_lib()
remove_incompatible_files(sp)
status = compile_files(sp)
sys.exit(not status)
if __name__ == '__main__':
main()
|
[
"hanno@hannosch.eu"
] |
hanno@hannosch.eu
|
f745deca3f8bab4ea141b85d67a953beab121496
|
025fa245d4cbffdaa422287ed2f31c4d0442ee28
|
/orders/api/serializers.py
|
15a4a1cf958d28696d037af645fde921fd646007
|
[
"MIT"
] |
permissive
|
elcolie/zero-to-deploy
|
01f346ca50b8ccb271faef23934abe6a487baca6
|
6191a33ef55af7c550c0e529a4e373bfe40bc014
|
refs/heads/master
| 2022-02-08T23:22:17.008555
| 2018-06-15T19:39:06
| 2018-06-15T19:39:06
| 137,083,690
| 0
| 0
|
MIT
| 2022-01-21T19:35:33
| 2018-06-12T14:28:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
from rest_framework import serializers
from menus.models import Menu
from order_items.api.serializers import ShortItemSerializer
from order_items.models import OrderItem
from orders.models import Order
class OrderSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='api:order-detail')
customer = serializers.CurrentUserDefault()
order_items = ShortItemSerializer(read_only=True, many=True)
menus = serializers.PrimaryKeyRelatedField(queryset=Menu.objects.all(), many=True, write_only=True)
class Meta:
model = Order
fields = [
'url',
'customer',
'order_items',
'menus',
'sum',
'created_at',
'updated_at',
]
extra_kwargs = {
'created_at': {'read_only': True},
'updated_at': {'read_only': True},
}
def create(self, validated_data):
menus = validated_data.pop('menus')
order = Order.objects.create(customer=validated_data.get('customer'))
for item in menus:
OrderItem.objects.bulk_create([
OrderItem(order=order, menu=item)
])
return order
|
[
"sarit@elcolie.com"
] |
sarit@elcolie.com
|
33d9a5f14e08e91b0a36cea7796270daf9f6d3fc
|
71f19c14de97846193972830fbc9a4ec972d0ca9
|
/website/management/commands/tweet_something.py
|
9c69a942adf0593825c1b90f2c1043deb803ba42
|
[] |
no_license
|
wbt/govtrack.us-web
|
87790050aaba83ca6cca16e26ec796603317e7da
|
c94c0dfdc809ad506a54108802a2328cc26ca97d
|
refs/heads/master
| 2021-01-25T06:25:24.943845
| 2017-07-26T13:55:01
| 2017-07-26T13:55:01
| 93,572,164
| 0
| 0
| null | 2017-09-24T03:54:30
| 2017-06-06T23:15:59
|
Python
|
UTF-8
|
Python
| false
| false
| 7,400
|
py
|
#;encoding=utf8
from django.db.models import F
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from django.template.defaultfilters import truncatechars
from collections import defaultdict
import json, os, sys
from datetime import timedelta
class OkITweetedSomething(Exception):
pass
class Command(BaseCommand):
help = 'Tweets something interesting as @GovTrack.'
tweets_storage_fn = 'data/misc/tweets.json'
def handle(self, *args, **options):
# Construct client.
import twitter
self.twitter = twitter.Api(consumer_key=settings.TWITTER_OAUTH_TOKEN, consumer_secret=settings.TWITTER_OAUTH_TOKEN_SECRET,
access_token_key=settings.TWITTER_ACCESS_TOKEN, access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET)
# What have we tweeted about before? Let's not tweet
# it again.
self.load_previous_tweets()
try:
# Send out a tweet.
self.tweet_something()
except OkITweetedSomething:
pass
finally:
# Save the updated cache of previous tweets for next time.
self.save_previous_tweets()
def load_previous_tweets(self):
if not os.path.exists(self.tweets_storage_fn):
self.previous_tweets = { }
else:
self.previous_tweets = json.loads(open(self.tweets_storage_fn).read())
def save_previous_tweets(self):
with open(self.tweets_storage_fn, 'w') as output:
json.dump(self.previous_tweets, output, sort_keys=True, indent=2)
###
def tweet_something(self):
# Find something interesting to tweet!
self.tweet_new_signed_laws_yday()
self.tweet_votes_yday(True)
self.tweet_new_bills_yday()
self.tweet_coming_up()
self.tweet_a_bill_action()
self.tweet_votes_yday(False)
###
def post_tweet(self, key, text, url):
if key in self.previous_tweets:
return
text = truncatechars(text, 140-1-23-3) + " " + url
text += u" 🏛️" # there's a civics building emoji there indicating to followers this is an automated tweet? the emoji is two characters (plus a space before it) as Twitter sees it
if "TEST" in os.environ:
# Don't tweet. Just print and exit.
print key, text
sys.exit(1)
tweet = self.twitter.PostUpdate(text, verify_status_length=False) # it does not do link shortening test correctly
self.previous_tweets[key] = {
"text": text,
"when": timezone.now().isoformat(),
"tweet": tweet.AsDict(),
}
#print(json.dumps(self.previous_tweets[key], indent=2))
raise OkITweetedSomething()
###
def tweet_new_signed_laws_yday(self):
# Because of possible data delays, don't tweet until the afternoon.
if timezone.now().hour < 12: return
# Tweet count of new laws enacted yesterday.
from bill.models import Bill, BillStatus
count = Bill.objects.filter(
current_status_date__gte=timezone.now().date()-timedelta(days=1),
current_status_date__lt=timezone.now().date(),
current_status=BillStatus.enacted_signed,
).count()
if count == 0: return
self.post_tweet(
"%s:newlaws" % timezone.now().date().isoformat(),
"%d new law%s signed by the President yesterday." % (
count,
"s were" if count != 1 else " was",
),
"https://www.govtrack.us/congress/bills/browse#current_status[]=28&sort=-current_status_date")
def tweet_votes_yday(self, if_major):
# Tweet count of votes yesterday, by vote type if there were any major votes.
from vote.models import Vote, VoteCategory
votes = Vote.objects.filter(
created__gte=timezone.now().date()-timedelta(days=1),
created__lt=timezone.now().date(),
)
if votes.count() == 0: return
has_major = len([v for v in votes if v.is_major]) > 0
if not has_major and if_major: return
if not has_major:
count = votes.count()
msg = "%d minor vote%s held by Congress yesterday." % (
count,
"s were" if count != 1 else " was",
)
else:
counts = defaultdict(lambda : 0)
for v in votes:
counts[v.category] += 1
counts = list(counts.items())
counts.sort(key = lambda kv : (VoteCategory.by_value(kv[0]).importance, -kv[1]))
msg = "Votes held by Congress yesterday: " + ", ".join(
str(value) + " on " + VoteCategory.by_value(key).label
for key, value in counts
)
self.post_tweet(
"%s:votes" % timezone.now().date().isoformat(),
msg,
"https://www.govtrack.us/congress/votes")
def tweet_new_bills_yday(self):
# Because of possible data delays, don't tweet until the afternoon.
if timezone.now().hour < 12: return
# Tweet count of new bills introduced yesterday.
from bill.models import Bill, BillStatus
count = Bill.objects.filter(
introduced_date__gte=timezone.now().date()-timedelta(days=1),
introduced_date__lt=timezone.now().date(),
).count()
if count == 0: return
self.post_tweet(
"%s:newbills" % timezone.now().date().isoformat(),
"%d bill%s introduced in Congress yesterday." % (
count,
"s were" if count != 1 else " was",
),
"https://www.govtrack.us/congress/bills/browse#sort=-introduced_date")
def tweet_coming_up(self):
# legislation posted as coming up within the last day
from bill.models import Bill
dhg_bills = Bill.objects.filter(docs_house_gov_postdate__gt=timezone.now().date()-timedelta(days=1)).filter(docs_house_gov_postdate__gt=F('current_status_date'))
sfs_bills = Bill.objects.filter(senate_floor_schedule_postdate__gt=timezone.now().date()-timedelta(days=1)).filter(senate_floor_schedule_postdate__gt=F('current_status_date'))
coming_up = list(dhg_bills | sfs_bills)
coming_up.sort(key = lambda b : b.docs_house_gov_postdate if (b.docs_house_gov_postdate and (not b.senate_floor_schedule_postdate or b.senate_floor_schedule_postdate < b.docs_house_gov_postdate)) else b.senate_floor_schedule_postdate)
for bill in coming_up:
text = "Coming up: " + bill.display_number
if bill.sponsor and bill.sponsor.twitterid: text += " by @" + bill.sponsor.twitterid
text += ": " + bill.title_no_number
self.post_tweet(
"%s:comingup:%s" % (timezone.now().date().isoformat(), bill.congressproject_id),
text,
"https://www.govtrack.us" + bill.get_absolute_url())
def tweet_a_bill_action(self):
# Tweet an interesting action on a bill.
from bill.models import Bill, BillStatus
from bill.status import get_bill_really_short_status_string
bills = list(Bill.objects.filter(
current_status_date__gte=timezone.now().date()-timedelta(days=2),
current_status_date__lt=timezone.now().date(),
).exclude(
current_status=BillStatus.introduced,
))
if len(bills) == 0: return
# Choose bill with the most salient status, breaking ties with the highest proscore.
bills.sort(key = lambda b : (BillStatus.by_value(b.current_status).sort_order, b.proscore()), reverse=True)
for bill in bills:
status = BillStatus.by_value(bill.current_status).xml_code
if "Providing for consideration" in bill.title: continue
text = get_bill_really_short_status_string(status)
if text == "": continue
bill_number = bill.display_number
if bill.sponsor and bill.sponsor.twitterid: bill_number += " by @" + bill.sponsor.twitterid
text = text % (bill_number, u"y’day")
text += " " + bill.title_no_number
self.post_tweet(
bill.current_status_date.isoformat() + ":bill:%s:status:%s" % (bill.congressproject_id, status),
text,
"https://www.govtrack.us" + bill.get_absolute_url())
|
[
"jt@occams.info"
] |
jt@occams.info
|
4a34314cf3ab3799f0e9db22f7bf9934c45a1f33
|
d0ff9af885dc01de43ae7bdd2d26d6370c7b7ab5
|
/unsup_vvs/neural_fit/brainscore_mask/compute_rdms_from_activations.py
|
6d62b4160899f47a41da16e2251033e1f396d3d0
|
[] |
no_license
|
augix/unsup_vvs
|
a09f89c7d002006f59ffbe223c9469e959949e04
|
168ed0d068d27b7a7ca1dd5c1ebc28fbe84f8c7c
|
refs/heads/master
| 2023-07-17T05:55:27.630844
| 2021-06-24T01:27:28
| 2021-06-24T01:27:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
import pickle
import argparse
import os
import sys
import pdb
from tqdm import tqdm
import numpy as np
RESULT_CACHING_DIR = '/mnt/fs4/chengxuz/v4it_temp_results/.result_caching'
DEFAULT_SAVE_DIR = os.path.join(RESULT_CACHING_DIR, 'computed_rdms')
ACTIVATION_DIR = os.path.join(
RESULT_CACHING_DIR,
'model_tools.activations.core.ActivationsExtractorHelper._from_paths_stored')
ACTIVATION_PATTERN = 'activations'
def get_parser():
    """Build the command-line parser for the RDM-computation script."""
    arg_parser = argparse.ArgumentParser(
        description='The script to compute RDMs from activations')
    # Single option: where the per-pickle RDM files are written.
    arg_parser.add_argument(
        '--save_dir',
        type=str,
        action='store',
        default=DEFAULT_SAVE_DIR,
        help='Directory for saving rdm results')
    return arg_parser
def get_activation_pkls():
    """Return absolute paths of activation pickles, sorted by filename."""
    matching = [
        fname for fname in os.listdir(ACTIVATION_DIR)
        if ACTIVATION_PATTERN in fname
    ]
    matching.sort()
    return [os.path.join(ACTIVATION_DIR, fname) for fname in matching]
def main():
    """Compute one RDM per layer for every activation pickle and cache it.

    For each activation pickle, restricts the activation matrix to each
    layer's units and stores ``{layer_name: correlation matrix}`` under
    ``args.save_dir``, skipping pickles whose output file already exists.
    """
    parser = get_parser()
    args = parser.parse_args()
    all_pkls = get_activation_pkls()
    # FIX: os.makedirs replaces `os.system('mkdir -p ' + ...)` — no shell
    # string interpolation (the path is user-supplied) and it is portable.
    os.makedirs(args.save_dir, exist_ok=True)
    for each_pkl in tqdm(all_pkls):
        save_path = os.path.join(
            args.save_dir,
            os.path.basename(each_pkl))
        if os.path.exists(save_path):
            # Result already cached; skip recomputation.
            continue
        # FIX: use context managers so file handles are closed promptly
        # (the inline open() calls in the original leaked descriptors).
        with open(each_pkl, 'rb') as in_f:
            activations = pickle.load(in_f)['data']
        all_layers = np.unique(activations.layer)
        act_arr = np.asarray(activations)
        layer_names = np.asarray(activations.layer)
        _rdms = {}
        for each_layer in all_layers:
            # Columns = units belonging to this layer; corrcoef over rows.
            _resp = act_arr[:, layer_names == each_layer]
            _rdms[each_layer] = np.corrcoef(_resp)
        with open(save_path, 'wb') as out_f:
            pickle.dump(_rdms, out_f)
if __name__ == '__main__':
main()
|
[
"chengxuz@node07-ccncluster.stanford.edu"
] |
chengxuz@node07-ccncluster.stanford.edu
|
8a31fb2658e068c346166523f70684bc6035c3fc
|
410f0d66049ca881dfdeb9b7f784bd70b1c3f6e7
|
/bootea/bootea/pipelines.py
|
1bcd7524443c973b2ed7217b2a7ddcccda6d3f42
|
[] |
no_license
|
ans2human/Scrappers
|
1bdf5a1a4a34752c58fb18d45ac01e3cb54b5fe1
|
cb2183d25a8af08284f2a6c1311b8da24c720b96
|
refs/heads/master
| 2020-03-19T20:41:01.244911
| 2018-06-11T11:27:17
| 2018-06-11T11:27:17
| 136,911,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,934
|
py
|
from scrapy import log
from twisted.enterprise import adbapi
import time
import sys; sys.path.append("/users/user/appdata/local/programs/python/python36-32/lib/site-packages")
import MySQLdb.cursors
from bootea.items import test
class MySQLStorePipeline(object):
    """Scrapy pipeline that persists scraped items into a MySQL table.

    Uses twisted's adbapi connection pool so blocking MySQL calls run on
    a thread pool instead of the reactor thread.
    """

    def __init__(self):
        print ('init')
        # NOTE(review): credentials are hard-coded; consider moving them
        # into the scrapy settings module.
        self.dbpool = adbapi.ConnectionPool('MySQLdb', db = 'usalogic_testdb', user='root', passwd='1234', cursorclass=MySQLdb.cursors.DictCursor, charset='utf8', use_unicode=True)

    def process_item(self, item, spider):
        print('process')
        # Run the insert on a pool thread; failures go to handle_error.
        deferred = self.dbpool.runInteraction(self._conditional_insert, item)
        deferred.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        """Insert the item only if its producturl is not already stored."""
        print ('conditional insert')
        tx.execute("select * from test where producturl = %s", (item['producturl'], ))
        existing = tx.fetchone()
        if not existing:
            tx.execute("insert into test (producturl, prodprice, prodname) values (%s, %s, %s)", [item['producturl'], item['prodprice'], item['prodname']])
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)
        else:
            log.msg("Item already stored in db: %s" % item, level = log.DEBUG)

    def handle_error(self, e):
        """Log any failure raised by the database interaction."""
        print ('handle_error')
        log.err(e)
# from scrapy import log
# from twisted.enterprise import adbapi
# import time
# import sys; sys.path.append("/users/user/appdata/local/programs/python/python36-32/lib/site-packages")
# import MySQLdb.cursors
# from bootea.items import test
# class BooteaPipeline(object):
# def __init__(self):
# self.dbpool = adbapi.ConnectionPool('MySQLdb', db='usalogic_testdb',
# user='root', passwd='1234', cursorclass=MySQLdb.cursors.DictCursor,
# charset='utf8', use_unicode=True)
# def process_item(self, item, spider):
# query = self.dbpool.runInteraction(self._conditional_insert, item)
# query.addErrback(self.handle_error)
# return item
# def _conditional_insert(self, tx, item):
# tx.execute("select * from test where producturl = %s", (item['producturl'], ))
# result = tx.fetchone()
# if result:
# log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
# else:
# tx.execute("insert into test (producturl, prodname, prodprice) values (%s, %s, %s,)", [item['producturl'], item['prodname'], item['prodprice']])
# log.msg("Item stored in db: %s" % item, level=log.DEBUG)
# def handle_error(self, e):
# log.err(e)
|
[
"ans2human@gmail.com"
] |
ans2human@gmail.com
|
ecbbe380ee06e59502cd568e0d8911e8ee387e8b
|
ef72a7df3c39c215dd90ac5e72b164eb9d7da892
|
/rpg/heroes/exp.py
|
8580678d1bd1cda8e1e24f9779c8fce251493dcc
|
[] |
no_license
|
thebmo/messing_around
|
d49a87fc1ff722428ea67bc710ca99ad287098bd
|
4cb12e0b224cf7d1f93cb4ae6ff7603619fb7aa9
|
refs/heads/master
| 2021-01-13T02:18:50.799898
| 2015-04-08T01:12:41
| 2015-04-08T01:12:41
| 28,570,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
# Cumulative experience thresholds, one per level.
# The first 42 values follow a hand-tuned curve; from index 42 onward the
# requirement grows by a constant 97,739 exp per level, so the tail is
# generated instead of spelled out.
_EARLY_EXP = [
    0, 29, 87, 174, 304, 499, 792, 1232, 1891, 2880,
    4364, 6218, 8534, 11428, 15045, 19114, 23690, 28837, 34627, 41141,
    48468, 56711, 65983, 76413, 88147, 101347, 116196, 132901, 151694, 172836,
    196621, 223378, 253480, 287344, 325440, 368298, 416512, 470752, 531771, 600417,
    677644, 764524,
]
exp_to_next_level = _EARLY_EXP + [862263 + 97739 * step for step in range(57)]
|
[
"bmosier@gmail.com"
] |
bmosier@gmail.com
|
3c415b6254a6a1dfd67e5c564bbeeab602bbbac5
|
c3432a248c8a7a43425c0fe1691557c0936ab380
|
/Greedy/1744_수묶기*.py
|
cb76fdcd5d14a3364ec30819632212850e7c8292
|
[] |
no_license
|
Parkyunhwan/BaekJoon
|
13cb3af1f45212d7c418ecc4b927f42615b14a74
|
9a882c568f991c9fed3df45277f091626fcc2c94
|
refs/heads/master
| 2022-12-24T21:47:47.052967
| 2022-12-20T16:16:59
| 2022-12-20T16:16:59
| 232,264,447
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
import heapq

# BOJ 1744 (number grouping): pair the numbers to maximize the total sum.
# Positives > 1 are multiplied largest-first (max-heap via negation);
# negatives are multiplied smallest-first (natural min-heap); 1s are added.
n = int(input())
total = 0
pos_heap = []    # positives > 1, stored negated so heapq acts as a max-heap
neg_heap = []    # negatives, natural min-heap order
zero_count = 0
one_count = 0
for _ in range(n):
    value = int(input())
    if value == 1:
        one_count += 1
    elif value > 0:
        heapq.heappush(pos_heap, -value)
    elif value == 0:
        zero_count += 1
    else:
        heapq.heappush(neg_heap, value)
# Odd positive count: pad with a 1 so the smallest positive is "paired"
# with 1 (multiplying by 1 equals adding it alone).
if len(pos_heap) % 2:
    heapq.heappush(pos_heap, -1)
# Odd negative count: absorb the least-negative value with a 0 when one
# exists, otherwise multiply it by 1 (i.e. keep it as-is).
if len(neg_heap) % 2:
    heapq.heappush(neg_heap, 0 if zero_count > 0 else 1)
while pos_heap:
    total += heapq.heappop(pos_heap) * heapq.heappop(pos_heap)
while neg_heap:
    total += heapq.heappop(neg_heap) * heapq.heappop(neg_heap)
total += one_count
print(total)
|
[
"pyh8618@gmail.com"
] |
pyh8618@gmail.com
|
3042f2d03444d4ec8ed65c9bcad199f8c3f11f73
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2536/60761/235389.py
|
3dece7a79508610db4ccd9b65cff8208cc2b29f7
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# Reconstruct a travel itinerary from flight tickets (LeetCode-332 style):
# start at "JFK" and at each step greedily take the lexicographically
# smallest destination among the remaining tickets leaving the current stop.
airlines=input("")
# Input is one line such as [["JFK","SFO"],["JFK","ATL"]]; stripping the
# outer `[[` / `]]` and splitting on `],[` leaves entries like `"JFK","SFO"`.
airlines=list(map(str,airlines[2:-2].split("],[")))
result=[]
result.append("JFK")
i=0
while(0<len(airlines)):
    destination=[]
    # Collect every destination reachable from the last airport (result[i]).
    # [7:-1] extracts the destination code; this assumes airport codes are
    # exactly 3 characters long (`"AAA","` is 7 chars) -- TODO confirm.
    for airline in airlines:
        if(airline.startswith('"'+result[i])):
            destination.append(airline[7:-1])
    destination.sort()
    # Consume the chosen ticket by rebuilding its exact string form.
    # NOTE(review): raises IndexError if no ticket leaves result[i] while
    # tickets remain -- input is presumably guaranteed to form a full path.
    airlines.remove('"'+result[i]+'","'+destination[0]+'"')
    result.append(destination[0])
    i=i+1
print(result)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
047f80690a5099e9f1505b2dd7da347d7bd2adc1
|
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
|
/378/Solution.py
|
2933a83513c07b54670ecbf4838330642d1675d9
|
[] |
no_license
|
zhangruochi/leetcode
|
6f739fde222c298bae1c68236d980bd29c33b1c6
|
cefa2f08667de4d2973274de3ff29a31a7d25eda
|
refs/heads/master
| 2022-07-16T23:40:20.458105
| 2022-06-02T18:25:35
| 2022-06-02T18:25:35
| 78,989,941
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
"""
Given a n x n matrix where each of the rows and columns are sorted in ascending order, find the kth smallest element in the matrix.
Note that it is the kth smallest element in the sorted order, not the kth distinct element.
Example:
matrix = [
[ 1, 5, 9],
[10, 11, 13],
[12, 13, 15]
],
k = 8,
return 13.
Note:
You may assume k is always valid, 1 ≤ k ≤ n2.
"""
import heapq
class Solution:
    # O(n^2 log n) time, O(n^2) space: flatten the matrix and sort it.
    def kthSmallest(self, matrix, k):
        """
        :type matrix: List[List[int]]
        :type k: int
        :rtype: int
        """
        flattened = sorted(value for row in matrix for value in row)
        return flattened[k - 1]

    # O(n*k) time, O(n) space: keep one column cursor per row and advance
    # the cursor of the row holding the current minimum, k times.
    def kthSmallest2(self, matrix, k):
        """
        :type matrix: List[List[int]]
        :type k: int
        :rtype: int
        """
        n = len(matrix)
        cursors = [0] * n
        smallest = None
        for _ in range(k):
            best_val, best_row = float("inf"), -1
            for row in range(n):
                col = cursors[row]
                if col < n and matrix[row][col] < best_val:
                    best_val = matrix[row][col]
                    best_row = row
            cursors[best_row] += 1
            smallest = best_val
        return smallest
import heapq
class MyItem:
    """Heap entry: a matrix value plus the (row, column) it came from.

    Ordering compares only ``num``, which is all heapq requires.
    """

    def __init__(self, num, row, column):
        self.num = num
        self.row = row
        self.column = column

    def __lt__(self, other):
        return self.num < other.num

    def __repr__(self):
        return str(self.num)
class Solution:
    def kthSmallest(self, matrix, k):
        """Return the k-th smallest value via an n-way merge over the rows.

        The heap holds one MyItem per row (that row's smallest unvisited
        entry); popping the minimum k times yields the answer.
        """
        n = len(matrix)
        frontier = [MyItem(matrix[0][col], 0, col) for col in range(n)]
        heapq.heapify(frontier)
        smallest = None
        for _ in range(k):
            entry = heapq.heappop(frontier)
            smallest = entry.num
            next_row = entry.row + 1
            if next_row < n:
                # Replace the popped entry with its successor in the column.
                heapq.heappush(frontier, MyItem(matrix[next_row][entry.column], next_row, entry.column))
        return smallest
|
[
"zrc720@gmail.com"
] |
zrc720@gmail.com
|
cabf56396409a2b8bbe2bba525dd0a4347f411d5
|
4af8e73d47535494a25e06d5ac693fdc60fc95f5
|
/NLP_pytorch/04_pytorch_ignite/main.py
|
3c143464dd83f6b08e51033562e87b446747ba97
|
[] |
no_license
|
woosa7/nbcc_projects
|
4de2f846068bacdc73a6877d26dba93c483b1be3
|
0e7a30f58e554125d02451ab407f2a19c8c7b5f4
|
refs/heads/main
| 2023-06-16T16:19:51.975216
| 2021-07-16T05:18:53
| 2021-07-16T05:18:53
| 306,594,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from model import ImageClassifier
from trainer import Trainer
from data_loader import get_loaders
def define_argparser():
    """Parse command-line options for a training run.

    ``--model_fn`` (checkpoint path) is the only required flag; the rest
    default to the usual training hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_fn', required=True)
    # First CUDA device when available, otherwise CPU (-1).
    parser.add_argument('--gpu_id', type=int,
                        default=0 if torch.cuda.is_available() else -1)
    parser.add_argument('--train_ratio', type=float, default=0.8)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--n_epochs', type=int, default=10)
    parser.add_argument('--verbose', type=int, default=2)
    return parser.parse_args()
def main(config):
    """Train an image classifier using the parsed configuration.

    Builds the data loaders, model, optimizer and loss, then hands
    everything to the ignite-based Trainer.
    """
    # gpu_id < 0 selects the CPU; otherwise pin to the requested CUDA device.
    if config.gpu_id < 0:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:%d' % config.gpu_id)

    train_loader, valid_loader, test_loader = get_loaders(config)  # custom dataset & dataloader
    for label, loader in (("Train:", train_loader),
                          ("Valid:", valid_loader),
                          ("Test:", test_loader)):
        print(label, len(loader.dataset))

    # 28x28 inputs flattened to 784, 10 classes (presumably MNIST --
    # confirm against data_loader).
    model = ImageClassifier(28**2, 10).to(device)
    optimizer = optim.Adam(model.parameters())
    crit = nn.CrossEntropyLoss()

    # Using Ignite
    trainer = Trainer(config)
    trainer.train(model, crit, optimizer, train_loader, valid_loader)
# Script entry point: parse CLI options, then launch training.
if __name__ == '__main__':
    config = define_argparser()
    main(config)
|
[
"woosa7@gmail.com"
] |
woosa7@gmail.com
|
71a26a87ecee866c893700324d40a1a2572e4a99
|
6eb58e32b469c37428185ab4456184905a5b4fb5
|
/analysis_code/parse_ICD.py
|
c9e4102115862dbdf62d0a5bc4cdcd1bf75b3e03
|
[] |
no_license
|
rchenmit/mht_analysis
|
0b8bfff7730df835975c7c41d65f007ad269e3a9
|
678d4419bdaed9ed9d0041df3a2cd8638074590f
|
refs/heads/master
| 2020-04-06T03:40:41.577209
| 2015-01-12T00:14:48
| 2015-01-12T00:14:48
| 19,548,658
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,113
|
py
|
## Robert Chen
## Monday 5/12/2014
##
## read and process BMI file
##
##
import datetime as dt
import scipy as s
# FIX: pandas and numpy are used throughout this script (pd.*, np.nan)
# but were never imported.
import numpy as np
import pandas as pd
## options
input_folder = '../../data/new_data_20140416/Data_20140409/'
output_dir = '../../data/new_data_20140416/Data_curated_RC/'
pickle_dir = '../analysis_output/pickle/'
bool_make_count_matrix_ALL = 0
# FIX: this flag had no value assigned (a syntax error). Defaulted to 0
# (disabled) to match the other two flags -- set to 1 to build JD_CODE counts.
bool_make_count_matrix_JD_CODE = 0
bool_make_count_matrix_JD_X_RANGE = 0
## Prepare data, read data
filename = input_folder + 'ICD_9_04082014.csv'
pd.set_option('display.line_width', 300)
df_ICD = pd.read_csv(filename, sep=',')
df_ICD['EVENT_DATE'] = pd.to_datetime(df_ICD['EVENT_DATE'])
## read in the ICD excel PheWAS file
pheWAS_xls_file = input_folder + 'ICD9-2-PheWAS.xls'
xls = pd.ExcelFile(pheWAS_xls_file)
df_pheWAS = xls.parse(xls.sheet_names[0])
## add columns to df_ICD for pheWAS: JD_CODE and JD_X_RANGE
df_ICD = df_ICD.merge(df_pheWAS, left_on = 'ICD_9_CODE', right_on='ICD9_CODE', how = 'left' )
## make a counts matrix
if bool_make_count_matrix_ALL:
unique_ICD_values = df_ICD.ICD_9_CODE.unique() #ARRAY with unique ICD codes as STRINGS
df_ICD_counts = pd.DataFrame(columns=['RUID'])
for icd in unique_ICD_values:
if isinstance(icd, str) or isinstance(jd, unicode):
if s.mod(len(df_ICD_counts.columns), 100) == 0:
print len(df_ICD_counts.columns)
df_this_icd = df_ICD[df_ICD.ICD_9_CODE==icd][['RUID', 'ICD_9_CODE']]
df_this_icd[icd] = df_this_icd.groupby('RUID').transform('count')
df_this_icd = df_this_icd.drop( 'ICD_9_CODE', 1)
df_this_icd = df_this_icd.drop_duplicates()
df_this_icd.replace(np.nan, 0)
if len(df_ICD_counts) == 0:
df_ICD_counts = df_this_icd.copy()
else:
df_ICD_counts = pd.merge(df_ICD_counts, df_this_icd, left_on='RUID', right_on='RUID', how='outer')
df_ICD_counts.to_csv( output_dir + 'df_ICD_counts.csv', index = False)
if bool_make_count_matrix_JD_CODE:
unique_JD_values = df_ICD.JD_CODE.unique() #ARRAY with unique ICD codes as STRINGS
df_JD_counts = pd.DataFrame(columns=['RUID'])
print "JD_Counts, n= " + str(len(unique_JD_values))
for jd in unique_JD_values:
if isinstance(jd, str) or isinstance(jd, unicode):
if s.mod(len(df_JD_counts.columns), 100) == 0:
print len(df_JD_counts.columns)
df_this_jd = df_ICD[df_ICD.JD_CODE==jd][['RUID', 'JD_CODE']]
df_this_jd[jd] = df_this_jd.groupby('RUID').transform('count')
df_this_jd = df_this_jd.drop( 'JD_CODE', 1)
df_this_jd = df_this_jd.drop_duplicates()
df_this_jd.replace(np.nan, 0)
if len(df_JD_counts) == 0: #base case
df_JD_counts = df_this_jd.copy()
else:
df_JD_counts = pd.merge(df_JD_counts, df_this_jd, left_on='RUID', right_on='RUID', how='outer')
df_JD_counts.to_csv( output_dir + 'df_JD_counts.csv', index = False)
if bool_make_count_matrix_JD_X_RANGE:
unique_JD_X_RANGE_values = df_ICD.JD_X_RANGE.unique() #ARRAY with unique ICD codes as STRINGS
df_JD_RANGE_counts = pd.DataFrame(columns=['RUID'])
print "JD_X_RANGE Counts, n= " + str(len(unique_JD_X_RANGE_values))
for jd in unique_JD_X_RANGE_values:
if isinstance(jd, str) or isinstance(jd, unicode):
if s.mod(len(df_JD_RANGE_counts.columns), 100) == 0:
print len(df_JD_RANGE_counts.columns)
df_this_jd = df_ICD[df_ICD.JD_X_RANGE==jd][['RUID', 'JD_X_RANGE']]
df_this_jd[jd] = df_this_jd.groupby('RUID').transform('count')
df_this_jd = df_this_jd.drop( 'JD_X_RANGE', 1)
df_this_jd = df_this_jd.drop_duplicates()
df_this_jd.replace(np.nan, 0)
if len(df_JD_RANGE_counts) == 0: #base case
df_JD_RANGE_counts = df_this_jd.copy()
else:
df_JD_RANGE_counts = pd.merge(df_JD_RANGE_counts, df_this_jd, left_on='RUID', right_on='RUID', how='outer')
df_JD_RANGE_counts.to_csv( output_dir + 'df_JD_RANGE_counts.csv', index = False)
|
[
"robchen401@gmail.com"
] |
robchen401@gmail.com
|
581ff3c3ff8eac75bf2c32be001fe3da752ff030
|
87bbeac699af7fa4dc76592acecef52dead1f436
|
/Adelphi Academic Calendar/skill/skill_env/Lib/site-packages/docutils/parsers/rst/languages/it.py
|
ad6085c6d6d78353bca04e8d50786cae5b9e5628
|
[
"MIT",
"OpenSSL",
"bzip2-1.0.6",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"Python-2.0",
"TCL",
"LicenseRef-scancode-newlib-historical"
] |
permissive
|
EnriqueGambra/Amazon-Alexa-Skill
|
79ec8848605a0e1e13092a1f6163dd63573322ec
|
198ed51bef555eee006041fef0bcbf5c955142d5
|
refs/heads/master
| 2022-12-02T01:01:48.419524
| 2019-10-23T21:45:49
| 2019-10-23T21:45:49
| 214,226,014
| 0
| 2
|
MIT
| 2022-11-27T06:15:00
| 2019-10-10T15:58:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,267
|
py
|
# $Id: it.py 7119 2011-09-02 13:00:23Z milde $
# Authors: Nicola Larosa <docutils@tekNico.net>;
# Lele Gaifax <lele@seldati.it>
# Copyright: This module has been placed in the public domain.
# Beware: the italian translation of the reStructuredText documentation
# at http://docit.bice.dyndns.org/static/ReST, in particular
# http://docit.bice.dyndns.org/static/ReST/ref/rst/directives.html, needs
# to be synced with the content of this file.
"""
Italian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Italian -> canonical directive names. Entries marked "(translation
# required)" are placeholders awaiting a real Italian translation.
directives = {
    'attenzione': 'attention',
    'cautela': 'caution',
    'code (translation required)': 'code',
    'pericolo': 'danger',
    'errore': 'error',
    'suggerimento': 'hint',
    'importante': 'important',
    'nota': 'note',
    'consiglio': 'tip',
    'avvertenza': 'warning',
    'ammonizione': 'admonition',
    'riquadro': 'sidebar',
    'argomento': 'topic',
    'blocco-di-righe': 'line-block',
    'blocco-interpretato': 'parsed-literal',
    'rubrica': 'rubric',
    'epigrafe': 'epigraph',
    'punti-salienti': 'highlights',
    'estratto-evidenziato': 'pull-quote',
    'composito': 'compound',
    'container (translation required)': 'container',
    #'questions': 'questions',
    #'qa': 'questions',
    #'faq': 'questions',
    'tabella': 'table',
    'tabella-csv': 'csv-table',
    'tabella-elenco': 'list-table',
    'meta': 'meta',
    'math (translation required)': 'math',
    #'imagemap': 'imagemap',
    'immagine': 'image',
    'figura': 'figure',
    'includi': 'include',
    'grezzo': 'raw',
    'sostituisci': 'replace',
    'unicode': 'unicode',
    # NOTE(review): 'tmp' looks wrong -- upstream docutils it.py maps 'data'
    # (Italian for "date") to the 'date' directive; confirm before relying on it.
    'tmp': 'date',
    'classe': 'class',
    'ruolo': 'role',
    'ruolo-predefinito': 'default-role',
    'titolo': 'title',
    'indice': 'contents',
    'contenuti': 'contents',
    # NOTE(review): 'seznum' appears to come from the Czech mapping, not
    # Italian; verify it is intentional here.
    'seznum': 'sectnum',
    'sezioni-autonumerate': 'sectnum',
    'annota-riferimenti-esterni': 'target-notes',
    'intestazione': 'header',
    'piede-pagina': 'footer',
    #'footnotes': 'footnotes',
    #'citations': 'citations',
    'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Italian name to registered (in directives/__init__.py) directive name
mapping."""
# Italian -> canonical role names for interpreted text. Entries marked
# "(translation required)" are placeholders awaiting a real translation.
roles = {
    'abbreviazione': 'abbreviation',
    'acronimo': 'acronym',
    'code (translation required)': 'code',
    'indice': 'index',
    'deponente': 'subscript',
    'esponente': 'superscript',
    'riferimento-titolo': 'title-reference',
    'riferimento-pep': 'pep-reference',
    'riferimento-rfc': 'rfc-reference',
    'enfasi': 'emphasis',
    'forte': 'strong',
    'letterale': 'literal',
    'math (translation required)': 'math',
    'riferimento-con-nome': 'named-reference',
    'riferimento-anonimo': 'anonymous-reference',
    'riferimento-nota': 'footnote-reference',
    'riferimento-citazione': 'citation-reference',
    'riferimento-sostituzione': 'substitution-reference',
    'destinazione': 'target',
    'riferimento-uri': 'uri-reference',
    'grezzo': 'raw',}
"""Mapping of Italian role names to canonical role names for interpreted text.
"""
|
[
"gambra7@gmail.com"
] |
gambra7@gmail.com
|
3ec541908b733c963a38d71f7f5949c8f8a7327d
|
465efab6e7b419d4493d09786a4b2d7a976f7a31
|
/src/Universe/LevelProps/Decorator.py
|
d036547a63ad9d0c74b8c3f991b1315b75e5aedb
|
[] |
no_license
|
dzz/kthuune
|
aa2cadcdfaed9a06b6384516be429575640a7896
|
2a8be25ec5303586e5a7e067c024d6e6ca171efa
|
refs/heads/master
| 2021-01-24T07:42:41.426973
| 2018-07-29T06:21:34
| 2018-07-29T06:21:34
| 93,354,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
from Newfoundland.Object import Object
from Beagle import API as BGL
class Decorator(Object):
    """Static floor-decoration prop built from a level-editor object dict."""

    # Sprite sheet shared by all decorator instances.
    textures = BGL.assets.get('KT-forest/animation/decorators')

    def parse(od, df):
        """Build a Decorator from an editor object dict ``od``.

        ``df`` is unused but kept for signature compatibility with other
        parse hooks. Note: despite their names, od["w"]/od["h"] appear to
        hold a second corner point (the center/size math below treats them
        that way) -- confirm against the editor's export format.
        """
        x1 = float(od["x"])
        y1 = float(od["y"])
        x2 = float(od["w"])
        y2 = float(od["h"])
        cx = (x1 + x2) / 2.0
        cy = (y1 + y2) / 2.0
        w = (x2 - x1) * 0.5
        h = (y2 - y1) * 0.5

        if "decorator_id" in od["meta"]:
            texture = Decorator.textures[od["meta"]["decorator_id"]]
        else:
            texture = Decorator.textures[0]

        # FIX: light_type was only assigned inside the "self_lit" branch, so
        # objects without a "self_lit" meta key raised NameError below.
        # Default to no lighting, matching the explicit self_lit=False case.
        light_type = Object.LightTypes.NONE
        if od["meta"].get("self_lit"):
            light_type = Object.LightTypes.DYNAMIC_TEXTURE_OVERLAY

        return Decorator(
            p=[cx, cy],
            size=[w, h],
            scale_uv=[1.0, 1.0],
            tick_type = Object.TickTypes.STATIC,
            visible = True,
            buftarget = "floor",
            texture = texture,
            light_type = light_type
        )
|
[
"devon.zachary@gmail.com"
] |
devon.zachary@gmail.com
|
c618214707b9fb6a6ca9236d529434d841c267c9
|
50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7
|
/challenge17(backOnTime)/solutions/FelipeAg.py
|
87790a5c157315c30659f1835cb2ca4e79b5036b
|
[
"MIT"
] |
permissive
|
banana-galaxy/challenges
|
792caa05e7b8aa10aad8e04369fc06aaf05ff398
|
8655c14828607535a677e2bb18689681ee6312fa
|
refs/heads/master
| 2022-12-26T23:58:12.660152
| 2020-10-06T13:38:04
| 2020-10-06T13:38:04
| 268,851,516
| 11
| 8
|
MIT
| 2020-09-22T21:21:30
| 2020-06-02T16:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
def solution(steps):
    """Return True iff the walk takes at most 10 steps and ends at the start.

    A walk returns to its origin exactly when the north/south moves cancel
    and the east/west moves cancel.
    """
    if len(steps) > 10:
        return False
    balanced_ns = steps.count('n') == steps.count('s')
    balanced_ew = steps.count('e') == steps.count('w')
    return balanced_ns and balanced_ew
|
[
"cawasp@gmail.com"
] |
cawasp@gmail.com
|
5b2207888d5917f774beaa8e83fa97856d693717
|
f6c69a7f7f1bbae5fd5473dfaac5ef5fad840d58
|
/lib/datatools/dataclass/datacatalog.py
|
61a0c4aa163e163193588df29a4266dfac7aa5a3
|
[
"Apache-2.0"
] |
permissive
|
JokerWDL/PyAnomaly
|
8c5ca4ca705a1251c70ff1f36c908c8f6f75e7d8
|
cf93437e5d7ae87fa916141cf4b5cc2e929b8199
|
refs/heads/master
| 2022-11-05T11:31:42.345422
| 2020-06-22T17:21:20
| 2020-06-22T17:21:20
| 274,295,638
| 1
| 0
|
Apache-2.0
| 2020-06-23T03:04:32
| 2020-06-23T03:04:31
| null |
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
'''
Refer to the detectron2's DatasetCatalog
'''
from typing import List
class DatasetCatalog(object):
    """Registry mapping dataset names to loader functions.

    Each entry maps a string name (e.g. "coco_2014_train") to a callable
    that parses the dataset and returns its samples. Keeping the mapping
    here lets configs pick datasets by name alone.
    """

    _REGISTERED = {}

    @staticmethod
    def register(name, func):
        """Register ``func`` as the loader for dataset ``name``.

        Args:
            name (str): identifier for the dataset.
            func (callable): loader invoked later by :meth:`get`.
        """
        assert callable(func), "You must register a function with `DatasetCatalog.register`!"
        assert name not in DatasetCatalog._REGISTERED, "Dataset '{}' is already registered!".format(
            name
        )
        DatasetCatalog._REGISTERED[name] = func

    @staticmethod
    def get(name, cfg, flag, aug):
        """Invoke the loader registered under ``name`` and return its result.

        Raises:
            KeyError: if ``name`` is unknown; the message lists the
                currently registered dataset names.
        """
        if name not in DatasetCatalog._REGISTERED:
            raise KeyError(
                "Dataset '{}' is not registered! Available datasets are: {}".format(
                    name, ", ".join(DatasetCatalog._REGISTERED.keys())
                )
            )
        loader = DatasetCatalog._REGISTERED[name]
        return loader(cfg, flag, aug)

    @staticmethod
    def list() -> List[str]:
        """Return the names of all registered datasets."""
        return list(DatasetCatalog._REGISTERED.keys())

    @staticmethod
    def clear():
        """Remove every registered dataset."""
        DatasetCatalog._REGISTERED.clear()
|
[
"446358161@qq.com"
] |
446358161@qq.com
|
b5ead447cfefeb3618026a45f0fd21cea7995513
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/expensive/_internally_replaced_utils.py
|
8276fa0ab10c37bd4effe408fa3f5694ab102d26
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,197
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import importlib.machinery
def _download_file_from_remote_location(fpath: str, url: str) -> None:
    """No-op placeholder: remote download is unsupported here (see
    `_is_remote_location_available`, which always reports False)."""
    pass
def _is_remote_location_available() -> bool:
    """Always False: no remote weight storage is configured in this port."""
    return False
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
def _get_extension_path(lib_name):
    """Locate the compiled extension module ``lib_name`` next to this file.

    Returns the extension's file path (spec ``origin``); raises ImportError
    if no matching extension is found in this package's directory.
    """
    lib_dir = os.path.dirname(__file__)
    if os.name == "nt":
        # Register the main torchvision library location on the default DLL path
        import ctypes
        import sys

        kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
        # AddDllDirectory only exists on Windows versions with the
        # KB2533623 loader update; feature-detect it.
        with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
        # 0x0001 = SEM_FAILCRITICALERRORS: suppress critical-error dialogs
        # while we touch the loader; restored below.
        prev_error_mode = kernel32.SetErrorMode(0x0001)

        if with_load_library_flags:
            kernel32.AddDllDirectory.restype = ctypes.c_void_p

        if sys.version_info >= (3, 8):
            # Python 3.8+ exposes the loader call directly.
            os.add_dll_directory(lib_dir)
        elif with_load_library_flags:
            res = kernel32.AddDllDirectory(lib_dir)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
                raise err

        kernel32.SetErrorMode(prev_error_mode)

    # Search lib_dir for a file with a platform extension suffix
    # (.pyd / .so) matching lib_name.
    loader_details = (
        importlib.machinery.ExtensionFileLoader,
        importlib.machinery.EXTENSION_SUFFIXES,
    )

    extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
    ext_specs = extfinder.find_spec(lib_name)
    if ext_specs is None:
        raise ImportError

    return ext_specs.origin
|
[
"noreply@github.com"
] |
Oneflow-Inc.noreply@github.com
|
f9f83f196d52a47793819cfbd6af460d5fdce595
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon.py
|
f8d8b822bf224b7b14e0e62f9d3dfc07c4a33180
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(TestCase):
    """Storage encode/decode round-trip checks for one mainnet contract.

    The fixture is the contract's script (code + storage), loaded once
    for all three tests.
    """

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None  # always show full diffs on assertion failures
        cls.contract = get_data('storage/mainnet/KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon.json')

    def test_storage_encoding_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(self):
        # Decoding the storage and re-encoding it must reproduce the
        # original Micheline expression exactly.
        # code[1] is presumably the storage type section of the script -- confirm.
        type_expr = self.contract['script']['code'][1]
        val_expr = self.contract['script']['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        actual = encode_micheline(decoded, schema)
        self.assertEqual(val_expr, actual)

    def test_storage_schema_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(self):
        # Building a schema from code[0] must not raise.
        _ = build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1UCoFzRwpQhRg9BWz2QMNwzTud56fCdjSP_babylon(self):
        # Micheline -> Michelson pretty-printing must not raise.
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
c5ff1437f0d351d49af69153cb81a3ca68b48a2c
|
f4475acdf01fa80ae3a638c19df30773cfd379dc
|
/listenclosely/admin.py
|
b7740fd874d120412af39807149d22c7247f7d97
|
[
"BSD-3-Clause"
] |
permissive
|
jlmadurga/listenclosely
|
ea48e7ea05f971ca4fc979f5f52d5a07ec74dcbb
|
d6df9110c3ed6fd337e0236cccbe4d931bf217b0
|
refs/heads/master
| 2023-01-07T13:27:30.024214
| 2016-03-12T12:40:29
| 2016-03-12T12:40:29
| 49,677,476
| 7
| 3
|
BSD-3-Clause
| 2022-12-26T20:23:37
| 2016-01-14T21:49:08
|
Python
|
UTF-8
|
Python
| false
| false
| 203
|
py
|
from django.contrib import admin
from listenclosely.models import Message, Chat, Agent, Asker
# Expose the listenclosely models in the Django admin with default options.
for _model in (Message, Chat, Agent, Asker):
    admin.site.register(_model)
|
[
"jlmadurga@gmail.com"
] |
jlmadurga@gmail.com
|
c383989311b6a33537436038b4dedd0a24e43d79
|
658773cf775fd97c3cec3aca5f559500dec021bc
|
/controllers/asset.py
|
c93abf8e08e4c373f5688d022a96348e5dd4d844
|
[
"MIT"
] |
permissive
|
smeissner/ifrc
|
f3795474219d20fba5c68192f5d9b90006288e3e
|
505eb6ffbb8fc32fdbbe63fdab4c19d87e53ca86
|
refs/heads/master
| 2021-01-18T10:43:55.847965
| 2012-10-07T22:43:15
| 2012-10-07T22:43:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
# -*- coding: utf-8 -*-
"""
Asset Management Functionality
http://eden.sahanafoundation.org/wiki/BluePrint/Assets
"""
# web2py routes this controller as /<controller>/<function>.
module = request.controller
resourcename = request.function

# Bail out with a 404 when the deployment has this module switched off.
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
    """Render the module home page."""
    name = settings.modules[module].name_nice
    response.title = name
    return {"module_name": name}
# -----------------------------------------------------------------------------
def create():
    """Convenience shortcut: jump straight to the asset create form."""
    target = URL(f="asset", args="create")
    redirect(target)
# -----------------------------------------------------------------------------
def asset():
    """ RESTful CRUD controller """
    # Use the item() controller in this module to set options correctly
    # Attach an "Add New Item" popup link to the asset's item_id field so
    # users can create catalog items inline.
    s3db.asset_asset.item_id.comment = S3AddResourceLink(f="item",
        label=T("Add New Item"),
        title=T("Item"),
        tooltip=T("Type the name of an existing catalog item OR Click 'Add New Item' to add an item which is not in the catalog."))

    # Defined in Model for use from Multiple Controllers for unified menus
    return s3db.asset_controller()
# =============================================================================
def item():
    """ RESTful CRUD controller """
    # Filter to just Assets
    table = s3db.supply_item
    ctable = s3db.supply_item_category
    s3.filter = (table.item_category_id == ctable.id) & \
                (ctable.can_be_asset == True)

    # Limit the Categories to just those with vehicles in
    # - make category mandatory so that filter works
    # NOTE(review): the "vehicles" wording above looks stale -- the filter
    # below is on can_be_asset, not anything vehicle-specific; confirm.
    field = s3db.supply_item.item_category_id
    field.requires = IS_ONE_OF(db,
                               "supply_item_category.id",
                               s3db.supply_item_category_represent,
                               sort=True,
                               filterby = "can_be_asset",
                               filter_opts = [True]
                               )
    # Popup link for creating a category inline from the dropdown.
    field.comment = S3AddResourceLink(f="item_category",
        label=T("Add Item Category"),
        title=T("Item Category"),
        tooltip=T("Only Categories of type 'Vehicle' will be seen in the dropdown."))

    # Defined in the Model for use from Multiple Controllers for unified menus
    return s3db.supply_item_controller()
# =============================================================================
def item_category():
    """ RESTful CRUD controller for asset item categories. """
    table = s3db.supply_item_category
    # Filter to just Assets
    s3.filter = (table.can_be_asset == True)
    # Default to Assets: the flag is forced True and hidden from the form.
    field = table.can_be_asset
    field.readable = field.writable = False
    field.default = True
    return s3_rest_controller("supply", "item_category")
# END =========================================================================
|
[
"fran@aidiq.com"
] |
fran@aidiq.com
|
f7cb7a5de948cd36e56b54eee4c79406be49b77a
|
dffd7156da8b71f4a743ec77d05c8ba031988508
|
/ac/abc109/abc109_b/11416688.py
|
a108bbb8ac35b9102f2c74dfe3e7b4513b27c333
|
[] |
no_license
|
e1810/kyopro
|
a3a9a2ee63bc178dfa110788745a208dead37da6
|
15cf27d9ecc70cf6d82212ca0c788e327371b2dd
|
refs/heads/master
| 2021-11-10T16:53:23.246374
| 2021-02-06T16:29:09
| 2021-10-31T06:20:50
| 252,388,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Shiritori validity check: every word must start with the previous
# word's last letter, and no word may appear twice.
st = set()
cant = False
prev = "-"  # sentinel meaning "no previous word yet"
for i in range(int(input())):
    s = input()
    if (prev!="-" and prev!=s[0]) or s in st: cant = True
    prev = s[-1]
    st.add(s)
# Slice trick: "YNeos"[0::2] == "Yes", "YNeos"[1::2] == "No"
print("YNeos"[cant::2])
|
[
"v.iceele1810@gmail.com"
] |
v.iceele1810@gmail.com
|
72a7cb385ba562e3c44eeb5951a6ad27b05b8072
|
992f6a7436a9755d13bfbf0e3e0d98daa7541f1a
|
/coresite/views.py
|
3956a3f993453816a94d377858f59f39293cc9de
|
[
"MIT"
] |
permissive
|
Klim314/argent_app
|
838c3f6b2d15666670ea1e90ac0c23bdc0df50aa
|
767a0a11646fc08fb7197a191348466c913fe360
|
refs/heads/master
| 2021-01-22T05:32:44.626421
| 2017-09-17T15:23:54
| 2017-09-17T15:23:54
| 102,282,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
from django.shortcuts import render, reverse, HttpResponseRedirect, HttpResponse
from django.views import View
from argent_app.models import Room, InRoom
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class Register(View):
    """Combined register-or-login view.

    POST: try to authenticate the submitted credentials; if they do not
    match an existing account and the username is free, create the user
    and log them in.
    GET: render the registration form.
    """
    def post(self, request):
        print(request.POST)
        username, password = request.POST["username"], request.POST["password1"]
        user = authenticate(username=username, password=password)
        if user is not None:
            # Credentials match an existing account: treat as a login.
            login(request, user)
            return HttpResponseRedirect(reverse("room_manage"))
        else:
            # if user does not exist, create
            if User.objects.filter(username=username).exists():
                # Username taken but password did not authenticate.
                # NOTE(review): HttpResponseRedirect takes no ``context``
                # kwarg — this call presumably raises TypeError at
                # runtime; confirm and render the template instead.
                return HttpResponseRedirect(reverse("register"),
                                            context={"user_exists": True})
            else:
                user = User.objects.create_user(username, password=password)
                login(request, user)
                return HttpResponseRedirect(reverse("room_manage"))
    def get(self, request):
        # Show an empty registration form.
        return render(request, "registration/register.html", {"form": UserCreationForm()})
|
[
"klim314@gmail.com"
] |
klim314@gmail.com
|
4fe09e4033ab5248274e1eb4eca7d375acc4598d
|
b1ba5707a5cbe918d33bc2082b3eb4ff1378c060
|
/SDPython/tests/test_sd/test_package.py
|
21b1b208b6690f20449ad6c4d37e68781b3b36f3
|
[] |
no_license
|
qq781217732/SubstanceDev
|
2eb1d9ed48d477cf70c7bfdac2103bb884e9204c
|
b9ffab0a1b8f3c01783259074940b2712a8142b8
|
refs/heads/master
| 2023-03-26T00:43:35.047305
| 2021-03-01T04:12:28
| 2021-03-01T04:12:28
| 342,539,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,105
|
py
|
# ADOBE CONFIDENTIAL
#
# Copyright 2019 Adobe
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying it.
# If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
#
import unittest
import sd
from tests import tools
class TestPackage(unittest.TestCase):
    """Tests for the Substance Designer SDPackage API."""
    @classmethod
    def setUpClass(cls):
        context = sd.getContext()
        # Load the reference package used by most assertions below
        cls.sdPackage = tools.loadSDPackage(context, 'test_sdpackage.sbs')
        # Load some other packages
        cls.sdPackageTestNewContent = tools.loadSDPackage(context, 'test_write_content.sbs')
    def testPackagesLoaded(self):
        # Sanity check: both packages loaded in setUpClass.
        self.assertTrue(self.sdPackage, 'Fail to load package')
        self.assertTrue(self.sdPackageTestNewContent, 'Fail to load package')
    def test_SDPackage_getChildrenResources(self):
        # Check Non Recursive mode: only top-level resources are returned
        sbsResourceArray = self.sdPackage.getChildrenResources(False)
        self.assertEqual(len(sbsResourceArray), 3)
        # Check Recursive Mode: resources nested in folders are included
        sbsResourceArray = self.sdPackage.getChildrenResources(True)
        self.assertEqual(len(sbsResourceArray), 5)
    def test_SDPackage_findResourceFromUrl(self):
        # Check that a resource of the reference package can be retrieved
        sbMDLSubGraph = self.sdPackage.findResourceFromUrl('folder0/mdl_sub_graph')
        self.assertTrue(sbMDLSubGraph)
        # Check that a resource in another can't be found in the reference package
        sbPBRGraph = self.sdPackage.findResourceFromUrl('pbr_graph')
        self.assertFalse(sbPBRGraph)
    def test_SDPackage_getDependencies(self):
        # The reference package depends on exactly one other package,
        # which must resolve to a real file path and a loaded package.
        pkgDeps = self.sdPackage.getDependencies()
        self.assertEqual(len(pkgDeps), 1)
        firstPkgDep = pkgDeps[0]
        self.assertTrue(len(firstPkgDep.getFilePath())>0)
        self.assertTrue(firstPkgDep.getPackage())
|
[
"gaoyuyang@senseinn.com"
] |
gaoyuyang@senseinn.com
|
cc25b49cba139ab957706257cc9e9cdd5119b7a6
|
382c8cfb29f420297462d122c571995b62e10a6b
|
/temp.py
|
ab5d445e6bd49d2df01744e32851e61b3e05f170
|
[] |
no_license
|
innovation-labs/Vader
|
c1a9592cc02f85cc5a28b3116fc41b35df5baf04
|
aa9091f90c41fe2b1ae6e488670bf89bcbbde5c9
|
refs/heads/master
| 2021-08-23T21:18:46.609875
| 2017-12-06T15:50:56
| 2017-12-06T15:50:56
| 103,031,449
| 0
| 0
| null | 2017-09-10T13:28:51
| 2017-09-10T13:28:50
| null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
#
from django.conf import settings
from geoip2 import database
from geoip2.errors import AddressNotFoundError
from apps.warehouse.models import IPStore
# MaxMind GeoIP2 city-database reader, shared by all lookups below.
reader = database.Reader(settings.MAXMIND_CITY_DB)
ips = ['99.248.9.54',
'173.34.75.225',
'70.54.130.204',
'67.58.222.87',
'70.55.50.230',
'76.71.67.164',
'70.24.105.229',
'64.231.136.194',
'135.0.4.175',
'173.34.222.226',
'174.92.74.247',
'99.231.160.194',
'184.151.178.201',
'70.49.149.23',
'66.49.185.244',
'70.53.51.197',
'174.112.43.253',
'173.34.125.63',
'64.231.148.82',
'66.49.190.181',
'173.32.111.198',
'70.50.213.134',
'50.100.149.203',
'99.230.228.92',
'184.151.190.55',
'24.114.51.122',
'174.118.26.209',
'73.201.179.235',
'99.237.95.19',
'76.71.112.4',
'76.71.4.24',
'76.68.126.170',
'174.115.124.199',
'99.243.22.198',
'69.157.66.143',
'99.226.8.59',
'70.26.57.62',
'184.147.122.233',
'216.165.217.88',
'99.233.178.15',
'72.15.61.181', ]
def update_gecode(ip, location):
    """Store lat/lon on the IPStore row and attach a reverse-geocode.

    ip       -- IPStore model instance to update and save
    location -- dict with 'latitude'/'longitude' (from GeoIP2 raw data)

    NOTE: uses Python 2 print-statement syntax; this module targets
    Python 2.
    """
    from googlemaps import Client
    # from django.conf import settings
    import json
    ip.latitude = location['latitude']
    ip.longitude = location['longitude']
    gmaps = Client(key=settings.GOOGLE_GEOCODE_KEY)
    result = gmaps.reverse_geocode((location['latitude'], location['longitude']
                                    ))
    # Persist the raw Google result as a JSON string.
    ip.geocode = json.dumps(result)
    print result
    ip.save()
# Geolocate each hard-coded IP and store new ones; IPs missing from the
# MaxMind database are reported and skipped.
for ip in ips:
    try:
        ip2geo = reader.city(ip).raw
        location = ip2geo['location']
        # get_or_create avoids duplicate rows; only newly created rows
        # are reverse-geocoded (a paid API call).
        store, created = IPStore.objects.get_or_create(ip=ip)
        if created:
            update_gecode(store, location)
    except AddressNotFoundError as e:
        print e
|
[
"yousuf.jawwad@gmail.com"
] |
yousuf.jawwad@gmail.com
|
1bff2ac0967e16eaba266de1b1749c487521c995
|
08778088da558a8bc79326754bb86f61c4cf082b
|
/eisitirio/database/waiting.py
|
5d06fb161572f0a5a5f73faf4ed6d38b4a5a6dad
|
[] |
no_license
|
toastwaffle/Eisitirio
|
b5a7c48fc015857bfccdbe3f702e4c12c2e8277c
|
64ff15704b6b62d6ed385f7add59e7face88a95c
|
refs/heads/master
| 2020-05-24T15:51:12.023834
| 2019-09-12T19:09:23
| 2019-09-12T19:09:23
| 187,341,863
| 0
| 0
| null | 2019-05-18T09:59:35
| 2019-05-18T09:59:35
| null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
# coding: utf-8
"""Database model for entries on the waiting list."""
from __future__ import unicode_literals
import datetime
from eisitirio.database import db
DB = db.DB
class Waiting(DB.Model):
    """Model for entries on the waiting list."""
    __tablename__ = "waiting"
    # When the entry was created (UTC, set in __init__).
    waiting_since = DB.Column(DB.DateTime(), nullable=False)
    # Number of tickets the user is waiting for.
    waiting_for = DB.Column(DB.Integer(), nullable=False)
    user_id = DB.Column(DB.Integer, DB.ForeignKey("user.object_id"), nullable=False)
    # Owning user; reverse accessor User.waiting is a lazy query.
    user = DB.relationship(
        "User", backref=DB.backref("waiting", lazy="dynamic"), foreign_keys=[user_id]
    )
    def __init__(self, user, waiting_for):
        self.user = user
        self.waiting_for = waiting_for
        self.waiting_since = datetime.datetime.utcnow()
    def __repr__(self):
        # Pluralises "ticket" for counts other than 1.
        return "<Waiting: {0} for {1} ticket{2}>".format(
            self.user.full_name, self.waiting_for, "" if self.waiting_for == 1 else "s"
        )
|
[
"samuel.littley@toastwaffle.com"
] |
samuel.littley@toastwaffle.com
|
7aba7f32ed96c5e577fdeed1ddb2f5bf167c0d91
|
0d32e3819606c3fb6820d0cd5f5097db3b0d3dd4
|
/HW3/q_learning_no_epsilon_decay_mountain_car.py
|
6afd1f1d11cfe24eb60909d28e8ea03a2aa4b912
|
[] |
no_license
|
IanCBrown/COMP5600
|
e8e06b2a8e3bde0acc6897adb2396a57a2811f0a
|
ef454c009d6fd5eec50ceec5a8283a7c6d81d097
|
refs/heads/master
| 2020-08-02T13:20:41.024681
| 2019-12-09T03:53:37
| 2019-12-09T03:53:37
| 211,366,293
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,059
|
py
|
import math
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import gym
from gym import spaces
from gym.utils import seeding
# Resources:
# https://en.wikipedia.org/wiki/Mountain_car_problem
# https://towardsdatascience.com/getting-started-with-reinforcement-learning-and-open-ai-gym-c289aca874f
# https://towardsdatascience.com/reinforcement-learning-temporal-difference-sarsa-q-learning-expected-sarsa-on-python-9fecfda7467e
def epsilon_greedy(Q, state, action_space, epsilon):
    """Pick an action for `state` under an epsilon-greedy policy.

    With probability (1 - epsilon) exploit the best action from the
    Q-table; otherwise explore with a uniformly random action index
    in [0, action_space).
    """
    exploit = np.random.rand() < 1 - epsilon
    if exploit:
        return np.argmax(Q[state[0], state[1]])
    return np.random.randint(0, action_space)
def q_learning(learning_rate, discount, epsilon, episodes):
    """Tabular Q-learning on MountainCar-v0 over a discretized state grid.

    Position/velocity are scaled by (10, 100) and rounded to index a
    3-D Q-table of shape (pos_bins, vel_bins, n_actions).

    NOTE: despite taking `epsilon`, actions are chosen purely greedily
    here (the epsilon_greedy call is commented out), so `epsilon` is
    effectively unused in this variant.

    Returns (avg_reward_list, var_list): mean and variance of the total
    episode reward, recorded once every 100 episodes.
    """
    # initialize environment
    env = gym.make("MountainCar-v0")
    env.reset()
    # Number of discrete bins per state dimension.
    states = (env.observation_space.high - env.observation_space.low)*np.array([10,100])
    states = np.round(states, 0).astype(int) + 1
    # Q(s,a), randomly initialised in [-1, 1)
    Q_table = np.random.uniform(low = -1, high = 1, size = (states[0], states[1], env.action_space.n))
    reward_list = []
    var_list = []
    avg_reward_list = []
    # Q learning main loop
    for i in range(episodes):
        finished = False
        total_reward = 0
        reward = 0
        state = env.reset()
        # Discretize the initial state.
        state_adj = (state - env.observation_space.low)*np.array([10,100])
        state_adj = np.round(state_adj, 0).astype(int)
        while not finished:
            # render last N episodes
            # comment out to see plots
            # if i >= episodes - 1:
            #     env.render()
            # action = epsilon_greedy(Q_table, state_adj, env.action_space.n, epsilon)
            # pick action greedily without randomness
            action = np.argmax(Q_table[state_adj[0], state_adj[1]])
            next_state, reward, finished, info = env.step(action)
            # Discretize
            next_state_adj = (next_state - env.observation_space.low)*np.array([10,100])
            next_state_adj = np.round(next_state_adj, 0).astype(int)
            if finished and next_state[0] >= 0.5: # reached the goal flag
                # Terminal goal state: clamp the value to the raw reward.
                Q_table[state_adj[0], state_adj[1], action] = reward
            else:
                # Standard TD(0) update toward reward + gamma * max_a' Q(s', a').
                update = learning_rate * (reward + discount * np.max(Q_table[next_state_adj[0],next_state_adj[1]])
                                          - Q_table[state_adj[0], state_adj[1], action])
                # update Q table
                Q_table[state_adj[0], state_adj[1], action] += update
            total_reward += reward
            state_adj = next_state_adj
        reward_list.append(total_reward)
        # choose how often to record data
        # recording every data point will make the plots crowded
        # 10 and 100 work well.
        recording_interval = 100
        if i % recording_interval == 0:
            avg_reward = np.mean(reward_list)
            var = np.var(reward_list)
            var_list.append(var)
            avg_reward_list.append(avg_reward)
            reward_list = []
    env.close()
    return (avg_reward_list, var_list)
# Adjust these parameters as needed
number_of_episodes = 2500
learning_rate = 0.1  # TD step size (alpha)
gamma = 0.9          # discount factor
epsilon = 0.8        # exploration rate (unused while actions are picked greedily)
def single_run():
    """
    Run the algorithm once and show three plots interactively
    (average reward, variance, and reward with variance error bars).
    """
    rewards_and_var = q_learning(learning_rate, gamma, epsilon, number_of_episodes)
    avg_reward = rewards_and_var[0]
    var = rewards_and_var[1]
    # One data point was recorded every 100 episodes.
    episodes1 = 100*(np.arange(len(avg_reward)) + 1)
    episodes2 = 100*(np.arange(len(var)) + 1)
    plt.figure("Average Reward vs. Episodes")
    plt.title("Average Reward vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Average Reward")
    plt.plot(episodes1, avg_reward, color='blue')
    plt.figure("Variance vs. Episodes")
    plt.title("Variance vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Variance")
    plt.plot(episodes2, var, color='orange')
    plt.figure("Average Reward w/ Variance vs. Episodes")
    plt.title("Average Reward w/ Variance vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Average Reward w/ Variance")
    plt.errorbar(episodes1, avg_reward, var, linestyle='None', marker='^', ecolor="orange")
    plt.show()
def multi_run(N):
    """
    Run the algorithm N times and plot results averaged across runs.

    @param N - number of independent runs to average over (e.g. 20)

    Saves three figures (average reward, variance, and reward with
    variance error bars) under q_learning_no_epsilon_results/.
    """
    all_rewards = []
    all_variances = []  # renamed from `vars`, which shadowed the builtin
    for _ in range(N):
        avg_reward, var = q_learning(learning_rate, gamma, epsilon, number_of_episodes)
        all_rewards.append(avg_reward)
        all_variances.append(var)

    # Transpose so each group holds the i-th recorded value from every
    # run, then average across the N runs.
    reward_to_plot = [np.mean(group) for group in zip(*all_rewards)]
    var_to_plot = [np.mean(group) for group in zip(*all_variances)]

    # x-axis: one value was recorded every 100 episodes. Derive the
    # lengths from the aggregated series rather than from `avg_reward`/
    # `var` leaked out of the loop above (same length, but fragile).
    episodes1 = 100*(np.arange(len(reward_to_plot)) + 1)
    episodes2 = 100*(np.arange(len(var_to_plot)) + 1)

    plt.figure("Average Reward vs. Episodes")
    plt.title("Average Reward vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Average Reward")
    plt.plot(episodes1, reward_to_plot, color='blue')
    plt.savefig("q_learning_no_epsilon_results/Average_Reward_vs_Episodes.png")

    plt.figure("Variance vs. Episodes")
    plt.title("Variance vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Variance")
    plt.plot(episodes2, var_to_plot, color='orange')
    plt.savefig("q_learning_no_epsilon_results/Variance_vs_Episodes.png")

    plt.figure("Average Reward w/ Variance vs. Episodes")
    plt.title("Average Reward w/ Variance vs. Episodes")
    plt.xlabel("Episodes")
    plt.ylabel("Average Reward w/ Variance")
    plt.errorbar(episodes1, reward_to_plot, var_to_plot, linestyle='None', marker='^', ecolor="orange")
    plt.savefig("q_learning_no_epsilon_results/Average_Reward_and_Variance_vs_Episodes.png")
# choose multi or single run
# Entry point: average 20 independent runs and save the plots to disk.
# single_run()
multi_run(20)
|
[
"icb0004@auburn.edu"
] |
icb0004@auburn.edu
|
04495c8e1849b8df9b7b60f749e3172809966b93
|
7e419f7046386c20b9a6ed121c377fbcc8ff6885
|
/mobject/region.py
|
d5ba70ff04615c627dc85bf3d0a85a9d4bddec0d
|
[] |
no_license
|
ralusek/manim
|
47ebabb4d42accdda42a0da32ccdef6b129205b8
|
d53412d738e3b9d306b7d7d2ad39cac702ff3d18
|
refs/heads/master
| 2021-04-29T00:47:40.990006
| 2018-02-16T20:16:59
| 2018-02-16T20:16:59
| 121,837,012
| 1
| 0
| null | 2018-02-17T07:44:48
| 2018-02-17T07:44:47
| null |
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
import numpy as np
import itertools as it
from PIL import Image
from copy import deepcopy
from mobject import Mobject
from helpers import *
#TODO, this whole class should be something vectorized.
class Region(Mobject):
    """A subset of the plane, represented by a boolean predicate.

    ``condition`` maps two real arrays (x and y samples of space) to a
    boolean array; set algebra is implemented by composing predicates.
    """
    CONFIG = {
        "display_mode" : "region"
    }
    def __init__(self, condition = (lambda x, y : True), **kwargs):
        """
        Condition must be a function which takes in two real
        arrays (representing x and y values of space respectively)
        and return a boolean array.  This can essentially look like
        a function from R^2 to {True, False}, but & and | must be
        used in place of "and" and "or"
        """
        Mobject.__init__(self, **kwargs)
        self.condition = condition

    def _combine(self, region, op):
        # Capture the current predicate in a local BEFORE rebinding
        # self.condition: referencing self.condition inside the lambda
        # would resolve (at call time) to the new lambda itself and
        # recurse forever.
        current = self.condition
        self.condition = lambda x, y : op(
            current(x, y),
            region.condition(x, y)
        )

    def union(self, region):
        self._combine(region, lambda bg1, bg2 : bg1 | bg2)
        return self

    def intersect(self, region):
        self._combine(region, lambda bg1, bg2 : bg1 & bg2)
        return self

    def complement(self):
        # The previous implementation flipped ``self.bool_grid``, an
        # attribute this class never sets (AttributeError at runtime).
        # Negate the predicate instead, consistent with union()/
        # intersect(); ``~`` is elementwise NOT on boolean arrays.
        current = self.condition
        self.condition = lambda x, y : ~current(x, y)
        return self
return self
class HalfPlane(Region):
    def __init__(self, point_pair, upper_left = True, *args, **kwargs):
        """
        point_pair of the form [(x_0, y_0,...), (x_1, y_1,...)]

        If upper_left is True, the side of the region will be
        everything on the upper left side of the line through
        the point pair
        """
        if not upper_left:
            # Reversing the pair flips the line's direction, which flips
            # which side the condition below selects.
            point_pair = list(point_pair)
            point_pair.reverse()
        (x0, y0), (x1, y1) = point_pair[0][:2], point_pair[1][:2]
        def condition(x, y):
            # Sign of the 2-D cross product (p1-p0) x (p-p0): positive
            # for points to the left of the directed line p0 -> p1.
            return (x1 - x0)*(y - y0) > (y1 - y0)*(x - x0)
        Region.__init__(self, condition, *args, **kwargs)
def region_from_line_boundary(*lines, **kwargs):
    """Intersect the upper-left half-planes of the given lines."""
    reg = Region(**kwargs)
    for line in lines:
        reg.intersect(HalfPlane(line, **kwargs))
    return reg
def region_from_polygon_vertices(*vertices, **kwargs):
    # The polygon interior is the intersection of half-planes along
    # consecutive vertex pairs (vertices assumed given in order).
    return region_from_line_boundary(*adjacent_pairs(vertices), **kwargs)
def plane_partition(*lines, **kwargs):
    """
    A 'line' is a pair of points [(x0, y0,...), (x1, y1,...)]

    Returns the list of regions of the plane cut out by
    these lines
    """
    result = []
    half_planes = [HalfPlane(line, **kwargs) for line in lines]
    complements = [deepcopy(hp).complement() for hp in half_planes]
    num_lines = len(lines)
    # Each True/False assignment picks one side of every line; the
    # intersection of the chosen half-planes is one cell of the partition.
    for bool_list in it.product(*[[True, False]]*num_lines):
        reg = Region(**kwargs)
        for i in range(num_lines):
            if bool_list[i]:
                reg.intersect(half_planes[i])
            else:
                reg.intersect(complements[i])
        # NOTE(review): Region defines no ``bool_grid`` attribute in this
        # file — presumably it is set elsewhere (e.g. by Mobject); verify,
        # since both this non-emptiness check and Region.complement()
        # depend on it.
        if reg.bool_grid.any():
            result.append(reg)
    return result
def plane_partition_from_points(*points, **kwargs):
    """
    Returns list of regions cut out by the complete graph
    with points from the argument as vertices.

    Each point comes in the form (x, y)
    """
    # Every unordered pair of points defines one cutting line.
    lines = [[p1, p2] for (p1, p2) in it.combinations(points, 2)]
    return plane_partition(*lines, **kwargs)
|
[
"grantsanderson7@gmail.com"
] |
grantsanderson7@gmail.com
|
3d44285b7f4667f8c29bbb1bf7a019e76508ecb4
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/pillow/change-pixels-numpy-array-PixelAccess/main-PixelAccess.py
|
753aed3b65ccfc089b9e2661eb5527fc4b576b6e
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
#!/usr/bin/env python3
# date: 2019.09.29
# `PixelAccess` changes pixels in original `img`
# so there is no need to convert it back to `Image`
# BTW: Image uses [col,row] (array uses [row,col])
from PIL import Image
img = Image.open('image.jpg')
pixels = img.load()  # PixelAccess view: writes mutate img directly
width, height = img.size
# Replace every pure-black pixel with pure red, in place.
for col in range(width):
    for row in range(height):
        if pixels[col,row] == (0, 0, 0):
            pixels[col,row] = (255, 0 ,0)
img.show()
|
[
"furas@tlen.pl"
] |
furas@tlen.pl
|
8af072ca10d2818422727df5fbd11c71012690ef
|
5da80918ac50156f351966f96e2131123222d483
|
/hack/coalesce.py
|
ddb07665af6933731c1eef0253ccadbd9605a08e
|
[
"Apache-2.0"
] |
permissive
|
dinomiteX/cluster-api-provider-aws
|
43045e2c2d1836f1722795e5d4afeef2a3b407ea
|
bc4496ff9235a64f81dd9e6f2c97e368b6099431
|
refs/heads/master
| 2020-07-07T14:50:26.244166
| 2019-08-20T13:37:01
| 2019-08-20T13:37:01
| 203,380,287
| 0
| 0
|
Apache-2.0
| 2019-08-20T13:18:24
| 2019-08-20T13:18:23
| null |
UTF-8
|
Python
| false
| false
| 3,191
|
py
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stolen from https://github.com/kubernetes/test-infra/blob/master/hack/coalesce.py
"""Coalesces bazel test results into one file."""
import argparse
import os
import re
import xml.etree.ElementTree as ET
BAZEL_FAILURE_HEADER = '''exec ${PAGER:-/usr/bin/less} "$0" || exit 1
-----------------------------------------------------------------------------
'''
# from https://www.w3.org/TR/xml11/#charsets
# RestrictedChar ::= [#x1-#x8]|[#xB-#xC]|[#xE-#x1F]|[#x7F-#x84]|[#x86-#x9F]
RESTRICTED_XML_CHARS_RE = re.compile(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x84\x86-\x9F]')
ANSI_ESCAPE_CODES_RE = re.compile(r'\033\[[\d;]*[@-~]')
def test_packages(root):
    """Yield each directory under *root* containing both test.xml and test.log."""
    required = ('test.xml', 'test.log')
    for dirpath, _dirs, filenames in os.walk(root):
        present = set(filenames)
        if all(name in present for name in required):
            yield dirpath
def sanitize(text):
    """Strip the bazel failure header, ANSI escapes and XML-restricted chars."""
    if text.startswith(BAZEL_FAILURE_HEADER):
        text = text[len(BAZEL_FAILURE_HEADER):]
    # Remove terminal colour codes first, then any control characters
    # that XML 1.1 forbids.
    for pattern in (ANSI_ESCAPE_CODES_RE, RESTRICTED_XML_CHARS_RE):
        text = pattern.sub('', text)
    return text
def result(pkg):
    """Given a directory, create a testcase element describing it."""
    elem = ET.Element('testcase')
    elem.set('classname', 'go_test')
    # bazel-testlogs/<path...>/<target>  ->  //<path>:<target>
    pkg_parts = pkg.split('/')
    elem.set('name', '//%s:%s' % ('/'.join(pkg_parts[1:-1]), pkg_parts[-1]))
    elem.set('time', '0')
    suites = ET.parse(pkg + '/test.xml').getroot()
    for suite in suites:
        for case in suite:
            for status in case:
                if status.tag == 'error' or status.tag == 'failure':
                    # Attach the full sanitized test log as failure text.
                    failure = ET.Element('failure')
                    with open(pkg + '/test.log') as fp:
                        # NOTE(review): str.decode is Python 2 only; this
                        # script appears to target Python 2 — confirm
                        # before running under Python 3.
                        text = fp.read().decode('UTF-8', 'ignore')
                    failure.text = sanitize(text)
                    elem.append(failure)
    return elem
def main():
    """Collect all bazel test results into $ARTIFACTS/junit_bazel.xml."""
    root = ET.Element('testsuite')
    root.set('time', '0')
    for package in sorted(test_packages('bazel-testlogs')):
        root.append(result(package))
    artifacts_dir = os.environ['ARTIFACTS']
    try:
        os.mkdir(artifacts_dir)
    except OSError:
        # Directory already exists (or is otherwise uncreatable);
        # proceed and let the open() below surface real problems.
        pass
    with open(os.path.join(artifacts_dir, 'junit_bazel.xml'), 'w') as fp:
        fp.write(ET.tostring(root, 'UTF-8'))
if __name__ == '__main__':
    # Optionally run from a different repository root before collecting.
    PARSER = argparse.ArgumentParser(description='Coalesce JUnit results.')
    PARSER.add_argument('--repo_root', default='.')
    ARGS = PARSER.parse_args()
    os.chdir(ARGS.repo_root)
    main()
|
[
"k8s-ci-robot@users.noreply.github.com"
] |
k8s-ci-robot@users.noreply.github.com
|
92785644c971f51c1aff7d47c833a14c473328a3
|
dea5dfdc661309fa26fc93d4884fbcbb75e4d8a0
|
/Back_ground/control/classcontrol.py
|
be904879e1ec0441ee72a9fc24b89447004fa001
|
[
"Apache-2.0"
] |
permissive
|
sherwel/Behavior_culture
|
0583ba061d7a2feba8f45c2b574005fcc76e4cfd
|
43b68e50aed369aacefc5435ec4f9b70960d2344
|
refs/heads/master
| 2021-01-10T03:47:20.328109
| 2016-02-29T04:42:49
| 2016-02-29T04:42:49
| 50,472,281
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,199
|
py
|
#!/usr/bin/python
#coding:utf-8
from tool import SQLTool ,config
from Back_ground.model import takeclass,Class
limitpage=15
localconfig=config.Config()
def haveclassshow(schoolid='',classid='',teacherid='',page='0'):
    """Paginated listing of teaching assignments (teacher joined to class).

    Empty-string filters are ignored. Returns (records, count, pagecount),
    where records is a list of Takeclass objects; ([], 0, pagecount) when
    nothing matches.
    """
    validresult=False
    # Build WHERE column/value pairs from the non-empty filters.
    request_params=[]
    values_params=[]
    if schoolid!='':
        request_params.append('schoolId')
        values_params.append(SQLTool.formatstring(schoolid))
    if classid!='':
        request_params.append('t_classes.classId')
        values_params.append(SQLTool.formatstring(classid))
    if teacherid!='':
        request_params.append('teacherId')
        values_params.append(SQLTool.formatstring(teacherid))
    # Join condition between the teachers table and the teach table.
    request_params.append('t_teach.teacherId')
    values_params.append('t_teachers.teacherId')
    DBhelp=SQLTool.DBmanager()
    DBhelp.connectdb()
    table=localconfig.teachertable
    # First query: only needed for the total row count.
    result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.teachtable], ['t_teachers.teacherId','masterId','time','schoolId','teacherName','classId'], request_params, values_params)
    if count == 0:
        pagecount = 0;
    elif count %limitpage> 0:
        # pagecount = math.ceil(count / limitpage)
        pagecount=int((count+limitpage-1)/limitpage)
    else:
        pagecount = count / limitpage
    # print pagecount
    if pagecount>0:
        # Second query: fetch just the requested page, newest first.
        limit=' limit '+str(int(page)*limitpage)+','+str(limitpage)
        result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.teachtable], ['t_teachers.teacherId','masterId','time','schoolId','teacherName','classId'], request_params, values_params,limit,order='time desc')
    DBhelp.closedb()
    classes=[]
    if count>0:
        validresult=True
        for temp in result :
            aclass=takeclass.Takeclass(teacherid=temp['teacherId'],schoolid=temp['schoolId'],masterid=temp['masterId'],time=temp['time'],teachername=temp['teacherName'],classid=temp['classId'])
            classes.append(aclass)
        return classes,count,pagecount
    return [],0,pagecount
def classshow(schoolname='',schoolid='',gradeid='',classid='',classname='',page='0'):
    """Paginated listing of classes joined with their school and name.

    Empty-string filters are ignored. Returns (records, count, pagecount),
    where records is a list of Class objects; ([], 0, pagecount) when
    nothing matches.
    """
    validresult=False
    # Build WHERE column/value pairs from the non-empty filters.
    request_params=[]
    values_params=[]
    if schoolname!='':
        request_params.append('schoolName')
        values_params.append(SQLTool.formatstring(schoolname))
    if gradeid!='':
        request_params.append('t_classes.gradeId')
        values_params.append(SQLTool.formatstring(gradeid))
    if classid!='':
        request_params.append('t_classes.classId')
        values_params.append(SQLTool.formatstring(classid))
    if classname!='':
        request_params.append('t_class_name.className')
        values_params.append(SQLTool.formatstring(classname))
    if schoolid!='':
        request_params.append('t_classes.schoolId')
        values_params.append(SQLTool.formatstring(schoolid))
    # Join conditions: school <-> class and class <-> class name.
    request_params.append('t_school.schoolId')
    values_params.append('t_classes.schoolId')
    request_params.append('t_classes.classId')
    values_params.append('t_class_name.classId')
    DBhelp=SQLTool.DBmanager()
    DBhelp.connectdb()
    table=localconfig.schooltable
    # First query: only needed for the total row count.
    result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.classtable,localconfig.classnametable], ['schoolName','t_classes.schoolId','t_classes.gradeId','cId','t_class_name.className','t_classes.classId'], request_params, values_params)
    if count == 0:
        pagecount = 0;
    elif count %limitpage> 0:
        # pagecount = math.ceil(count / limitpage)
        pagecount=int((count+limitpage-1)/limitpage)
    else:
        pagecount = count / limitpage
    # print pagecount
    if pagecount>0:
        # Second query: fetch just the requested page.
        limit=' limit '+str(int(page)*limitpage)+','+str(limitpage)
        result,content,count,col=DBhelp.searchtableinfo_byparams([table,localconfig.classtable,localconfig.classnametable], ['schoolName','t_classes.schoolId','t_classes.gradeId','cId','t_class_name.className','t_classes.classId'], request_params, values_params,limit,order='schoolId desc')
    DBhelp.closedb()
    classes=[]
    if count>0:
        validresult=True
        for temp in result :
            aclass=Class.Class(schoolname=temp['schoolName'],schoolid=temp['schoolId'],gradeid=temp['gradeId'],cid=temp['cId'],classname=temp['className'],classid=temp['classId'])
            classes.append(aclass)
        return classes,count,pagecount
    return [],0,pagecount
## count is the number of result rows, col the number of result columns; count and pagecount are both ints
def loadclass(request,username=''):
    """Build a School object from POST data; returns (school, ok).

    Fails (None, False) when schoolid or schoolname is missing.
    NOTE(review): ``school`` is not imported in this module's visible
    imports (only takeclass and Class are) — this presumably raises
    NameError; confirm the missing import. ``starttime`` is read but
    never used.
    """
    schoolname=request.POST.get('schoolname','')
    schoolid=request.POST.get('schoolid','')
    province=request.POST.get('province','')
    city=request.POST.get('city','')
    starttime=request.POST.get('starttime','')
    tempschool=None
    if schoolid=='' or schoolname=='':
        return tempschool,False
    tempschool=school.School(schoolname=schoolname,schoolid=schoolid,province=province,city=city)
    return tempschool,True
def classadd(school):
    """Insert a school row built from the given School object.

    Empty-string attributes are omitted from the INSERT. Returns the
    insert result from the DB helper.
    NOTE(review): despite the name, this writes localconfig.schooltable,
    not a class table — verify the intent.
    """
    schoolname=school.getSchoolname()
    schoolid=school.getSchoolid()
    province=school.getProvince()
    city=school.getCity()
    starttime=school.getStarttime()
    # Collect only the non-empty columns for the INSERT statement.
    request_params=[]
    values_params=[]
    if schoolname!='':
        request_params.append('schoolName')
        values_params.append(SQLTool.formatstring(schoolname))
    if schoolid!='':
        request_params.append('schoolId')
        values_params.append(SQLTool.formatstring(schoolid))
    if province!='':
        request_params.append('province')
        values_params.append(SQLTool.formatstring(province))
    if city!='':
        request_params.append('city')
        values_params.append(SQLTool.formatstring(city))
    if starttime!='':
        request_params.append('starttime')
        values_params.append(SQLTool.formatstring(starttime))
    table=localconfig.schooltable
    DBhelp=SQLTool.DBmanager()
    DBhelp.connectdb()
    tempresult=DBhelp.inserttableinfo_byparams(table=table, select_params=request_params,insert_values= [tuple(values_params)])
    DBhelp.closedb()
    return tempresult
def classupdate(schoolname='',schoolid='',province='',city='',starttime=''):
    """Update school rows with the given non-empty column values.

    NOTE(review): wset_params/wand_params (the WHERE clause inputs) are
    always empty here, so this presumably updates every row in the
    table — confirm against DBmanager.updatetableinfo_byparams.
    """
    # SET column/value pairs; empty-string arguments are skipped.
    request_params=[]
    values_params=[]
    wset_params=[]
    wand_params=[]
    if schoolname!='':
        request_params.append('schoolName')
        values_params.append(SQLTool.formatstring(schoolname))
    if schoolid!='':
        request_params.append('schoolId')
        values_params.append(SQLTool.formatstring(schoolid))
    if province!='':
        request_params.append('province')
        values_params.append(SQLTool.formatstring(province))
    if city!='':
        request_params.append('city')
        values_params.append(SQLTool.formatstring(city))
    if starttime!='':
        request_params.append('starttime')
        values_params.append(SQLTool.formatstring(starttime))
    table=localconfig.schooltable
    DBhelp=SQLTool.DBmanager()
    DBhelp.connectdb()
    tempresult=DBhelp.updatetableinfo_byparams([table],request_params,values_params,wset_params,wand_params)
    DBhelp.closedb()
    return tempresult
|
[
"nanshihui@qq.com"
] |
nanshihui@qq.com
|
56922e0d19840031cf2fd989884a3f68c517958f
|
28c0bcb13917a277cc6c8f0a34e3bb40e992d9d4
|
/koku/api/resource_types/azure_regions/view.py
|
903367bda356e2193783aa57648a5dc9d0784689
|
[
"Apache-2.0"
] |
permissive
|
luisfdez/koku
|
43a765f6ba96c2d3b2deda345573e1d97992e22f
|
2979f03fbdd1c20c3abc365a963a1282b426f321
|
refs/heads/main
| 2023-06-22T13:19:34.119984
| 2021-07-20T12:01:35
| 2021-07-20T12:01:35
| 387,807,027
| 0
| 1
|
Apache-2.0
| 2021-07-20T13:50:15
| 2021-07-20T13:50:14
| null |
UTF-8
|
Python
| false
| false
| 2,126
|
py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""View for Azure Region locations."""
from django.db.models import F
from django.utils.decorators import method_decorator
from django.views.decorators.vary import vary_on_headers
from rest_framework import filters
from rest_framework import generics
from api.common import CACHE_RH_IDENTITY_HEADER
from api.common.permissions.azure_access import AzureAccessPermission
from api.resource_types.serializers import ResourceTypeSerializer
from reporting.provider.azure.models import AzureCostSummaryByLocation
from reporting.provider.azure.openshift.models import OCPAzureCostSummaryByLocation
class AzureRegionView(generics.ListAPIView):
    """API GET list view for Azure Region locations."""
    # Default queryset: distinct non-null resource locations, exposed
    # under the generic key "value".
    queryset = (
        AzureCostSummaryByLocation.objects.annotate(**{"value": F("resource_location")})
        .values("value")
        .distinct()
        .filter(resource_location__isnull=False)
    )
    serializer_class = ResourceTypeSerializer
    permission_classes = [AzureAccessPermission]
    filter_backends = [filters.OrderingFilter, filters.SearchFilter]
    ordering = ["value"]
    search_fields = ["$value"]
    @method_decorator(vary_on_headers(CACHE_RH_IDENTITY_HEADER))
    def list(self, request):
        # Reads the user's Azure subscription-guid access and restricts
        # the listed regions to what the user is allowed to see.
        user_access = []
        openshift = self.request.query_params.get("openshift")
        if openshift == "true":
            # Switch to the OpenShift-on-Azure summary table.
            self.queryset = (
                OCPAzureCostSummaryByLocation.objects.annotate(**{"value": F("resource_location")})
                .values("value")
                .distinct()
                .filter(resource_location__isnull=False)
            )
        if request.user.admin:
            # Admins see everything; skip the access filter below.
            return super().list(request)
        elif request.user.access:
            user_access = request.user.access.get("azure.subscription_guid", {}).get("read", [])
        self.queryset = self.queryset.values("value").filter(subscription_guid__in=user_access)
        return super().list(request)
|
[
"noreply@github.com"
] |
luisfdez.noreply@github.com
|
ef24ba60db7198ba0b519cb7a3e69990ba8cea61
|
87bb2b9258c887e8fbcaca08d18e5d95ae96462d
|
/Codewars/Python/6kyu/6kyu_Sort the odd.py
|
8cedc8b089fe3134f4a5c46a38957c62ca3278d3
|
[] |
no_license
|
KonradMarzec1991/Codewars-LeetCode
|
a9e4d09f4271fecb3a7fc1ee436358ac1bbec5e4
|
442113532158f5a3ee7051a42e911afa5373bb5f
|
refs/heads/master
| 2023-04-21T17:04:37.434876
| 2021-05-11T21:47:14
| 2021-05-11T21:47:14
| 166,555,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
def sort_array(source_array):
    """Sort the odd values of source_array in place, ascending.

    Even values keep their original positions; odd values are replaced,
    left to right, by the same odd values in ascending order.  The list
    is mutated and also returned.
    """
    odds = iter(sorted(v for v in source_array if v % 2))
    for i, v in enumerate(source_array):
        if v % 2:
            source_array[i] = next(odds)
    return source_array
|
[
"konrimarzec@gmail.com"
] |
konrimarzec@gmail.com
|
82519a5e2771413edc95e5dee8e2e066d793d518
|
e838076bc1c8aedbb8c77710b1a1a32efc3a4da1
|
/pc1/models.py
|
1e61f611c4a5dbf401dcd01d926e4bc857bb4b2f
|
[] |
no_license
|
abbasgis/ferrp
|
5f2f7768f0e38e299498c2e74379311698b6321f
|
77736c33e7ec82b6adf247a1bf30ccbc4897f02e
|
refs/heads/master
| 2023-05-25T09:59:45.185025
| 2021-06-12T09:15:07
| 2021-06-12T09:15:07
| 376,236,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,549
|
py
|
from django.db import models
# Create your models here.
class AdpDraft201718Vw(models.Model):
    """Read-only mapping of the ``adp_draft_201718_vw`` database view.

    Columns were introspected (inspectdb-style), hence the "field type is
    a guess" notes; Django never creates or migrates this table
    (``managed = False``).
    """
    gs_no = models.TextField(primary_key=True)  # This field type is a guess.
    s_name = models.TextField(blank=True, null=True)  # This field type is a guess.
    district = models.TextField(blank=True, null=True)  # This field type is a guess.
    tehsil = models.TextField(blank=True, null=True)  # This field type is a guess.
    s_type = models.TextField(blank=True, null=True)  # This field type is a guess.
    sec_name = models.TextField(blank=True, null=True)  # This field type is a guess.
    sec_id = models.TextField(blank=True, null=True)  # This field type is a guess.
    approval_date = models.TextField(blank=True, null=True)  # This field type is a guess.
    cost_total = models.TextField(blank=True, null=True)  # This field type is a guess.
    foreign_aid = models.TextField(blank=True, null=True)  # This field type is a guess.
    local_capital = models.TextField(blank=True, null=True)  # This field type is a guess.
    local_revenue = models.TextField(blank=True, null=True)  # This field type is a guess.
    capital_total = models.TextField(blank=True, null=True)  # This field type is a guess.
    revenue_total = models.TextField(blank=True, null=True)  # This field type is a guess.
    foreign_capital = models.TextField(blank=True, null=True)  # This field type is a guess.
    foreign_revenue = models.TextField(blank=True, null=True)  # This field type is a guess.
    allocation = models.TextField(blank=True, null=True)  # This field type is a guess.
    exp_upto_june = models.TextField(blank=True, null=True)  # This field type is a guess.
    projection_2017_18 = models.TextField(db_column='projection_2017-18', blank=True,
                                          null=True)  # Field renamed to remove unsuitable characters. This field type is a guess.
    projection_2018_19 = models.TextField(db_column='projection_2018-19', blank=True,
                                          null=True)  # Field renamed to remove unsuitable characters. This field type is a guess.
    throw_forward = models.TextField(blank=True, null=True)  # This field type is a guess.
    monitoring = models.TextField(blank=True, null=True)  # This field type is a guess.
    start_date = models.TextField(blank=True, null=True)
    end_date = models.TextField(blank=True, null=True)
    cost_total_adp_origional = models.TextField(blank=True, null=True)
    class Meta:
        managed = False  # existing DB view; Django performs no DDL
        db_table = 'adp_draft_201718_vw'
class TblSchemesHistory(models.Model):
    """Unmanaged mapping of the existing ``tbl_schemes_history`` table.

    Free-text columns holding per-scheme (project) history details,
    keyed by an auto-increment id and referencing a scheme via gs_no.
    """
    id = models.AutoField(primary_key=True)
    gs_no = models.IntegerField(blank=True, null=True)  # scheme number; presumably FK to a schemes table — TODO confirm
    project_location = models.TextField(blank=True, null=True)
    authorities_responsible = models.TextField(blank=True, null=True)
    plan_provision = models.TextField(blank=True, null=True)
    project_objectives = models.TextField(blank=True, null=True)
    annual_operating_cost = models.TextField(blank=True, null=True)
    capital_cost_estimates = models.TextField(blank=True, null=True)
    physical_plan = models.TextField(blank=True, null=True)
    financial_plan = models.TextField(blank=True, null=True)
    financial_plan_text = models.TextField(blank=True, null=True)
    gantt_chart = models.TextField(blank=True, null=True)
    demand_and_supply_analysis = models.TextField(blank=True, null=True)
    benefits_of_the_projects_analysis = models.TextField(blank=True, null=True)
    implementation_schedule = models.TextField(blank=True, null=True)
    ms_and_mp = models.TextField(blank=True, null=True)
    additional_projects_decisions_required = models.TextField(blank=True, null=True)
    certified = models.TextField(blank=True, null=True)
    class Meta:
        managed = False  # existing table; Django performs no DDL
        db_table = 'tbl_schemes_history'
class TblSchemesAnnexure(models.Model):
    """Unmanaged mapping of annexure documents attached to a scheme."""
    id = models.TextField(primary_key=True)
    gs_no = models.TextField(blank=True, null=True)  # owning scheme number
    annexure_title = models.TextField(blank=True, null=True)
    annexure_data = models.TextField(blank=True, null=True)
    class Meta:
        managed = False
        # NOTE(review): table name is singular ('tbl_scheme_annexure') while
        # the class name is plural — presumably intentional; verify against DB.
        db_table = 'tbl_scheme_annexure'
class TblHelp(models.Model):
    """Unmanaged mapping of per-section help entries with an optional image."""
    id = models.IntegerField(primary_key=True)  # AutoField?
    section_name = models.TextField(blank=True, null=True)
    info_name = models.TextField(blank=True, null=True)  # This field type is a guess.
    help_image = models.BinaryField(blank=True, null=True)  # raw image bytes
    class Meta:
        managed = False
        db_table = 'tbl_help'
|
[
"abbas123@abc"
] |
abbas123@abc
|
ef1c013cd55b95725b787557e86769b4fd758760
|
e3c8f786d09e311d6ea1cab50edde040bf1ea988
|
/Incident-Response/Tools/cyphon/cyphon/alerts/migrations/0011_auto_20170815_1432.py
|
83a21b67e06bc5f594c3731289b36f09f4baa58f
|
[
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft",
"MIT"
] |
permissive
|
foss2cyber/Incident-Playbook
|
d1add8aec6e28a19e515754c6ce2e524d67f368e
|
a379a134c0c5af14df4ed2afa066c1626506b754
|
refs/heads/main
| 2023-06-07T09:16:27.876561
| 2021-07-07T03:48:54
| 2021-07-07T03:48:54
| 384,988,036
| 1
| 0
|
MIT
| 2021-07-11T15:45:31
| 2021-07-11T15:45:31
| null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-15 18:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax Alert.distillery: allow blank/null, PROTECT on delete."""
    dependencies = [
        ('alerts', '0010_remove_alert_tags'),
    ]
    operations = [
        # PROTECT prevents deleting a Distillery that still has alerts.
        migrations.AlterField(
            model_name='alert',
            name='distillery',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='alerts', related_query_name='alerts', to='distilleries.Distillery'),
        ),
    ]
|
[
"a.songer@protonmail.com"
] |
a.songer@protonmail.com
|
1d9066b6f8c175153341870908a2697e4398d8bf
|
e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163
|
/ScientificComputing/ch14/filter_equalizer.py
|
8d3830bd050cd0f33b823f8ec982a021af94ccdc
|
[] |
no_license
|
socrates77-sh/learn
|
a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b
|
ae50978023f6b098b168b8cca82fba263af444aa
|
refs/heads/master
| 2022-12-16T16:53:50.231577
| 2019-07-13T13:52:42
| 2019-07-13T13:52:42
| 168,442,963
| 0
| 0
| null | 2022-12-08T05:18:37
| 2019-01-31T01:30:06
|
HTML
|
UTF-8
|
Python
| false
| false
| 860
|
py
|
# -*- coding: utf-8 -*-
import scipy.signal as signal
import pylab as pl
import math
import numpy as np
def design_equalizer(freq, Q, gain, Fs):
    """Design biquad peaking-EQ coefficients (RBJ Audio EQ Cookbook form).

    freq: center frequency in Hz; Q: quality factor; gain: peak gain in dB;
    Fs: sample rate in Hz.  Returns (b, a): normalized numerator and
    denominator coefficient lists with a[0] == 1.0.
    """
    amp = 10.0 ** (gain / 40.0)                # sqrt of the linear gain
    omega = 2 * math.pi * freq / Fs            # center frequency, rad/sample
    alpha = math.sin(omega) / 2 / Q            # bandwidth parameter
    cos_w = math.cos(omega)
    num = [1 + alpha * amp, -2 * cos_w, 1 - alpha * amp]
    den = [1 + alpha / amp, -2 * cos_w, 1 - alpha / amp]
    norm = den[0]
    return [c / norm for c in num], [1.0, den[1] / norm, den[2] / norm]
# Sweep center frequency, Q and gain and overlay each filter's magnitude
# response on a log-frequency axis.
pl.figure(figsize=(8, 4))
for freq in [1000, 2000, 4000]:
    for q in [0.5, 1.0]:
        for p in [5, -5, -10]:
            b, a = design_equalizer(freq, q, p, 44100)
            w, h = signal.freqz(b, a)  # w in rad/sample over [0, pi]
            # NOTE(review): w/pi*Fs maps Nyquist (pi) to Fs rather than Fs/2,
            # so the frequency axis appears doubled — verify intended scaling.
            pl.semilogx(w/np.pi*44100, 20*np.log10(np.abs(h)))
pl.xlim(100, 44100)
pl.xlabel(u"频率(Hz)")   # "Frequency (Hz)"
pl.ylabel(u"振幅(dB)")   # "Magnitude (dB)"
pl.subplots_adjust(bottom=0.15)
pl.show()
|
[
"zhwenrong@sina.com"
] |
zhwenrong@sina.com
|
04dafde3bc7f94f93d9f00a64036a3aba00ae0e4
|
caf0ba85f1c7a2b7208e7f0acebb3c047b17b0ba
|
/4_py_libro_1_pydroid/venv/4_py_libro_1_pydroid/COLECCIONES/py_1_collecciones.py
|
f5b5a8874953870d8fbe5e698201b48134170b5f
|
[] |
no_license
|
JAreina/python
|
12ca9bd5467420a813ac3f33b0adba6cd492f855
|
3b9ac8d37ab2abe70e34043857f96a76c19468c8
|
refs/heads/master
| 2020-03-22T07:57:31.675271
| 2018-09-12T06:38:49
| 2018-09-12T06:38:49
| 139,735,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
import collections
c = collections  # short alias for the module
# Counter tallies element frequencies from any iterable.
a = c.Counter([2,2,8,5,1,2,3,4,5,8])
print(a)
b = 'en un lugar de la mancha'
print( c.Counter(b))  # per-character counts (spaces included)
d = ['a',"b",'c',"c",'z']
print(c.Counter(d))
print("\n ::::::::: update counter::::::::::::\n")
e = c.Counter()  # start from an empty counter
print(e)
e.update("hola hola")  # update() adds per-character counts
print(e)
# update() also accepts a mapping: adds 100 to the count of 'a'.
e.update( {"a": 100} )
print(e)
|
[
"jareinafdez@gmail.com"
] |
jareinafdez@gmail.com
|
2f44a2a3e2304fa716d563c22a881c2c0fbf5ef7
|
257bd63361aa846ffdacdc15edaecf84c6364e78
|
/psou/pro1/pack3/class_ex9.py
|
b5fb7d6e7b2028d16e4b7c2b560541e4fb51073b
|
[] |
no_license
|
gom4851/hcjeon
|
86dcfd05ce47a13d066f13fe187d6a63142fb9fe
|
59a00ca9499f30e50127bb16eb510553e88ace43
|
refs/heads/master
| 2020-06-04T23:16:08.632278
| 2019-01-15T09:54:08
| 2019-01-15T09:54:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
'''
Created on 2018. 11. 8.
상속
'''
class Person:
    """Base class demonstrating class attributes, name mangling, and
    static vs. class methods.  (Comments translated from Korean;
    runtime strings left as-is.)"""
    say = "난 사람이야~"   # class attribute ("I'm a person~")
    nai = '20'             # class-level default age
    __kor = 59  # leading double underscore -> name-mangled ("private") attribute
    def __init__(self, nai):
        print('Person 생성자')  # "Person constructor"
        self.nai = nai
    def printInfo(self):
        print('나이:{}, 이야기:{}'.format(self.nai, self.say))
    def hello(self):
        print('안녕')
        print('hello에서 kor : ', self.__kor)
    @staticmethod
    def sbs(tel):
        # Static method: independent of any instance or class state.
        print('sbs_tel : ', tel)
    @classmethod
    def mbc(cls):
        # Class method: receives the class and can read class attributes.
        print('mbc_', cls.say, cls.nai, cls.__kor)
# Instantiate and exercise the base class.
p = Person('22')
p.printInfo()
p.hello()
print('**' * 20)
class Employee(Person):
    """Subclass overriding class attributes; shows self vs. super() dispatch."""
    say = "일하는 동물"  # overrides Person.say; instances see this value first
    subject = "부가"
    def __init__(self):
        print('Employee 생성자')  # note: does not call Person.__init__
    def EprintInfo(self):
        # self.printInfo() resolves via the MRO; super().printInfo() calls the
        # parent explicitly.  Same output here since printInfo isn't overridden.
        self.printInfo()
        super().printInfo()
e = Employee()
print(e.say, e.nai, e.subject)  # attribute lookup: instance -> class -> base class
e.printInfo()
e.EprintInfo()
print("**" * 20)
class Worker(Person):
    """Subclass with its own __init__ that delegates printing to Person."""
    def __init__(self, nai):
        print('Worker 생성자')
        self.nai = nai
    def WprintInfo(self):
        super().printInfo()
w = Worker('30')
w.WprintInfo()
print("~~" * 20)
class Programmer(Worker):
    """Grandchild class: unbound base-class __init__ call plus overriding."""
    def __init__(self, nai):
        print("Programmer 생성자")
        #super().__init__(nai)  # bound call (equivalent alternative)
        Worker.__init__(self, nai)  # unbound call: self passed explicitly
    def printInfo(self):
        print("오바라이딩 메소드")  # "overriding method"
    def WprintInfo(self):
        # self.printInfo() dispatches to the override above, not Person's.
        self.printInfo()
pr = Programmer(36)
pr.WprintInfo()
print()
a = 5
print(type(a))
print(Person.__bases__)       # parent class types of the current class
print(Programmer.__bases__)
# Static/class methods are callable from instances and classes alike.
pr.sbs('111-1111')
Person.sbs('222-2222')
pr.mbc()
Person.mbc()
|
[
"wer104@naver.com"
] |
wer104@naver.com
|
c4efff0966532f4a7d4d8ee3839194c7e641bf5c
|
91668f2d4404dd3d85b598b15b852d5a2eeeec6a
|
/setup.py
|
27912b3549aa77181a86318e03cafa84b6cf5e87
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
r-swilderd/mi-instrument
|
b4fd841f05e7c0522580b73f145c2a826ccf21d6
|
34b60925a01d6cf89db254fdc75566c59ccba144
|
refs/heads/master
| 2021-01-17T21:56:34.298829
| 2015-08-29T23:02:31
| 2015-08-29T23:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
#!/usr/bin/env python
# Prefer setuptools (provides find_packages); fall back to plain distutils.
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup
import os
import sys
# Add /usr/local/include to the path for macs, fixes easy_install for several packages (like gevent and pyyaml)
if sys.platform == 'darwin':
    os.environ['C_INCLUDE_PATH'] = '/usr/local/include'
version = '0.2.1'
# Package metadata and build configuration for OOINet Marine Integrations.
setup( name = 'marine-integrations',
       version = version,
       description = 'OOINet Marine Integrations',
       url = 'https://github.com/ooici/marine-integrations',
       download_url = 'http://sddevrepo.oceanobservatories.org/releases/',
       license = 'BSD',
       author = 'Ocean Observatories Initiative',
       author_email = 'contactooici@oceanobservatories.org',
       keywords = ['ooici'],
       packages = find_packages(),
       # Extra index locations searched for the dependencies below.
       dependency_links = [
           'http://sddevrepo.oceanobservatories.org/releases/',
           'https://github.com/ooici/pyon/tarball/master#egg=pyon',
           #'https://github.com/ooici/utilities/tarball/v2012.12.12#egg=utilities-2012.12.12',
       ],
       test_suite = 'pyon',
       # Console entry points for the IDK driver tooling.
       entry_points = {
           'console_scripts' : [
               'package_driver=ion.idk.scripts.package_driver:run',
               'start_driver=ion.idk.scripts.start_driver:run',
               'test_driver=ion.idk.scripts.test_driver:run',
           ],
       },
       install_requires = [
           'gitpy==0.6.0',
           'snakefood==1.4',
           'ntplib>=0.1.9',
           'apscheduler==2.1.0',
           #'utilities',
       ],
      )
|
[
"petercable@gmail.com"
] |
petercable@gmail.com
|
92f42f9f2b8c96f0d8e83bcaa79b8813312fbb2e
|
ba7c4862dfbc9d0469f389c0cdb3fed01f99ebe3
|
/controls/Control_Toolbox/steady_state_error.py
|
25f782973e80f3f0f6a329e2308fc7e37a7425c8
|
[] |
no_license
|
cmontalvo251/Python
|
293cbdf8832d7637d5c0b31eadd02d3ccf2f2c05
|
2b12ce043ee41e08537cfb62301c6a55d4661e04
|
refs/heads/master
| 2023-06-22T21:50:21.225067
| 2023-06-14T13:42:16
| 2023-06-14T13:42:16
| 229,313,158
| 8
| 3
| null | 2021-07-31T16:01:54
| 2019-12-20T18:03:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
### Demonstrates steady-state step error: open-loop plant vs. a closed loop
### with an integral controller (in MATLAB one would use ode45; here we use
### the scipy/control toolboxes).
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as I
import control as ctl
import scipy.signal as S
import scipy.linalg as slin
plt.close("all")
#### Zeros, Poles and Gain:
### zeros = roots of the numerator, poles = roots of the denominator,
### gain = leading numerator coeff / leading denominator coeff.
# Plant G(s) = 3/(s+3): single pole at -3, gain 3.
zeros = []
poles = [-3.0]
gain = 3.0
[N,D] = S.zpk2tf(zeros,poles,gain)
G = ctl.tf(N,D)
tout = np.linspace(0,5,1000)
tout,yout = ctl.step_response(G,tout)
# Integrator C(s) = 2/s: the pole at the origin drives steady-state
# step error to zero in the closed loop.
zeros = []
poles = 0  # NOTE(review): scalar here vs. list above — zpk2tf accepts both; verify
gain = 2.0
[N,D] = S.zpk2tf(zeros,poles,gain)
C = ctl.tf(N,D)
# Unity-feedback closed loop: T = CG/(1+CG).
sys_closed_loop = C*G/(1+C*G)
toutc,youtc = ctl.step_response(sys_closed_loop,tout)
plt.plot(tout,yout,'r-',label='Open Loop')
plt.plot(tout,youtc,'g-',label='Closed Loop')
plt.xlabel('Time (sec)')
plt.ylabel('State')
plt.legend()
plt.grid()
plt.show()
|
[
"cmontalvo251@gmail.com"
] |
cmontalvo251@gmail.com
|
fdc34a5acdc9df05688e643c6e3939d0f1cfc1a2
|
ca7162adc548c5937ebedd6234b40de7294e2da1
|
/11-dababase/sqlite/createTable.py
|
b5b8058f153badef698c5a031aec4709f76832e6
|
[] |
no_license
|
meloLeeAnthony/PythonLearn
|
03c259d745b1ccdc039e9999889ab54be14ae020
|
9915ec5bb7048712a97539a9c5bce8743567b22a
|
refs/heads/master
| 2023-07-10T21:12:54.027143
| 2021-08-19T12:43:19
| 2021-08-19T12:43:19
| 289,487,502
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
# coding=utf-8
'''
Steps (translated from Chinese):
1. Import the sqlite3 module
2. Create a connection with sqlite3.connect()
3. Create a cursor object
4. Write the CREATE TABLE sql statement
5. Execute the sql
6. Close the connection
'''
import sqlite3
conn = None
# Open (or create) the database file.
try:
    conn = sqlite3.connect('e:/sqlite3Demo/demo.db')
    print('连接sqlite库成功')
except Exception as e:
    print('连接sqlite库失败:', e)
if conn is not None:
    # Create a cursor object.
    cur = conn.cursor()
    # CREATE TABLE statement.
    sql = '''create table t_person(
                pno INTEGER primary key autoincrement,
                pname VARCHAR not null,
                age INTEGER
            )'''
    try:
        # Execute the sql statement.
        cur.execute(sql)
        print('创建表成功')
    except Exception as e:
        # NOTE(review): also fires if the table already exists on a re-run.
        print(e)
        print('创建表失败:', e)
    finally:
        # Close the cursor, then the connection.
        cur.close()
        conn.close()
|
[
"li.chun158@gmail.com"
] |
li.chun158@gmail.com
|
1eb5b10cb25082008a50ed8a90198526794f3f9b
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/container_http_get_py3.py
|
d9ca73f638183794cf20b5a1d7c59793e37d8b7d
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ContainerHttpGet(Model):
    """The container Http Get settings, for liveness or readiness probe.

    All required parameters must be populated in order to send to Azure.

    :param path: The path to probe.
    :type path: str
    :param port: Required. The port number to probe.
    :type port: int
    :param scheme: The scheme. Possible values include: 'http', 'https'
    :type scheme: str or ~azure.mgmt.containerinstance.models.enum
    """
    # msrest validation rules: only 'port' is mandatory.
    _validation = {
        'port': {'required': True},
    }
    # Maps Python attribute names to wire-format keys/types for (de)serialization.
    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
        'scheme': {'key': 'scheme', 'type': 'str'},
    }
    def __init__(self, *, port: int, path: str=None, scheme=None, **kwargs) -> None:
        super(ContainerHttpGet, self).__init__(**kwargs)
        self.path = path
        self.port = port
        self.scheme = scheme
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
06d91053430128ffbd2e417b7711666eb0bcf41a
|
5dd47abf7061201d9378e73e51f08fbb314ba2fd
|
/envdsys/envdaq/migrations_old/0008_auto_20190215_2305.py
|
aaa12d1357f80fc2772ac6d5b38aa0b5464dee40
|
[
"Unlicense"
] |
permissive
|
NOAA-PMEL/envDataSystem
|
4d264ae5209015e4faee648f37608d68a4461d0a
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
refs/heads/master
| 2023-02-23T22:33:14.334737
| 2021-07-22T01:09:16
| 2021-07-22T01:09:16
| 191,809,007
| 1
| 0
|
Unlicense
| 2023-02-08T00:45:54
| 2019-06-13T17:50:03
|
Python
|
UTF-8
|
Python
| false
| false
| 448
|
py
|
# Generated by Django 2.1.5 on 2019-02-15 23:05
from django.db import migrations
class Migration(migrations.Migration):
    """Drop DeviceDef.contacts and InstrumentDef.measurements fields."""
    dependencies = [
        ('envdaq', '0007_auto_20190215_2300'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='devicedef',
            name='contacts',
        ),
        migrations.RemoveField(
            model_name='instrumentdef',
            name='measurements',
        ),
    ]
|
[
"derek.coffman@noaa.gov"
] |
derek.coffman@noaa.gov
|
7e94fa976fa69e30a6a8fecb112d0151ab64e36a
|
14d66cef63d9e540f4a7dd76e8810b7f39c9e536
|
/viewer.py
|
81b3111c4649653ed33f54c8cbfa293e9a3acd44
|
[
"MIT"
] |
permissive
|
YarinAVI/GraphLayout
|
5327daebdb45f8273cf75c53341bc02f42d293c7
|
187ea93b06730186ec25dd71a28dd2ccdfaa4a7c
|
refs/heads/master
| 2021-06-17T04:21:02.746604
| 2017-05-31T16:23:20
| 2017-05-31T16:23:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,692
|
py
|
import layout
import render
import wx
# Sample graphs as edge lists; node labels may be strings or ints.
# Pressing SPACE in the viewer cycles through these in order.
TESTS = [
    [('t', 'te'), ('t', 'ti'), ('t', 'to'), ('te', 'tea'), ('te', 'ten'), ('tea', 'team'), ('ti', 'tin'), ('tin', 'tine'), ('to', 'ton'), ('ton', 'tone')],
    [(5, 11), (11, 10), (11, 2), (3, 10), (3, 8), (8, 9), (11, 9), (7, 8), (7, 11)],
    [(1, 2), (1, 5), (2, 5), (2, 3), (3, 4), (4, 5), (4, 6)],
    [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)],
    [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)],
    [(0, 1), (1, 2), (2, 0)],
    [(1, 2), (1, 5), (1, 8), (5, 6), (2, 3), (3, 4), (4, 2), (6, 7), (6, 8), (6, 3)],
    [(1, 2), (1, 3), (1, 4), (2, 4), (2, 5), (3, 6), (4, 3), (4, 6), (4, 7), (5, 4), (5, 7), (7, 6)],
    [(1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 1), (1, 4), (2, 5), (3, 6)],
    [(1, 3), (3, 2), (2, 1), (3, 5), (4, 1), (4, 2), (4, 12), (4, 13), (5, 6), (5, 8), (6, 7), (6, 8), (6, 10), (7, 10), (8, 9), (8, 10), (9, 5), (9, 11), (10, 9), (10, 11), (10, 14), (11, 12), (11, 14), (12, 13), (13, 11), (13, 15), (14, 13), (15, 14)],
    [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)],
    [(0, 1), (0, 3), (1, 4), (1, 2), (2, 5), (3, 4), (3, 6), (4, 5), (4, 7), (5, 8), (6, 7), (7, 8)],
]
class View(wx.Panel):
    """Canvas panel: renders the current graph bitmap centered.

    SPACE advances to the next test graph; ESC closes the parent frame.
    """
    def __init__(self, parent):
        super(View, self).__init__(parent)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.Bind(wx.EVT_SIZE, self.on_size)
        self.Bind(wx.EVT_PAINT, self.on_paint)
        self.Bind(wx.EVT_CHAR_HOOK, self.on_char)
        self.index = -1     # index into TESTS; -1 so the first next() shows 0
        self.weights = {}   # layout weight overrides from the sliders
        self.model = None   # (edges, nodes) for the current graph
        self.bitmap = None  # rendered bitmap, None until first render
        wx.CallAfter(self.next)
    def next(self):
        # Advance to the next test graph (wrapping) and recompute the layout.
        self.index = (self.index + 1) % len(TESTS)
        self.compute()
    def compute(self):
        # Run the layout for the current edge list and weights.
        edges = TESTS[self.index]
        nodes = layout.layout(edges, self.weights)
        self.set_model((edges, nodes))
    def update(self):
        # Re-render the current model at the present client size.
        if self.model is None:
            return
        cw, ch = self.GetClientSize()
        bitmap = render.render(cw, ch, *self.model)
        self.set_bitmap(bitmap)
    def set_model(self, model):
        self.model = model
        self.update()
    def set_weights(self, weights):
        self.weights = weights
        self.compute()
    def set_bitmap(self, bitmap):
        self.bitmap = bitmap
        self.Refresh()
        self.Update()
    def on_char(self, event):
        event.Skip()
        if event.GetKeyCode() == wx.WXK_ESCAPE:
            self.GetParent().Close()
        elif event.GetKeyCode() == wx.WXK_SPACE:
            self.next()
    def on_size(self, event):
        event.Skip()
        self.update()
    def on_paint(self, event):
        # Double-buffered paint: clear, then blit the bitmap centered,
        # with the current test index drawn in the top-left corner.
        dc = wx.AutoBufferedPaintDC(self)
        dc.SetBackground(wx.Brush(render.BACKGROUND))
        dc.Clear()
        if self.bitmap is None:
            return
        cw, ch = self.GetClientSize()
        bw, bh = self.bitmap.GetSize()
        x = cw / 2 - bw / 2
        y = ch / 2 - bh / 2
        dc.DrawBitmap(self.bitmap, x, y)
        dc.DrawText(str(self.index), 10, 10)
class Frame(wx.Frame):
    """Main window: graph View on the left, weight sliders on the right."""
    def __init__(self):
        super(Frame, self).__init__(None)
        self.create_controls(self)
        self.SetTitle('GraphLayout')
        self.SetClientSize((800, 600))
        self.Center()
    def create_controls(self, parent):
        panel = wx.Panel(parent)
        self.view = self.create_view(panel)
        sidebar = self.create_sidebar(panel)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.view, 1, wx.EXPAND)
        sizer.Add(sidebar, 0, wx.EXPAND | wx.ALL, 10)
        panel.SetSizer(sizer)
        return panel
    def create_view(self, parent):
        return View(parent)
    def create_sidebar(self, parent):
        # One labeled slider per layout weight; slider value = weight * 10,
        # initialized from layout.WEIGHTS.
        names = [
            'edge_edge',
            'rank',
            'length',
            'area',
        ]
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.sliders = []
        for name in names:
            value = int(layout.WEIGHTS[name] * 10)
            text = wx.StaticText(parent, -1, name)
            slider = wx.Slider(parent, -1, value, 0, 100)
            slider.name = name
            slider.Bind(wx.EVT_SCROLL_THUMBRELEASE, self.on_slider)
            self.sliders.append(slider)
            sizer.Add(text)
            sizer.Add(slider, 0, wx.EXPAND)
            sizer.AddSpacer(10)
        return sizer
    def on_slider(self, event):
        # Scale slider values back down by 10 and push them into the view.
        weights = {}
        for slider in self.sliders:
            weights[slider.name] = slider.GetValue() / 10.0
        self.view.set_weights(weights)
def main():
    """Create the wx application and main frame, then enter the event loop."""
    app = wx.App(None)
    frame = Frame()
    frame.Show()
    app.MainLoop()
if __name__ == '__main__':
    main()
|
[
"fogleman@gmail.com"
] |
fogleman@gmail.com
|
ef5b018a2b5ea2a4f02c3054530d9c410298db2c
|
bc5d0c3e950ebd9a7fbd33fbbcad94979cbe344f
|
/src/scratch.py
|
1e0bc9d7ea1f286057ac7e65ffcc5322727de2fe
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
bgshin/bert
|
44e7b23016087e7fda6396bd4432188918d2af28
|
5d13d62909f120ec097c606a0366b49887ea1baf
|
refs/heads/master
| 2020-04-06T11:41:44.788854
| 2018-12-03T04:48:56
| 2018-12-03T04:48:56
| 157,426,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,344
|
py
|
import os
import numpy as np
import tensorflow as tf
import collections
import tokenization
from run_classifier import SstProcessor, file_based_input_fn_builder, model_fn_builder, file_based_convert_examples_to_features
import modeling
import cPickle
def get_model_size(ckpt_fpath):
    """Print per-variable shapes and the total parameter count of a
    TensorFlow checkpoint, skipping optimizer Momentum slots and global_step."""
    # Open TensorFlow ckpt
    reader = tf.train.NewCheckpointReader(ckpt_fpath)
    print('\nCount the number of parameters in ckpt file(%s)' % ckpt_fpath)
    param_map = reader.get_variable_to_shape_map()  # {var_name: shape list}
    total_count = 0
    for k, v in param_map.items():
        if 'Momentum' not in k and 'global_step' not in k:
            temp = np.prod(v)  # number of elements in this variable
            total_count += temp
            print('%s: %s => %d' % (k, str(v), temp))
    print('Total Param Count: %d' % total_count)
# tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
# result = tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? ")
# print(result)
# print(["hello", "!", "how", "are", "you", "?"])
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
# Paths / hyperparameters for SST-2 evaluation with a fine-tuned BERT.
TRAINED_MODDEL_DIR='../model/sst2/'
data_dir = '../data/glue_data/SST-2'
output_dir = '../model/sst2/eval/'
max_seq_length = 128
vocab_file='../data/cased_L-12_H-768_A-12/vocab.txt'
do_lower_case = True
eval_batch_size = 8
learning_rate=2e-5
# init_checkpoint='./data/cased_L-12_H-768_A-12/bert_model.ckpt'
init_checkpoint='../model/sst2/eval/model.ckpt-6313'
# init_checkpoint='../model/sst2/eval/checkpoint'
bert_config_file='../data/cased_L-12_H-768_A-12/bert_config.json'
get_model_size(init_checkpoint)
# NOTE(review): exit() stops the script here — everything below is
# unreachable as written (presumably left in for later re-enabling).
exit()
processor = SstProcessor()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
    vocab_file=vocab_file, do_lower_case=do_lower_case)
# Convert train/dev/test examples into TFRecord feature files.
train_examples = processor.get_train_examples(data_dir)
trn_file = os.path.join(output_dir, "trn.tf_record")
file_based_convert_examples_to_features(
    train_examples, label_list, max_seq_length, tokenizer, trn_file)
eval_examples = processor.get_dev_examples(data_dir)
eval_file = os.path.join(output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
    eval_examples, label_list, max_seq_length, tokenizer, eval_file)
tst_examples = processor.get_test_examples(data_dir)
tst_file = os.path.join(output_dir, "tst.tf_record")
file_based_convert_examples_to_features(tst_examples, label_list,
                                        max_seq_length, tokenizer,
                                        tst_file)
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
    cluster=None,
    master=None,
    model_dir=output_dir,
    save_checkpoints_steps=1000,
    tpu_config=tf.contrib.tpu.TPUConfig(
        iterations_per_loop=1000,
        num_shards=8,
        per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
    bert_config=bert_config,
    num_labels=len(label_list),
    init_checkpoint=init_checkpoint,
    learning_rate=learning_rate,
    num_train_steps=10,
    num_warmup_steps=10,
    use_tpu=False,
    use_one_hot_embeddings=False)
estimator = tf.contrib.tpu.TPUEstimator(
    use_tpu=False,
    model_fn=model_fn,
    config=run_config,
    train_batch_size=32,
    eval_batch_size=8,
    predict_batch_size=8)
tf.logging.info("***** Running evaluation *****")
tf.logging.info("  Num examples = %d", len(eval_examples))
tf.logging.info("  Batch size = %d", eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
    input_file=eval_file,
    seq_length=max_seq_length,
    is_training=False,
    drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
# Dump the embedding table (variable #12 by name order — TODO confirm index).
embeddings = estimator.get_variable_value(estimator.get_variable_names()[12])
with open(output_dir+'embedding.cpkl', 'wb') as handle:
    cPickle.dump(embeddings, handle)
result_predict = [val for val in estimator.predict(eval_input_fn)]
# Write eval metrics to a text file, one "key = value" per line.
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
    tf.logging.info("***** Eval results *****")
    for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
|
[
"nomolos79@gmail.com"
] |
nomolos79@gmail.com
|
3a9cb5d26818969cfb8af7ed4edd0f0d30c3c771
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Python/Core/Lib/encodings/utf_32_be.py
|
f84e9045e1470bfbee6b819f251ac345d9b60040
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149
| 2018-03-08T01:22:49
| 2018-03-08T01:22:49
| 123,527,268
| 2
| 0
| null | 2018-03-02T03:48:31
| 2018-03-02T03:48:30
| null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: utf_32_be.py
"""
Python 'utf-32-be' Codec
"""
import codecs
# Module-level encoder: direct alias of the C-implemented codec function.
encode = codecs.utf_32_be_encode
def decode(input, errors='strict'):
    """Decode UTF-32-BE bytes; final=True so truncated input is an error."""
    text, consumed = codecs.utf_32_be_decode(input, errors, True)
    return (text, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Stateless incremental encoder delegating to the C codec."""
    def encode(self, input, final=False):
        return codecs.utf_32_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # Buffered decoder: the base class retains partial 4-byte sequences
    # between calls and feeds them to _buffer_decode.
    _buffer_decode = codecs.utf_32_be_decode
class StreamWriter(codecs.StreamWriter):
    # Stream writer reuses the C-implemented encode function directly.
    encode = codecs.utf_32_be_encode
class StreamReader(codecs.StreamReader):
    # Stream reader reuses the C-implemented decode function directly.
    decode = codecs.utf_32_be_decode
def getregentry():
    """Return the CodecInfo registration entry for the 'utf-32-be' codec."""
    return codecs.CodecInfo(
        name='utf-32-be',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
[
"francisck@protonmail.ch"
] |
francisck@protonmail.ch
|
0e3e32f77124f0e14236d74d16a1a23002091b70
|
ae10b60cb92a69146bfb05ef5dde735a0aa45d4b
|
/examples/Extended Application/matplotlib/examples/userdemo/annotate_simple_coord02.py
|
c8424d6389371288876249d81040115991437882
|
[
"MIT"
] |
permissive
|
kantel/nodebox-pyobjc
|
471cea4c5d7f1c239c490323186458a74edcc214
|
068ba64c87d607522a240ab60c3ba14f869f6222
|
refs/heads/master
| 2021-08-14T18:32:57.995445
| 2017-11-16T13:42:23
| 2017-11-16T13:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
"""
=======================
Annotate Simple Coord02
=======================
"""
import matplotlib.pyplot as plt
# nodebox section: when run inside NodeBox, __name__ is '__builtin__' and
# figures are rendered to temp PNGs placed on the NodeBox canvas.
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create a closed temporary .png path for savefig to write into.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Render the figure to a temp file, draw it on the canvas, and
        # grow the canvas to stack successive figures vertically.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(3, 2))
an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data",
                  va="center", ha="center",
                  bbox=dict(boxstyle="round", fc="w"))
# Second annotation is positioned relative to the first (xycoords=an1),
# with its text placed in the first annotation's axes-fraction space.
an2 = ax.annotate("Test 2", xy=(0.5, 1.), xycoords=an1,
                  xytext=(0.5, 1.1), textcoords=(an1, "axes fraction"),
                  va="bottom", ha="center",
                  bbox=dict(boxstyle="round", fc="w"),
                  arrowprops=dict(arrowstyle="->"))
fig.subplots_adjust(top=0.83)
pltshow(plt)
|
[
"karstenwo@web.de"
] |
karstenwo@web.de
|
0a8bd7608db9b29c07d9adbc6e136ba7c4662200
|
35e28d7705773eed54345af4440700522c9d1863
|
/deps/libgdal/gyp-formats/tsx.gyp
|
b8f05037aec95a26a96a811114431a86f6467c1c
|
[
"Apache-2.0"
] |
permissive
|
naturalatlas/node-gdal
|
0ee3447861bf2d1abc48d4fbdbcf15aba5473a27
|
c83e7858a9ec566cc91d65db74fd07b99789c0f0
|
refs/heads/master
| 2023-09-03T00:11:41.576937
| 2022-03-12T20:41:59
| 2022-03-12T20:41:59
| 19,504,824
| 522
| 122
|
Apache-2.0
| 2022-06-04T20:03:43
| 2014-05-06T18:02:34
|
C++
|
UTF-8
|
Python
| false
| false
| 245
|
gyp
|
{
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_tsx_frmt",
"type": "static_library",
"sources": [
"../gdal/frmts/tsx/tsxdataset.cpp"
],
"include_dirs": [
"../gdal/frmts/tsx"
]
}
]
}
|
[
"brian@thirdroute.com"
] |
brian@thirdroute.com
|
ffabd2487c7b9698475b0ebf663f97afebe11c6a
|
0f7666900a3d203481b1009fe3d9dd2186938c30
|
/gpu-example.py
|
e89103c48573461470473750392cc1d8d4e2ed0b
|
[] |
no_license
|
MZ195/Cupy-GPU-Example
|
4a7d99c25e9d26213c3f27b6618b6f8c8cbc4393
|
108b920848e60b3e2706749ca97404572433e130
|
refs/heads/master
| 2022-10-29T23:45:48.093569
| 2020-06-13T21:09:04
| 2020-06-13T21:09:04
| 272,086,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
import numpy as np
from timeit import default_timer as timer
from numba import vectorize
# Benchmark problem size: ~100M float32 elements (~400 MB per array).
NUM_ELEMENTS = 100000000
# This is the CPU version.
def vector_add_cpu(a, b):
    """Element-wise sum of a and b via a pure-Python loop (CPU baseline).

    Fixed: the output buffer is now sized from the input (len(a)) instead
    of the module-level NUM_ELEMENTS constant, so the function works for
    arrays of any length; a and b must have equal length.
    """
    c = np.zeros(len(a), dtype=np.float32)
    for i in range(len(a)):
        c[i] = a[i] + b[i]
    return c
# This is the GPU version. Note the @vectorize decorator. This tells
# numba to turn this into a GPU vectorized function.
@vectorize(["float32(float32, float32)"], target='cuda')
def vector_add_gpu(a, b):
    # Scalar kernel: numba broadcasts it element-wise on the CUDA device.
    return a + b;
def main():
    """Time the pure-Python CPU add against the numba/CUDA add and print both."""
    a_source = np.float32(np.random.rand(NUM_ELEMENTS))
    b_source = np.float32(np.random.rand(NUM_ELEMENTS))

    # Time the CPU function
    cpu_start = timer()
    vector_add_cpu(a_source, b_source)
    cpu_elapsed = timer() - cpu_start

    # Time the GPU function
    gpu_start = timer()
    vector_add_gpu(a_source, b_source)
    gpu_elapsed = timer() - gpu_start

    # Report times
    print("CPU function took %f seconds." % cpu_elapsed)
    print("GPU function took %f seconds." % gpu_elapsed)

    return 0
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"40984264+MZ195@users.noreply.github.com"
] |
40984264+MZ195@users.noreply.github.com
|
31e36abf085c1f9a230b668343ec294935b1c1da
|
91fe8f479fa921fa84111d19222a5c6aa6eff030
|
/apps/django-web/learning_log/ll_env/Scripts/django-admin.py
|
8785f03fb524b48fbb8c6393ffc1f4c9ea3ed203
|
[] |
no_license
|
romanticair/python
|
2055c9cdaa46894c9788d5797643283786ed46dd
|
6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141
|
refs/heads/master
| 2022-11-03T17:17:17.608786
| 2019-07-05T07:07:29
| 2019-07-05T07:07:29
| 195,356,190
| 0
| 1
| null | 2022-10-14T20:51:14
| 2019-07-05T07:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
#!l:\mypythonprogr\somepythonprojects\learning_log\ll_env\scripts\python.exe
# Virtualenv console-script wrapper: delegates straight to Django's
# command-line management utility (the django-admin entry point).
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"1024519570@qq.com"
] |
1024519570@qq.com
|
f32ae2236fb684777b438596d850abf6885dc83b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/qLMZ2hEvrhRSSSnQw_20.py
|
dba82c5c1e6082f3f6e2d972820ab53ab7f449e3
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
"""
Graded lexicographic order (grlex order for short) is a way of ordering words
that:
1. First orders words by length.
2. Then orders words of the same size by their dictionary order.
For example, in grlex order:
* "tray" < "trapped" since "tray" has length 4 while "trapped" has length 7.
* "trap" < "tray" since both have length 4, but "trap" comes before "tray" in the dictionary.
Given a list of words, return that list in grlex order.
### Examples
make_grlex(["small", "big"]) ➞ ["big", "small"]
make_grlex(["cat", "ran", "for", "the", "rat"]) ➞ ["cat", "for", "ran", "rat", "the"]
make_grlex(["this", "is", "a", "small", "test"]) ➞ ["a", "is", "test", "this", "small"]
### Notes
N/A
"""
def make_grlex(lst):
    """Sort `lst` in place into graded lexicographic (grlex) order.

    Words are ordered first by length, then alphabetically within each
    length. The mutated list is also returned for convenience.
    """
    lst.sort(key=lambda word: (len(word), word))
    return lst
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
1c54e78c3a9fa11c3884d713d206b69fd669a135
|
a08d885cb9150d7e84f5ffbf0c9734893105a898
|
/2021/Day 04/giant_squid.py
|
113e0335b32c0ec58d8789010615a252f8d92b66
|
[] |
no_license
|
vhsw/Advent-of-Code
|
ab422c389340a1caf2ec17c5db4981add6433fbe
|
3c1dac27667472202ab15098c48efaac19348edf
|
refs/heads/master
| 2022-12-29T03:56:59.648395
| 2022-12-26T11:01:45
| 2022-12-26T11:01:45
| 162,491,163
| 0
| 0
| null | 2022-05-10T08:43:32
| 2018-12-19T21:10:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
"""Day 4: Giant Squid"""
from typing import Iterable
with open("2021/Day 04/input.txt", encoding="utf-8") as fp:
DATA = fp.read().strip()
def part1(data: str):
    """Part 1 solution: final score of the first bingo board to win."""
    numbers, boards = parse(data)
    for drawn in numbers:
        for board in boards:
            for row in board:
                replace(row, drawn)
            if not check(board):
                continue
            return drawn * score(board)
    raise ValueError(data)
def part2(data):
    """Part 2 solution: final score of the last bingo board to win."""
    numbers, boards = parse(data)
    finished = [False] * len(boards)
    for drawn in numbers:
        for idx, board in enumerate(boards):
            if finished[idx]:
                # Board already won; stop marking it.
                continue
            for row in board:
                replace(row, drawn)
            if check(board):
                finished[idx] = True
                if all(finished):
                    return drawn * score(board)
    raise ValueError(data)
def parse(data: str):
    """Split the puzzle input into (draw order, list of boards)."""
    header, *board_blocks = data.split("\n\n")
    return parse_nums(header), parse_boards(board_blocks)
def parse_nums(nums: str):
    """Parse the comma-separated draw order into a list of ints."""
    return [int(token) for token in nums.split(",")]
def parse_boards(boards: list[str]):
    """Parse each whitespace-separated board block into a grid (list of int rows)."""
    parsed = []
    for block in boards:
        rows = [[int(cell) for cell in line.split()] for line in block.splitlines()]
        parsed.append(rows)
    return parsed
def replace(line: list[int | None], value: int):
for idx, _ in enumerate(line):
if line[idx] == value:
line[idx] = None
def check(board: list[list[int | None]]):
    """Return True if any row or any column of `board` is fully marked.

    Annotation fixed: cells are ints (unmarked) or None (marked), not str.
    zip(*board) transposes the board so columns are checked as lines.
    """
    return check_lines(board) or check_lines(zip(*board))
def check_lines(board: Iterable[Iterable[str]]):
return any(all(num is None for num in line) for line in board)
def score(board):
    """Sum of all unmarked (truthy) numbers remaining on `board`."""
    total = 0
    for row in board:
        for cell in row:
            if cell:
                total += cell
    return total
# Script entry point: solve both parts against the puzzle input.
if __name__ == "__main__":
    print(f"Part 1: { part1(DATA) }")
    print(f"Part 2: { part2(DATA) }")
|
[
"nevermind1025@gmail.com"
] |
nevermind1025@gmail.com
|
861709efdf3acd7f6154b69f466c98536af3d22c
|
98e1716c1c3d071b2fedef0ac029eb410f55762c
|
/part13-introduction-data-visualization/No33-Multiple-time-series-on-common-axes.py
|
d1207e9079b0d4e197a6b8b3812ed67aab364a3e
|
[] |
no_license
|
iamashu/Data-Camp-exercise-PythonTrack
|
564531bcf1dff119949cbb75e1fd63d89cb2779f
|
c72a4e806494f0e263ced9594597dc8882c2131c
|
refs/heads/master
| 2020-07-22T00:23:12.024386
| 2019-04-12T09:24:42
| 2019-04-12T09:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
#Multiple time series on common axes
'''
For this exercise, you will construct a plot showing four time series stocks on the same axes. The time series in question are represented in the session using the identifiers aapl, ibm, csco, and msft. You'll generate a single plot showing all the time series on common axes with a legend.
Instructions
Plot the aapl time series in blue with a label of 'AAPL'.
Plot the ibm time series in green with a label of 'IBM'.
Plot the csco time series in red with a label of 'CSCO'.
Plot the msft time series in magenta with a label of 'MSFT'.
Specify a rotation of 60 for the xticks with plt.xticks().
Add a legend in the 'upper left' corner of the plot.
'''
# Code
# NOTE(review): aapl, ibm, csco and msft are time-series objects supplied
# by the DataCamp exercise session -- this snippet is not runnable standalone.
# Import matplotlib.pyplot
import matplotlib.pyplot as plt

# Plot the aapl time series in blue
plt.plot(aapl, color='blue', label='AAPL')
# Plot the ibm time series in green
plt.plot(ibm, color='green', label='IBM')
# Plot the csco time series in red
plt.plot(csco, color='red', label='CSCO')
# Plot the msft time series in magenta
plt.plot(msft, color='magenta', label='MSFT')

# Add a legend in the top left corner of the plot
plt.legend(loc='upper left')

# Specify the orientation of the xticks
plt.xticks(rotation=60)

# Display the plot
plt.show()
|
[
"beiran@hotmail.com"
] |
beiran@hotmail.com
|
8718fb18f40bed47885972a98c7d3c06d6a3ca6c
|
9db8e7bbd09eb07126a7f0c14e2f3af86bf6e8d9
|
/datasets/centralia/convert.py
|
51efa94fe2198d2ccf0de3656af8e6b03e2c11b4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jayschauer/TCPD
|
ddeb193eca2fa9ea04b791025fe529dfd3ee4686
|
f99ab1decc165707115fd60e991ac1c12091f334
|
refs/heads/master
| 2022-12-05T02:21:16.517774
| 2020-09-02T18:56:49
| 2020-09-02T18:56:49
| 292,368,325
| 0
| 0
|
MIT
| 2020-09-02T18:52:00
| 2020-09-02T18:52:00
| null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
def parse_args():
    """Define and parse this script's command-line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--subsample",
        type=int,
        help="Number of observations to skip during subsampling",
    )
    parser.add_argument("input_file", help="File to convert")
    parser.add_argument("output_file", help="File to write to")
    return parser.parse_args()
def main():
    """Convert the tab-separated (year, population) input into TCPD JSON."""
    args = parse_args()

    with open(args.input_file, "r") as fp:
        rows = [line.strip().split("\t") for line in fp]

    # Split the rows into parallel columns: year strings and int populations.
    time = [year for year, _ in rows]
    values = [int(pop) for _, pop in rows]

    series = [{"label": "Population", "type": "int", "raw": values}]
    data = {
        "name": "centralia",
        "longname": "Centralia Pennsylvania Population",
        "n_obs": len(time),
        "n_dim": len(series),
        "time": {
            "type": "string",
            "format": "%Y",
            "index": list(range(len(time))),
            "raw": time,
        },
        "series": series,
    }

    with open(args.output_file, "w") as fp:
        json.dump(data, fp, indent="\t")
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"gertjanvandenburg@gmail.com"
] |
gertjanvandenburg@gmail.com
|
ac415525282185598f7818ce48ba05afe6778b4c
|
78efa54b2b253f99ea7e073f783e6121c20cdb52
|
/Codechef/Substraction Game 1.py
|
70eba9c0690d035feb1d0ea1b9eec2fb43d7f3d6
|
[] |
no_license
|
NishchaySharma/Competitve-Programming
|
32a93581ab17f05d20129471f7450f34ec68cc53
|
1ec44324d64c116098eb0beb74baac7f1c3395bb
|
refs/heads/master
| 2020-04-08T04:02:46.599398
| 2020-01-01T15:51:39
| 2020-01-01T15:51:39
| 159,000,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm.

    Matches the recursive original, including gcd(0, x) == x and
    gcd(x, 0) == x via the final a + b (one operand is always 0 there).
    """
    while a:
        a, b = b % a, a
    return a + b
# For each test case: read the array and fold gcd over it; break early once
# the running gcd hits 1, since it cannot change after that.
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    res = arr[0]
    for i in arr[1:]:
        res = gcd(res, i)
        if res == 1:
            break
    print(res)
|
[
"noreply@github.com"
] |
NishchaySharma.noreply@github.com
|
20906ab3c18a8740b0c15f21a626b89b87f75c8d
|
10fbe5526e5f0b8588b65f70f088cd86b6e9afbe
|
/qqpppzas/migrations/0015_auto_20150218_1630.py
|
8c6435040612046c9d19f24e8b395845cbb10c31
|
[] |
no_license
|
MarkusH/django-migrations-benchmark
|
eb4b2312bb30a5a5d2abf25e95eca8f714162056
|
e2bd24755389668b34b87d254ec8ac63725dc56e
|
refs/heads/master
| 2016-09-05T15:36:45.250134
| 2015-03-31T23:44:28
| 2015-03-31T23:44:28
| 31,168,231
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated benchmark migration for the qqpppzas app.

    Creates the Uxswpekqlt model, removes two fields and adds one
    CharField. Generated code: do not hand-edit operations.
    """

    dependencies = [
        ('cohutfvb', '0014_auto_20150218_1630'),
        ('qqpppzas', '0014_auto_20150218_1628'),
    ]

    # This migration must be applied before ysgxuyu.0012_delete_bmovnbnmed.
    run_before = [
        ('ysgxuyu', '0012_delete_bmovnbnmed'),
    ]

    operations = [
        migrations.CreateModel(
            name='Uxswpekqlt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('febtep', models.OneToOneField(null=True, related_name='+', to='cohutfvb.Ecgjvad')),
            ],
        ),
        migrations.RemoveField(
            model_name='shtlozkm',
            name='wjznogs',
        ),
        migrations.RemoveField(
            model_name='vdscpy',
            name='efspwnch',
        ),
        migrations.AddField(
            model_name='vdscpy',
            name='tcyjunatyh',
            field=models.CharField(default='', max_length=163),
        ),
    ]
|
[
"info@markusholtermann.eu"
] |
info@markusholtermann.eu
|
339fbbf64e0beffc337a085e31729f800c8127dd
|
d03582a9f9a853a07eeb36f746530dee29f7a258
|
/build/catkin_generated/generate_cached_setup.py
|
b68e8b3be2e20e0a06efc00fe0407cd24717e837
|
[] |
no_license
|
tanmayshankar/decision_making
|
6df835c6c26e9cb2f0fdfbec509f5a46bf3c98ae
|
fdd3b1ea127a89fb03181cb8f766e86e6d933eed
|
refs/heads/master
| 2021-01-21T04:48:16.259813
| 2016-07-15T17:43:06
| 2016-07-15T17:43:06
| 44,739,541
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# NOTE(review): this file is generated by catkin's build system; paths are
# machine-specific by design.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/opt/ros/indigo".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Render the cached environment script for this workspace's devel space.
code = generate_environment_script('/home/tanmay/indigo_workspace/sandbox/decision_making/build/devel/env.sh')

output_filename = '/home/tanmay/indigo_workspace/sandbox/decision_making/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Make the generated shell script executable for the owning user.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"tanmay.shankar@gmail.com"
] |
tanmay.shankar@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.