blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f36eab7e9317370fbed4d199e089a935148128b
|
6fdf0ad8a70cfe666ab1cae331ddf751178b0f34
|
/Python/Arrays/problem_1051. Height Checker.py
|
679788265b355852106eb65beff9440a8fd1ef24
|
[] |
no_license
|
vigneshthiagarajan/Leetcode_prep
|
3aa46f90af084d6100cd61af28767e811c848d4e
|
1f087564e9b68f85d9974c3643538b8370ba82e3
|
refs/heads/main
| 2023-06-19T06:47:00.388621
| 2021-07-12T04:54:32
| 2021-07-12T04:54:32
| 356,921,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
class Solution:
    def heightChecker(self, heights: List[int]) -> int:
        """Count positions where heights differs from its sorted order.

        :param heights: current line-up of student heights
        :return: number of indices whose value is not where non-decreasing
            order would place it
        """
        return sum(1 for current, expected in zip(heights, sorted(heights))
                   if current != expected)
|
[
"vigneshthiagaraj@gmail.com"
] |
vigneshthiagaraj@gmail.com
|
48ab5771787957eeb1c9c69b7eb86c08eda031ee
|
9378f00e13fa41b41cad1bb4dc733bcb46f259ef
|
/python/scripts/classifiers/SVM.py
|
05f35e4215ce796d9c424c42db6bcd0505515469
|
[] |
no_license
|
ghpaetzold/iconic-internship
|
0ac55c6e5289b569af2eb627a9e7025e0b9e9914
|
463f562dd1f3bdcb85954604ef2364ee5dcbef8e
|
refs/heads/master
| 2021-01-10T19:12:19.713265
| 2015-07-21T15:46:02
| 2015-07-21T15:46:02
| 37,980,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
from sklearn import svm
import sys, numpy, pickle
from sklearn.preprocessing import normalize
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
def readXY(xf, yf):
    """Load a tab-separated feature matrix from *xf* and a label vector from *yf*.

    :param xf: path to a file with one tab-separated row of floats per line
    :param yf: path to a file with one float label per line
    :return: (list-of-lists feature matrix, numpy array of labels)
    """
    # Context managers close both files; the original opened them inline
    # and leaked the handles.
    with open(xf) as feature_file:
        X = [[float(v) for v in line.strip().split('\t')] for line in feature_file]
    with open(yf) as label_file:
        Y = numpy.array([float(l.strip()) for l in label_file])
    return X, Y
def writeLabels(labels, file):
    """Write one label per line to *file*, then close it.

    :param labels: iterable of label values (stringified with str())
    :param file: open writable text file object; closed on return
        (name kept for caller compatibility although it shadows the builtin)
    """
    # The original maintained a counter `c` that was never read; removed.
    for label in labels:
        file.write(str(label) + '\n')
    file.close()
def writeModel(classifier, model_file):
    """Pickle *classifier* to the path *model_file*.

    Uses a context manager so the file handle is closed (the original left
    the handle from open() unclosed).
    """
    with open(model_file, "wb") as handle:
        pickle.dump(classifier, handle)
# --- command-line driver ---------------------------------------------------
# Positional argv:
#   1: C   2: kernel   3: degree   4: gamma   5: coef0
#   6/7: training X/Y paths   8/9: test X/Y paths
#   10: k for SelectKBest ('all' or an int)
#   11: output label path   12: pickled model path
C = float(sys.argv[1])
kernel = sys.argv[2]
degree = int(sys.argv[3])
gamma = float(sys.argv[4])
coef0 = float(sys.argv[5])
Xtr, Ytr = readXY(sys.argv[6], sys.argv[7])
Xte, Yte = readXY(sys.argv[8], sys.argv[9])
# Normalize each feature column (axis=0) independently.
# NOTE(review): test data is normalized with its own statistics rather than
# the training set's — confirm this is intended.
Xtr = normalize(Xtr, axis=0)
Xte = normalize(Xte, axis=0)
k = sys.argv[10]
if k!='all':
    k = int(k)
# Univariate (ANOVA F-test) feature selection, fitted on training data only.
selector = SelectKBest(f_classif, k=k).fit(Xtr, Ytr)
Xtr = selector.transform(Xtr)
Xte = selector.transform(Xte)
o = open(sys.argv[11], 'w')
model_file = sys.argv[12]
# Train, predict on the test set, then persist predicted labels and model.
classifier = svm.SVC(C=C, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0)
classifier.fit(Xtr, Ytr)
labels = classifier.predict(Xte)
writeLabels(labels, o)
writeModel(classifier, model_file)
|
[
"ghpaetzold@outlook.com"
] |
ghpaetzold@outlook.com
|
8be72ee78cc9d7e1cf6795076617cef99ea05d82
|
7cebfa2066e679e19993a5507e59d1979df3d4a8
|
/1_Basics/DataScienceAssignments/basicSearchEngine.py
|
eb134571d3ebac8f6c093cf7fa05c155bc04fe29
|
[
"Apache-2.0"
] |
permissive
|
Arunken/PythonScripts
|
833e9e43ccb29234a206027f1cda1d978718d5eb
|
702d0a3af7a9be3311f9da0afc5285d453f15484
|
refs/heads/master
| 2022-12-24T18:50:43.672779
| 2021-05-13T11:31:51
| 2021-05-13T11:31:51
| 237,631,027
| 0
| 0
|
Apache-2.0
| 2022-12-08T00:47:45
| 2020-02-01T15:01:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,737
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 13:17:19 2018
@author: SilverDoe
"""
import urllib
import collections
def get_page(url):
    """Fetch *url* and return a [url, html] pair; html is "" on any failure.

    NOTE(review): Python 2 API (urllib.urlopen) — this module predates
    urllib.request. The bare except is a deliberate best-effort: any
    network or URL error yields an empty page rather than a crash.
    """
    try:
        return [url,urllib.urlopen(url).read()]
    except:
        return [url,""]
def get_next_url(page):
    """Return (url, end_position) for the first absolute link in page[1].

    :param page: [url, html] pair as produced by get_page
    :return: (None, 0) when no '<a href="http' anchor remains, otherwise the
        extracted URL and the index of its closing quote in the html.
    """
    html = page[1]
    anchor = html.find('<a href="http')
    if anchor == -1:
        return None, 0
    url_start = anchor + len('<a href="')
    url_end = html.find('"', url_start)
    return html[url_start:url_end], url_end
def get_all_links(page):
    """Collect every absolute link that get_next_url can find in page[1].

    Mutates page[1] in place, trimming consumed html after each hit.
    :return: list of URLs in document order
    """
    found = []
    url, end = get_next_url(page)
    while url:
        found.append(url)
        page[1] = page[1][end:]
        url, end = get_next_url(page)
    return found
def crawl_web(seed, to_crawl, crawled):
    """Crawl one page: remove *seed* from the frontier, index its links.

    :param seed: URL to fetch; must currently be a member of to_crawl
    :param to_crawl: frontier set (mutated by remove; union returned fresh)
    :param crawled: visited set, mutated in place
    :return: (crawled, to_crawl, new_links)

    NOTE(review): if seed is already in crawled, new_links is never bound and
    the return raises UnboundLocalError — callers must only pass fresh seeds.
    """
    to_crawl.remove(seed)
    if seed not in crawled:
        new_links = set(link for link in get_all_links(get_page(seed)))
        to_crawl = to_crawl.union(new_links)
        crawled.add(seed)
    return crawled, to_crawl, new_links
def track_depth(url, maxdepth):
    """Breadth-first crawl starting at *url*, at most *maxdepth* tiers deep.

    tier[d] holds the URLs discovered at depth d (tier[0] is the seed).
    :return: (tier, crawled, to_crawl)
    """
    depth = 0
    tier = [[url]]
    to_crawl = set([url])
    crawled = set()
    while depth < maxdepth:
        next_tier = []
        for next_url in tier[depth]:
            # NOTE(review): crawl_web raises if next_url was crawled in an
            # earlier tier; duplicate links across tiers would trigger this.
            crawled, to_crawl, new_links = crawl_web(next_url, to_crawl,
                                                     crawled)
            next_tier += list(new_links)
        tier.append(next_tier)
        depth += 1
    return tier, crawled, to_crawl
def get_next_string(page):
    """Return (text, end_position) for the first <p>...</p> span in page[1].

    :return: (None, 0) when no opening <p> tag remains, otherwise the
        paragraph text and the index where its closing tag starts.
    """
    html = page[1]
    open_tag = html.find('<p>')
    if open_tag == -1:
        return None, 0
    text_start = open_tag + len('<p>')
    close_tag = html.find('</p>', text_start)
    return html[text_start:close_tag], close_tag
def get_page_words(page):
    """Gather all paragraph text from page[1] and split it into cleaned words.

    Mutates page[1] while scanning. Punctuation and digits are removed and
    the text lowercased before splitting on whitespace.
    """
    to_remove = '#$%^&*._,1234567890+=<>/\()":;!?'
    collected = ''
    text, end = get_next_string(page)
    while text:
        collected += " " + text
        page[1] = page[1][end:]
        text, end = get_next_string(page)
    for ch in to_remove:
        collected = collected.replace(ch, '').lower()
    return collected.split()
def word_bank(crawled):
    """Build an inverted index: word -> {url: occurrence count}.

    :param crawled: iterable of URLs to fetch and index
    :return: dict mapping each word to a per-URL count dict

    Words of 15+ characters are only counted where the word was already
    indexed; a *new* word that long is skipped entirely (the elif guard) —
    presumably to filter out junk tokens.
    """
    crawled = list(crawled)
    word_count = {}
    for url in crawled:
        for word in get_page_words(get_page(url)):
            if word in word_count:
                if url in word_count[word]:
                    word_count[word][url] += 1
                else:
                    word_count[word][url] = 1
            elif len(word) < 15:
                word_count[word] = {url: 1}
    return word_count
def search_engine(target_string, word_count):
    """Return the (up to three) URLs matching the most words of *target_string*.

    :param target_string: whitespace-separated query words
    :param word_count: inverted index from word_bank(): word -> {url: count}
    :return: tuple of top URLs ranked by number of distinct query words
        matched, at most three entries.

    Fix: the original returned ans[0][0], ans[1][0], ans[2][0] unconditionally
    and raised IndexError whenever fewer than three URLs matched.
    """
    targets = list(set(target_string.split()))
    result = []
    for word in targets:
        if word in word_count:
            result += word_count[word].keys()
    ranked = collections.Counter(result).most_common(3)
    return tuple(url for url, _ in ranked)
# Demo driver: crawl two tiers from the seed site, build the inverted index,
# then run one sample query. NOTE(review): network-dependent Python 2 era
# code (get_page uses urllib.urlopen).
crawled = track_depth("http://www.wccftech.com/", 2)[1]
print("crawling done")
word_count = word_bank(crawled)
print("word_count done")
#print word_count
print(search_engine('starting blogs about', word_count))
|
[
"mail.arunken@gmail.com"
] |
mail.arunken@gmail.com
|
23d6e34bbac69811382efe75a0eaa6ccf8ac50e0
|
635c9f0501039f5a099849a3108e19de76092aea
|
/algorithm/ssafy_190325/부분집합합.py
|
a2aa237ea48bc4c554fc9fbb279759c67273b26f
|
[] |
no_license
|
Hansung-Lee/SSAFY
|
87ebea0808bb40381678d678e1035dc5fa2c2eb0
|
cdb7ae1bba0e98e733eed703da2c62217c319462
|
refs/heads/master
| 2020-04-14T20:03:05.975040
| 2019-05-16T08:57:21
| 2019-05-16T08:57:21
| 164,080,393
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
def subset(li):
    """Print every non-empty subset of *li* whose elements sum to zero.

    Enumerates all 2**len(li) bitmasks; each printed subset preserves the
    original element order.
    """
    size = len(li)
    for mask in range(1 << size):
        chosen = [li[pos] for pos in range(size) if mask & (1 << pos)]
        if chosen and sum(chosen) == 0:
            print(chosen)
# def subset2(li,n,r):
# global temp
# if r == 0:
# print(temp)
# temp = []
# elif n < r:
# return
# else:
# temp.append(li[n-1])
# subset2(li, n-1, r-1)
# subset2(li, n-1, r)
# Demo: print all zero-sum subsets of a fixed 10-element list.
li = [-1,3,-9,6,7,-6,1,5,4,-2]
subset(li)
# subset2(li,10,9)
|
[
"ajtwlsgkst@naver.com"
] |
ajtwlsgkst@naver.com
|
8489ac70eb0eaae6b07f33f76b0ef93c44e29136
|
5bb0b1ee382d2ffce5cf59cd9b90e75d66396335
|
/P12_视频网站视频真实上传日期_日期爬取_requests_re_匹配分组/001_Vixen_网站视频日期爬取_re查找_re分组匹配_日期匹配分组_交换顺序.py
|
e66d2cbf3eac4ada296d2b8a83f37194de16e510
|
[] |
no_license
|
FelixZFB/TZKT_Study_Note
|
d94ae82299fcce060da76cd4339b4182ab44f8c5
|
65f1089621d5236e3cddf37e6a3040556b4d0851
|
refs/heads/master
| 2021-06-26T09:16:17.051870
| 2020-12-01T02:14:39
| 2020-12-01T02:14:39
| 181,028,395
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,899
|
py
|
# -*- coding:utf-8 -*-
# project_xxx\venv\Scripts python
'''
Author: Felix
Email: xiashubai@gmail.com
Blog: https://blog.csdn.net/u011318077
Date: 2019/9/16 21:22
Desc:
'''
# 视频网址视频下面日期是视频上传日期,真实拍摄日期大多数都早一天
# 视频源码中的日期正好是早一天的真实日期,直接提取对应日期即可
import requests
import re
import time
def get_date(url_list):
    """Scrape the real (one-day-earlier) upload date from each page in *url_list*.

    Each page's HTML contains the true date exactly once inside a tag, e.g.
    '>November 11, 2018<'. Matched dates are rewritten as 'Vixen.YY.MM.DD'
    and appended to date.txt; the raw date lists are appended to
    date_list.txt for reference.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3730.400 QQBrowser/10.5.3805.400",
    }
    month_numbers = (
        ("January", "01"), ("February", "02"), ("March", "03"),
        ("April", "04"), ("May", "05"), ("June", "06"),
        ("July", "07"), ("August", "08"), ("September", "09"),
        ("October", "10"), ("November", "11"), ("December", "12"),
    )
    for url in url_list:
        # Fetch one page, pausing 3 seconds between requests.
        response = requests.get(url=url, headers=headers)
        time.sleep(3)
        html = response.content.decode()
        # The unique copy of the date sits alone inside a tag, so the
        # surrounding '>' and '<' anchor the match.
        date_list = re.findall(">([a-zA-Z]+ \d+, \d+)<", html)
        with open("date.txt", "a", encoding="utf-8") as f:
            for date in date_list:
                # 'July 18, 2019' -> 'Vixen.19.07.18' in three steps:
                # 1) turn ', 20' into ', Vixen.' so only the 2-digit year remains,
                date = date.replace(", 20", ", Vixen.")
                # 2) swap the month/day/year groups into dotted order,
                date_new = re.sub(r'([a-zA-Z]+) (\d+), (Vixen.\d+)', r'\3.\1.\2', date)
                # 3) map month names to two-digit numbers.
                for month_name, number in month_numbers:
                    date_new = date_new.replace(month_name, number)
                f.write(date_new + "\n")
        # Also keep a copy of the raw scraped dates per URL.
        with open("date_list.txt", "a", encoding="utf-8") as f:
            f.write(url + ":爬取完成" + "\n")
            f.write(str(date_list) + "\n")
        print(url + ":爬取完成")
        print(date_list)
if __name__ == '__main__':
    # Crawl listing pages 1..20 (12 videos per page).
    url_list = ["https://www.vixen.com/videos?page=" + str(i) + "&size=12" for i in range(1, 21)]
    get_date(url_list)
|
[
"18200116656@qq.com"
] |
18200116656@qq.com
|
c39633b0d4ddbeeb4101389894b53606ca92dd4e
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5765824346324992_0/Python/jcb/p2.py
|
bde3cb4839091e6513ee3b2edad179bc816deaa6
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import fractions
def lcm(x, y):
    """Least common multiple of x and y.

    Python 2 code: integer '/' is floor division here, so the result stays
    integral; fractions.gcd was removed in Python 3.
    """
    return (x * y) / fractions.gcd(x, y)
def solve(f):
    """Solve one test case of the barbershop problem read from file *f*.

    Reads two lines: 'num_barbers n' and the per-barber haircut durations.
    Returns the 1-based index of the barber who serves customer n.
    (Python 2: print statements, xrange, builtin reduce.)
    """
    #parse
    num_barbers, n = tuple([int(x) for x in f.readline().strip().split(" ")])
    # barbers[i] = [1-based index, haircut duration, time left on current cut]
    barbers = [[i + 1, int(x), 0] for i, x in enumerate(f.readline().strip().split(" "))]
    #print num_barbers, n, barbers
    if num_barbers != len(barbers):
        # NOTE(review): 'line' is undefined here — this error path would
        # itself raise NameError before printing.
        print "Wrong number of barbers on line:", line
        sys.exit(0)
    # Over one lcm(durations)-long cycle every barber returns to idle at the
    # same moment, so only n modulo the customers-served-per-cycle matters.
    l = reduce(lcm, [x[1] for x in barbers])
    #print l
    cycle = 0
    for i in xrange(num_barbers):
        cycle += l / barbers[i][1]
    left = n % cycle
    if left == 0:
        left = cycle
    # Simulate the final partial cycle: idle barbers take customers in index
    # order, then time fast-forwards by the smallest remaining cut time.
    while left > 0:
        smallest = None
        #print barbers
        for i in xrange(num_barbers):
            if barbers[i][2] == 0:
                barbers[i][2] = barbers[i][1]
                left -= 1
                if left == 0:
                    return barbers[i][0]
            if not smallest or barbers[i][2] < smallest:
                smallest = barbers[i][2]
        #print barbers
        #print smallest
        for i in xrange(num_barbers):
            barbers[i][2] -= smallest
        #print barbers
# Driver: read the test-case count, then solve and print each case in
# Code-Jam format. NOTE(review): 'while f:' is always truthy for a file
# object — the loop only exits via the 'break' once count == total; the
# first iteration consumes just the total and then 'continue's.
total = None
count = 0
f = sys.stdin
while f:
    if not total:
        total = int(f.readline().strip())
        continue
    elif count < total:
        count += 1
        print "Case #%d: %s" % (count, solve(f))
    else:
        break
if count < total:
    print "Wrong number of test cases"
    sys.exit(0)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
43efe6f40cb20df2c4eb6e4f4c25c9fcfb33a3e6
|
c4d1e606d2ebbeaaf3a4dee6f851a45dfa68dabe
|
/docs-crawler/docs/docs/spiders/numpy_spider.py
|
b4dae3615cf706dc8551bad586a9d824e7f85ac2
|
[
"Apache-2.0"
] |
permissive
|
niansong1996/threepio
|
4d4a976da5a199c77fca936861ba78cc3f45fa7c
|
5a143e045ca56560c29d03251cabc61cc8b982f6
|
refs/heads/master
| 2021-05-18T23:11:50.644479
| 2020-03-30T17:01:59
| 2020-03-30T17:01:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
import re
from docs.items import ApiItem
from scrapy.spiders import Rule, CrawlSpider
from scrapy.linkextractors import LinkExtractor
from w3lib.html import remove_tags
class NumpySpider(CrawlSpider):
    """Crawl the generated NumPy reference docs, yielding one ApiItem per
    function definition found on each page."""
    name = "numpy"
    version = "1.17.0"
    allowed_domains = ['scipy.org']
    start_urls = [f'https://docs.scipy.org/doc/numpy/reference/generated/']
    # Captures 'qualified.name(arg1, arg2=default, ...)' from a cleaned dt line.
    split_def = re.compile(r'^([\w\.]+)\(([\w\,\s=\*\.]*)\)')
    rules = (
        Rule(LinkExtractor(
            allow=(re.compile(r'.+\.html')),
        ),
            callback='parse_api',),
    )

    def parse_api(self, response):
        """Parse one docs page into ApiItems (code, function_name, args, kwargs)."""
        self.logger.info(f'Scraping {response.url}')
        cleaned = []
        for selector in response.css('dl.function > dt'):
            raw = remove_tags(selector.get())
            cleaned.append(raw.replace('\n', '')
                              .replace(' ', '')
                              .replace('[source]', ''))
        for definition in cleaned:
            match = self.split_def.match(definition)
            if match is None:
                continue
            qualified_name, param_text = match.groups()
            params = param_text.split(',')
            item = ApiItem()
            item['code'] = definition
            item['function_name'] = qualified_name.split('.')[-1]
            # Positional params have no '='; keyword params are split into pairs.
            item['args'] = [p for p in params if '=' not in p]
            item['kwargs'] = [p.split('=') for p in params if '=' in p]
            yield item
|
[
"amrmkayid@gmail.com"
] |
amrmkayid@gmail.com
|
8f83835fe45446652f09806816af34d50e785cb3
|
8f7b7a910520ba49a2e614da72f7b6297f617409
|
/Problemset/longest-substring-without-repeating-characters/longest-substring-without-repeating-characters.py
|
dc8487db359cebaaaaf3829e36b44a49590f76ec
|
[] |
no_license
|
fank-cd/python_leetcode
|
69c4466e9e202e48502252439b4cc318712043a2
|
61f07d7c7e76a1eada21eb3e6a1a177af3d56948
|
refs/heads/master
| 2021-06-16T23:41:55.591095
| 2021-03-04T08:31:47
| 2021-03-04T08:31:47
| 173,226,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
# @Title: 无重复字符的最长子串 (Longest Substring Without Repeating Characters)
# @Author: 2464512446@qq.com
# @Date: 2019-07-10 11:51:35
# @Runtime: 68 ms
# @Memory: 13.3 MB
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of *s* without repeated characters.

        Sliding-window scan: grow the window one character at a time; on a
        repeat, shrink from the left until the duplicate is evicted.
        O(n) time, O(k) space for the character set.
        """
        if not s:
            return 0
        window = set()
        left = 0
        best = 0
        width = 0
        for right in range(len(s)):
            width += 1
            while s[right] in window:
                window.remove(s[left])
                left += 1
                width -= 1
            if width > best:
                best = width
            window.add(s[right])
        return best
|
[
"2464512446@qq.com"
] |
2464512446@qq.com
|
1277be50c35d5f8141d67a8e05e653e78d81d442
|
4e29395020ce78f435e75e0b3f1e09b227f6f4d8
|
/ataraxia/algorithm/Eval/eval.py
|
5d666f6ab8a46c7ce2cea1d61d56bac77ffe1cdf
|
[] |
no_license
|
luoyangustc/argus
|
8b332d94af331a2594f5b1715ef74a4dd98041ad
|
2ad0df5d7355c3b81484f6625b82530b38b248f3
|
refs/heads/master
| 2020-05-25T21:57:37.815370
| 2019-05-22T09:42:40
| 2019-05-22T09:42:40
| 188,005,059
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
import pandas as pd
from collections import Counter
import pickle
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import average_precision_score
import json
import matplotlib.pyplot as plt
import itertools
import numpy as np
import json
import os
import docopt
# Accumulators for predictions vs ground truth, overall and per class.
# Class index convention (from the parsing below): 0 = pulp, 1 = sexy, 2 = normal.
dic_pred = {}
dic_pred_score = {}
dic_gt = {}
dic_gt_score = {}
y_gt = []
y_pred=[]
y_gt_score = []
y_score = []
pulp_score_list = []
sexy_score_list = []
normal_score_list = []
pulp_gt_list = []
sexy_gt_list = []
normal_gt_list = []
# output.json maps filename -> {"Top-1 Index": [...], "Confidence": [c0, c1, c2]};
# groundtruth.json has one JSON object per line with a url and a label class.
with open("output.json") as f1 ,open("groundtruth.json") as f2:
    lines2 = f2.readlines()
    d = json.load(f1)
    for k,v in d.items():
        lst = []
        dic_pred[k] = v['Top-1 Index'][0]
        lst.append(float(v["Confidence"][0]))
        lst.append(float(v["Confidence"][1]))
        lst.append(float(v["Confidence"][2]))
        dic_pred_score[k] = lst
    for line in lines2:
        lst = []
        basename = os.path.basename(json.loads(line.strip())['url'])
        classes = json.loads(line.strip())['label'][0]['data'][0]["class"]
        # Map class name to its index and a one-hot ground-truth vector.
        if classes == "sexy":
            index = 1
            lst = [0,1,0]
        elif classes == "pulp":
            index = 0
            lst = [1,0,0]
        else:
            index = 2
            lst = [0,0,1]
        dic_gt[basename] = index
        dic_gt_score[basename] = lst
# Align predictions with ground truth on shared keys and build per-class
# score/gt columns for average-precision computation.
for k in dic_pred:
    if k in dic_gt:
        y_gt.append(dic_gt[k])
        y_pred.append(dic_pred[k])
        pulp_score_list.append(dic_pred_score[k][0])
        sexy_score_list.append(dic_pred_score[k][1])
        normal_score_list.append(dic_pred_score[k][2])
        pulp_gt_list.append(dic_gt_score[k][0])
        sexy_gt_list.append(dic_gt_score[k][1])
        normal_gt_list.append(dic_gt_score[k][2])
y_score.append(pulp_score_list)
y_score.append(sexy_score_list)
y_score.append(normal_score_list)
y_gt_score.append(pulp_gt_list)
y_gt_score.append(sexy_gt_list)
y_gt_score.append(normal_gt_list)
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues, figsize=None):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    (This function is copied from the scikit docs.)

    :param cm: confusion-matrix array, shape (n_classes, n_classes)
    :param classes: tick labels, in the same order as the matrix rows
    :param normalize: if True, rescale each row to sum to 1
    :param figsize: forwarded to plt.figure
    """
    plt.figure(figsize=figsize)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Normalization happens AFTER imshow, so the image shows raw counts while
    # the printed matrix and the per-cell text below use normalized values.
    if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print(cm)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White text on dark cells, black on light ones.
        plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Report per-class precision / recall / average precision plus overall
# accuracy, then render the confusion matrix.
classes = ['pulp','sexy','normal']
cm = confusion_matrix(y_gt, y_pred, labels=np.arange(len(classes)))
p = precision_score(y_gt, y_pred, average=None)
r = recall_score(y_gt, y_pred, average=None)
# ap = average_precision_score(y_gt,y_pred)
acc = accuracy_score(y_gt, y_pred)
print('accuracy:', acc)
for i in range(len(classes)):
    # AP is computed one-vs-rest from the per-class gt/score columns.
    ap = average_precision_score(y_gt_score[i],y_score[i])
    print('%s precision:' % classes[i], p[i])
    print('%s recall:' % classes[i], r[i])
    print('%s ap:'%classes[i],ap)
print('Top-1 error ',1-acc)
plot_confusion_matrix(cm, classes)
|
[
"luoyang@qiniu.com"
] |
luoyang@qiniu.com
|
a08bbad48829c38821e8d071a6a442413f27293f
|
f09dc121f213f2881df3572288b7ee5b39246d73
|
/aliyun-python-sdk-opensearch/aliyunsdkopensearch/request/v20171225/DescribeAppGroupDataReportRequest.py
|
758871adcb9db7d9bd59f3a70169885a82a3c3bd
|
[
"Apache-2.0"
] |
permissive
|
hetw/aliyun-openapi-python-sdk
|
2f31378ad6be0896fb8090423f607e9c7d3ae774
|
7443eacee9fbbaa93c7975c6dbec92d3c364c577
|
refs/heads/master
| 2023-01-19T22:42:36.214770
| 2020-12-04T10:55:14
| 2020-12-04T10:55:14
| 318,689,093
| 1
| 0
|
NOASSERTION
| 2020-12-05T03:03:03
| 2020-12-05T03:03:03
| null |
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkopensearch.endpoint import endpoint_data
class DescribeAppGroupDataReportRequest(RoaRequest):
    """ROA request for OpenSearch DescribeAppGroupDataReport (GET, API 2017-12-25)."""

    def __init__(self):
        """Configure URI pattern, HTTP method and endpoint tables for this call."""
        RoaRequest.__init__(self, 'OpenSearch', '2017-12-25', 'DescribeAppGroupDataReport','opensearch')
        self.set_uri_pattern('/v4/openapi/app-groups/[appGroupIdentity]/data-report')
        self.set_method('GET')
        # Populate the endpoint tables only when the base request defines them.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_endTime(self):
        """Query parameter 'endTime'."""
        return self.get_query_params().get('endTime')

    def set_endTime(self, endTime):
        self.add_query_param('endTime', endTime)

    def get_startTime(self):
        """Query parameter 'startTime'."""
        return self.get_query_params().get('startTime')

    def set_startTime(self, startTime):
        self.add_query_param('startTime', startTime)

    def get_appGroupIdentity(self):
        """Path parameter 'appGroupIdentity'."""
        return self.get_path_params().get('appGroupIdentity')

    def set_appGroupIdentity(self, appGroupIdentity):
        self.add_path_param('appGroupIdentity', appGroupIdentity)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
c06d9cfdd26f717ccda451b45843ae4a72787c06
|
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
|
/AutonomousSourceCode/data/raw/squareroot/367b3079-2a92-41b6-b880-54e3b3730861__squareRoot.py
|
80bbf1197a618a7a41813e23e44f3e78c6e83bf1
|
[] |
no_license
|
erickmiller/AutomatousSourceCode
|
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
|
44ee2fb9ac970acf7389e5da35b930d076f2c530
|
refs/heads/master
| 2021-05-24T01:12:53.154621
| 2020-11-20T23:50:11
| 2020-11-20T23:50:11
| 60,889,742
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# import math
def closeEnough(num1, num2):
    """Return True when num1 and num2 differ by less than 0.001."""
    # Direct boolean return replaces the redundant if/else True/False branch.
    return abs(num1 - num2) < 0.001
def squareRoot(num,guess):
    """Newton/Heron iteration: print num's square root (4 d.p.).

    Recurses with the averaged guess until num/guess and guess agree to
    within closeEnough's tolerance. (Python 2: 'print' statement.)
    """
    # guess = 1
    if(closeEnough((num/guess),guess)):
        print round(guess,4)
    else:
        guess = ((num/guess)+guess)/2
        squareRoot(num,guess)
if __name__ == '__main__':
    # Python 2 input() evaluates the typed expression; float() then coerces it.
    num = input("Enter number:")
    squareRoot(float(num),1.0)
|
[
"erickmiller@gmail.com"
] |
erickmiller@gmail.com
|
5ef4daa35ad9543b2fe5089654f203686e072f58
|
6550cc368f029b3955261085eebbddcfee0547e1
|
/第9部分-flask+智能玩具(火龙果)/day118/今日代码/day118/goto_tuling.py
|
1b8d9603d97063fdb0054446d7e0569040b61834
|
[] |
no_license
|
vividyellow/oldboyeduPython14qi
|
d00c8f45326e16464c3d4e8df200d93779f68bd3
|
de1e9f6efafa2846c068b3fe5ad6e1ca19f74a11
|
refs/heads/master
| 2022-09-17T21:03:17.898472
| 2020-01-31T10:55:01
| 2020-01-31T10:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
import requests
# Tuling chatbot API endpoint and request template. The inputText.text and
# userInfo.userId fields are overwritten per call by tl() below.
url = "http://openapi.tuling123.com/openapi/api/v2"
data_dict = {
    "reqType":0,
    "perception": {
        "inputText": {
            "text": "北京"
        },
    },
    "userInfo": {
        "apiKey": "c3a9ba0d958a43658a5acdcae50c13ae",
        "userId": "jinwangbas"
    }
}
def tl(text,uid):
    """Send *text* to the Tuling chatbot as user *uid*; return the reply text.

    NOTE(review): mutates the shared module-level data_dict before posting,
    so concurrent calls would race — confirm single-threaded use.
    """
    data_dict["perception"]["inputText"]["text"] = text
    data_dict["userInfo"]["userId"] = uid
    res = requests.post(url,json=data_dict)
    res_json = res.json()
    # Take the text of the first result entry from the API response.
    return res_json.get("results")[0]["values"]["text"]
|
[
"524991368@qq.com"
] |
524991368@qq.com
|
5f267713a18175d80bf3a5ca7febbc4b744eccfe
|
a83108c53c454102317d7bb4e769f36b661c75ed
|
/config/geturlParams.py
|
b8c892530271f545e9612e647a80ae8f630fed4e
|
[] |
no_license
|
xuechao1/DX_interfaceTest
|
aa35a83390113c5be7d6bdf6e6c796b35059f63e
|
524cce8cc06dc8f045a9b98bfafdbaecd25726df
|
refs/heads/master
| 2022-12-05T17:00:45.289070
| 2020-08-20T10:14:28
| 2020-08-20T10:14:28
| 282,176,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
from config import readConfig as readConfig
readconfig = readConfig.ReadConfig()
class geturlParams():
    """Assemble the login URL from values read out of the shared config file."""

    def get_Url(self):
        """Return '<scheme>://<baseurl>:8888/login?' built from readConfig values."""
        scheme = readconfig.get_http('scheme')
        host = readconfig.get_http('baseurl')
        return scheme + '://' + host + ':8888' + '/login' + '?'
if __name__ == '__main__':  # manual check: print the assembled URL
    print(geturlParams().get_Url())
|
[
"623858143@qq.com"
] |
623858143@qq.com
|
c215661b1e7f55ca27629f44a33101451779afe1
|
0aa64aa023f80c97c8ded68dee7541ca7d3aa274
|
/ImageEnhancer/__init__.py
|
86c7e8ae06480cd84cd538a85a494986eb1b6c22
|
[] |
no_license
|
YuMurata/gdrive_scripts
|
fb7751625b0d73d50ee91f9ab74aa9883a317231
|
ce2cf48b757eca558e8f93892b9b929352431bfd
|
refs/heads/master
| 2020-12-20T04:38:00.127118
| 2020-03-13T05:02:26
| 2020-03-13T05:02:26
| 235,964,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
from .enhance_definer \
import (enhance_class_list, enhance_dict, enhance_name_list,
MAX_PARAM, MIN_PARAM)
from .image_enhancer \
import (ImageEnhancer, ImageEnhancerException, ResizableEnhancer)
from .generate_param import generate_random_param
|
[
"purin7635@gmail.com"
] |
purin7635@gmail.com
|
a076db6989aa4821c423febbc9cc4b4248a6ecb3
|
18430833920b3193d2f26ed526ca8f6d7e3df4c8
|
/src/transmittals/migrations/0051_auto_20160226_1127.py
|
4cfd707e588c319531e2c6f51be7efc6f132cf50
|
[
"MIT"
] |
permissive
|
providenz/phase
|
ed8b48ea51d4b359f8012e603b328adf13d5e535
|
b0c46a5468eda6d4eae7b2b959c6210c8d1bbc60
|
refs/heads/master
| 2021-01-17T06:56:07.842719
| 2016-06-28T11:17:53
| 2016-06-28T11:17:53
| 47,676,508
| 0
| 0
| null | 2015-12-09T07:45:19
| 2015-12-09T07:45:18
| null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (2016-02-26): changes trsrevision.originator_new into a
    # ForeignKey to 'accounts.Entity'. Depends on 0050_make_entities.

    dependencies = [
        ('transmittals', '0050_make_entities'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trsrevision',
            name='originator_new',
            field=models.ForeignKey(verbose_name='Originator', to='accounts.Entity'),
        ),
    ]
|
[
"thibault@miximum.fr"
] |
thibault@miximum.fr
|
58ef51cbfb19381e058ed5c3e284c3f524d96b96
|
de468d3ec6b7b69664678789e5fa71b613b29687
|
/scine_heron/tests/create_molecule_animator_test.py
|
2b7d9a0197b9dff66dcdc7a40c6f8a4ae1afba00
|
[
"BSD-3-Clause"
] |
permissive
|
qcscine/heron
|
dc566bf8bfdd5b5271ed79faed249a6552390d0d
|
688d2a510fda9f6bfaf5ef3af91fa3b988703a28
|
refs/heads/master
| 2023-04-06T23:31:14.931706
| 2022-08-31T05:40:15
| 2022-08-31T05:40:15
| 526,650,129
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,771
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = """ This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
"""
Tests for the create_molecule_animator function.
"""
from scine_heron.energy_profile.energy_profile_status_manager import (
EnergyProfileStatusManager,
)
from scine_heron.status_manager import StatusManager
from scine_heron.haptic.haptic_client import HapticClient
from scine_heron.electronic_data.electronic_data_status_manager import (
ElectronicDataStatusManager,
)
from scine_heron.settings.settings_status_manager import SettingsStatusManager
from scine_heron.molecule.create_molecule_animator import create_molecule_animator
from scine_heron.molecule.animator import Animator
from typing import Optional, List, TYPE_CHECKING, Any
import pytest
from vtk import vtkMolecule
# TODO Disabled as long as test_updates_molecule is disabled
# from PySide2.QtWidgets import QApplication
# from PySide2.QtCore import QEventLoop
if TYPE_CHECKING:
Signal = Any
else:
from PySide2.QtCore import Signal
@pytest.fixture(name="animator")  # type: ignore[misc]
def create_animator(molecule: vtkMolecule) -> Animator:
    """
    Creates a molecule animator with the function
    `create_molecule_animator`.

    Wires default-constructed status managers and a fresh HapticClient to
    the molecule; the trailing Signal() stands in for the render signal.
    The leading 0 is presumably a molecule/settings index — confirm against
    create_molecule_animator's signature.
    """
    settings_manager = SettingsStatusManager()
    energy_status_manager = EnergyProfileStatusManager()
    charge_status_manager = StatusManager[Optional[List[float]]](None)
    electronic_data_status_manager = ElectronicDataStatusManager()
    return create_molecule_animator(
        0,
        molecule,
        settings_manager,
        HapticClient(),
        energy_status_manager,
        electronic_data_status_manager,
        charge_status_manager,
        Signal(),
    )
# TODO this test does not work without a haptic device
# def test_updates_molecule(
# _app: QApplication, animator: Animator, molecule: vtkMolecule
# ) -> None:
# """
# Checks that the animator applies the gradient to the molecule.
# """
# startX = molecule.GetAtom(0).GetPosition().GetX()
# animator.start()
# loop = QEventLoop()
# animator.render_signal.connect(loop.quit)
# loop.exec_()
# assert molecule.GetAtom(0).GetPosition().GetX() > startX
# assert molecule.GetAtom(0).GetPosition().GetY() == pytest.approx(0.0)
# assert molecule.GetAtom(0).GetPosition().GetZ() == pytest.approx(0.0)
# assert molecule.GetAtom(1).GetPosition().GetX() == pytest.approx(
# -1.0 * molecule.GetAtom(0).GetPosition().GetX()
# )
# assert molecule.GetAtom(1).GetPosition().GetY() == pytest.approx(0.0)
# assert molecule.GetAtom(1).GetPosition().GetZ() == pytest.approx(0.0)
|
[
"scine@phys.chem.ethz.ch"
] |
scine@phys.chem.ethz.ch
|
8ded2230e932e52c5fa2c4c833ee7824fad8c28e
|
638af6b8c580eeae23fc1034882c4b514195137a
|
/Packages/vcs/Test/test_mesh_leg.py
|
5b4d3ee35046a718c7cf7f1b6b698c33d332b6db
|
[] |
no_license
|
doutriaux1/uvcdat
|
83684a86b514b8cac4d8900a503fc13d557fc4d2
|
37e9635f988696c346b4c3cdb49144d1e21dab5d
|
refs/heads/master
| 2021-01-17T07:57:22.897539
| 2015-02-02T22:52:12
| 2015-02-02T22:52:12
| 14,878,320
| 1
| 0
| null | 2015-02-19T20:54:25
| 2013-12-02T23:44:46
|
C
|
UTF-8
|
Python
| false
| false
| 793
|
py
|
# Adapted for numpy/ma/cdms2 by convertcdms.py
# Regression test for legend placement: plot the sample curvilinear grid
# repeatedly while moving/flipping the template legend box, validating each
# render with support.check_plot.
import vcs,cdms2 as cdms,sys,support,os
bg=support.bg
f=cdms.open(os.path.join(cdms.__path__[0],'..','..','..','..','sample_data','sampleCurveGrid4.nc'))
s=f('sample')
x=vcs.init()
t=x.createtemplate('jj')
m=x.createmeshfill('hh')
m.mesh='y'
# NOTE(review): m is immediately rebound to an isofill, so the meshfill 'hh'
# above is never plotted — confirm whether that was intended.
m=x.createisofill('jj')
t.scale(.8)
# Narrow legend on the right edge.
t.legend.y2=.8
t.legend.x1=.8
t.legend.x2=.82
x.plot(s,t,m,bg=bg)
support.check_plot(x)
x.clear()
# Legend flipped horizontally (x1 > x2).
t.legend.x2=.78
t.legend.x1=.8
x.plot(s,m,t,bg=bg)
support.check_plot(x)
x.clear()
# Legend flipped vertically.
t.legend.y2=t.legend.y1
t.legend.y1=.8
x.plot(s,m,t,bg=bg)
support.check_plot(x)
x.clear()
# Wide horizontal legend along the bottom.
t.legend.x1=.2
t.legend.x2=.8
t.legend.y1=.15
t.legend.y2=.2
x.plot(s,m,t,bg=bg)
support.check_plot(x)
x.clear()
# Bottom legend flipped vertically.
t.legend.y1=.15
t.legend.y2=.1
x.plot(s,m,t,bg=bg)
support.check_plot(x)
|
[
"doutriaux1@meryem.llnl.gov"
] |
doutriaux1@meryem.llnl.gov
|
4ae301b5361e0b6d3d2a61c245a7ff6c6621f51a
|
809a18b3dd7e90393a69d48f20be840041ef396e
|
/models/pointer.py
|
fbe08458d00cd150934dca5c3bb574b7b10f5407
|
[] |
no_license
|
wanwanaa/transformer-pointer
|
e58d4cb4b4f8a412515316457afac8708cdf1ef5
|
aac7bff63e19d4845ac23dfcf9131f1a20812efc
|
refs/heads/master
| 2020-05-17T17:08:05.747337
| 2019-09-08T13:02:30
| 2019-09-08T13:02:30
| 183,840,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,745
|
py
|
import torch
import torch.nn as nn
from models.encoder import Encoder
from models.decoder import Decoder
class Pointer(nn.Module):
    """Computes the pointer/copy gate probability from embedding, hidden state
    and attention context."""

    def __init__(self, config):
        super().__init__()
        # Fuse the three model_size-wide inputs back down to model_size.
        self.linear = nn.Sequential(
            nn.Linear(config.model_size*3, config.model_size),
            nn.SELU(),
            nn.Linear(config.model_size, config.model_size))
        self.linear_prob = nn.Linear(config.model_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, emb, hidden, context):
        """
        :param emb: (batch, 1, model_size)
        :param hidden: (batch, 1, model_size)
        :param context: (batch, 1, model_size)
        :return: gate probability in [0, 1], squeezed to (batch,)
        """
        fused = torch.cat((emb, hidden, context), dim=-1)
        gate_features = self.linear(fused)
        # (batch, 1, model_size) -> (batch, 1, 1) -> squeeze
        return self.sigmoid(self.linear_prob(gate_features)).squeeze()
class Luong_Attention(nn.Module):
    """Luong-style global attention with MLP projections on query and context.

    Scores every encoder position against a projected decoder output via a
    batched dot product, softmax-normalizes the scores, and returns both the
    weights and the projected context vector.
    """

    def __init__(self, config):
        super().__init__()
        self.model_size = config.model_size
        d = config.model_size
        # Query projection applied to the decoder output before scoring.
        self.linear_in = nn.Sequential(
            nn.Linear(d, d),
            nn.SELU(),
            nn.Linear(d, d)
        )
        # Output projection applied to the attended context.
        self.linear_out = nn.Sequential(
            nn.Linear(d, d),
            nn.SELU(),
            nn.Linear(d, d)
        )
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, output, encoder_out):
        """Compute attention weights and context.

        :param output: (batch, 1, hidden_size) decoder output
        :param encoder_out: (batch, t_len, hidden_size) encoder states
        :return: (weights (batch, t_len), context (batch, 1, model_size))
        """
        query = self.linear_in(output).transpose(1, 2)       # (batch, hidden, 1)
        scores = torch.bmm(encoder_out, query)               # (batch, t_len, 1)
        weights = self.softmax(scores.transpose(1, 2))       # (batch, 1, t_len)
        context = torch.bmm(weights, encoder_out)            # (batch, 1, hidden)
        context = self.linear_out(context)
        return weights.squeeze(), context
class Transformer_Pointer(nn.Module):
    """Transformer encoder/decoder with a pointer (copy) mechanism.

    Mixes the decoder's generation distribution with an attention-based copy
    distribution over the character input x_c, gated by the Pointer module.
    """
    def __init__(self, config):
        super().__init__()
        # Separate encoders for the word-level and character-level inputs.
        self.encoder_word = Encoder(config, config.src_vocab_size)
        self.encoder_char = Encoder(config, config.tgt_vocab_size)
        self.pointer = Pointer(config)
        self.attention = Luong_Attention(config)
        self.decoder = Decoder(config)
        # Projects decoder states to target-vocabulary logits.
        self.linear_out = nn.Linear(config.model_size, config.tgt_vocab_size)
        self.softmax = nn.Softmax(dim=-1)
        self.s_len = config.s_len   # number of decoding steps
        self.bos = config.bos       # begin-of-sentence token id
    # add <bos> to sentence
    def convert(self, x):
        """
        :param x:(batch, s_len) (word_1, word_2, ... , word_n)
        :return:(batch, s_len) (<bos>, word_1, ... , word_n-1)
        """
        if torch.cuda.is_available():
            start = (torch.ones(x.size(0), 1) * self.bos).type(torch.cuda.LongTensor)
        else:
            start = (torch.ones(x.size(0), 1) * self.bos).type(torch.LongTensor)
        # Prepend <bos> and drop the last token to keep the length at s_len.
        x = torch.cat((start, x), dim=1)
        return x[:, :-1]
    def forward(self, x_w, x_c, y):
        """Teacher-forced training pass.

        :param x_w: (batch, t_len) word-level source ids
        :param x_c: (batch, c_len) character-level source ids (copy targets)
        :param y:   (batch, s_len) gold target ids
        :return: (s_len, batch, vocab_size) per-step mixed distributions
        """
        y_s = self.convert(y)
        encoder_out = self.encoder_word(x_w)
        encoder_attn = self.encoder_char(x_c)
        final = []
        # NOTE(review): the decoder is re-run from scratch on the growing
        # prefix at every step; correct but O(s_len^2) in decoder calls.
        for i in range(self.s_len):
            dec_output = self.decoder(x_w, y_s[:, :i+1], encoder_out)
            emb = self.decoder.embedding(y_s[:, i].unsqueeze(1))
            output = self.linear_out(dec_output[:, -1, :])
            # gen (batch, vocab_size)
            gen = self.softmax(output)
            # pointer
            # ptr (batch, c_len)
            # context (batch, 1, model_size)
            ptr, context = self.attention(dec_output[:, -1, :].unsqueeze(1), encoder_attn)
            # prob (batch, )
            prob = self.pointer(emb, dec_output[:, -1, :].unsqueeze(1), context).unsqueeze(1)
            # Mix: (1-p)*generate + p*copy; scatter adds the copy mass onto
            # the vocabulary ids appearing in x_c.
            final_out = (1-prob) * gen
            final_out = final_out.scatter_add_(1, x_c, prob*ptr)
            final.append(final_out)
        return torch.stack(final)
    def sample(self, x_w, x_c):
        """Greedy decoding; returns (per-step distributions, decoded ids)."""
        encoder_out = self.encoder_word(x_w)
        encoder_attn = self.encoder_char(x_c)
        start = torch.ones(x_w.size(0)) * self.bos
        start = start.unsqueeze(1)
        if torch.cuda.is_available():
            start = start.type(torch.cuda.LongTensor)
        else:
            start = start.type(torch.LongTensor)
        # the first <start>
        out = torch.ones(x_w.size(0)) * self.bos
        out = out.unsqueeze(1)
        final = []
        for i in range(self.s_len):
            if torch.cuda.is_available():
                out = out.type(torch.cuda.LongTensor)
            else:
                out = out.type(torch.LongTensor)
            dec_output = self.decoder(x_w, out, encoder_out)
            emb = self.decoder.embedding(out[:, -1].unsqueeze(1))
            output = self.linear_out(dec_output[:, -1, :])
            gen = self.softmax(output)
            ptr, context = self.attention(dec_output[:, -1, :].unsqueeze(1), encoder_attn)
            # prob (batch, )
            prob = self.pointer(emb, dec_output[:, -1, :].unsqueeze(1), context).unsqueeze(1)
            final_out = (1 - prob) * gen
            final_out = final_out.scatter_add_(1, x_c, prob * ptr)
            final.append(final_out)
            # NOTE(review): the greedy pick uses argmax of 'gen' only, not of
            # the mixed 'final_out' — confirm this is intentional.
            gen = torch.argmax(gen, dim=-1).unsqueeze(1)
            out = torch.cat((out, gen), dim=1)
        return torch.stack(final), out
|
[
"1551612415@qq.com"
] |
1551612415@qq.com
|
40f8185de6c03d4570b59f34d711c624447175de
|
093b9569be9d1c4e5daf92efbebc38f680917b2d
|
/.history/base/views_20210828170402.py
|
51bdc9919f9fd0fdfec82eaaf4503593d19de3b0
|
[] |
no_license
|
Justin-Panagos/todoList
|
95b1e97ff71af1b0be58e7f8937d726a687cea4d
|
10539219b59fcea00f8b19a406db3d4c3f4d289e
|
refs/heads/master
| 2023-08-04T13:27:13.309769
| 2021-08-29T14:06:43
| 2021-08-29T14:06:43
| 400,827,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def taskList(request):
    """Task-list view; currently a placeholder returning an empty response.

    The original wrapped the body in braces (`{ return ... }`), which is a
    Python syntax error — a function body is a plain indented suite.
    """
    return HttpResponse('')
|
[
"justpanagos@gmail.com"
] |
justpanagos@gmail.com
|
8f2dc8276cd6ae79f6dc311c301f1ec35490ef60
|
7d3592b74233ee8b1afa2fac00fa5a6f045f5525
|
/tutorials/inputFromDependent/inputFromDependent.py
|
a53bf2778a6a20a6298dff152673c8f3bb9fa422
|
[
"Apache-2.0"
] |
permissive
|
afcarl/PyPPL
|
eca2a3e32729d7fd65042164b82c84f21877de2d
|
c6c654f163d1bdd0ae0357025c5782b17c14b93c
|
refs/heads/master
| 2020-03-25T18:51:46.872319
| 2018-08-08T16:10:21
| 2018-08-08T16:10:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
from pyppl import PyPPL, Proc, Channel
# Stage 1: sort every ./data/*.txt file (reverse order on field 1), up to
# five jobs in parallel.
pSort = Proc(desc = 'Sort files.')
pSort.input = {"infile:file": Channel.fromPattern("./data/*.txt")}
pSort.output = "outfile:file:{{in.infile | fn}}.sorted"
pSort.forks = 5
pSort.script = """
sort -k1r {{in.infile}} > {{out.outfile}}
"""
# Stage 2: prefix each line with its line number; its input channel comes
# from pSort's output because of the dependency declared below.
pAddPrefix = Proc(desc = 'Add line number to each line.')
pAddPrefix.depends = pSort
# automatically inferred from pSort.output
pAddPrefix.input = "infile:file"
pAddPrefix.output = "outfile:file:{{in.infile | fn}}.ln"
pAddPrefix.exdir = './export'
pAddPrefix.forks = 5
pAddPrefix.script = """
paste -d. <(seq 1 $(wc -l {{in.infile}} | cut -f1 -d' ')) {{in.infile}} > {{out.outfile}}
"""
# Run the pipeline starting from the root process.
PyPPL().start(pSort).run()
|
[
"pwwang@pwwang.com"
] |
pwwang@pwwang.com
|
a468ac84eca5711fba49ccbc853f7c7e6841ca2f
|
0953f9aa0606c2dfb17cb61b84a4de99b8af6d2c
|
/python/ray/tests/test_component_failures_2.py
|
e0bebf7bd94d429633105b75055895a0c3c07d53
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
oscarknagg/ray
|
da3dc03e24945ff4d5718fd35fc1b3408d8907eb
|
20d47873c9e8f5bbb80fe36e5d16256c337c4db3
|
refs/heads/master
| 2023-09-01T01:45:26.364731
| 2021-10-21T07:46:52
| 2021-10-21T07:46:52
| 382,402,491
| 2
| 1
|
Apache-2.0
| 2021-09-15T12:34:41
| 2021-07-02T16:25:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,689
|
py
|
import os
import signal
import sys
import time
import pytest
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray._private.test_utils import (
RayTestTimeoutException,
get_other_nodes,
wait_for_condition,
)
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_workers_separate_multinode(request):
    """Start a Ray cluster of (num_nodes, num_initial_workers per node) and
    tear everything down after the test."""
    num_nodes = request.param[0]
    num_initial_workers = request.param[1]
    # Start the Ray processes.
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_initial_workers)
    ray.init(address=cluster.address)

    yield num_nodes, num_initial_workers

    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def test_worker_failed(ray_start_workers_separate_multinode):
    """Kill every worker process mid-task and verify tasks either complete
    or fail with a task/worker-crash error — never hang."""
    num_nodes, num_initial_workers = (ray_start_workers_separate_multinode)
    if num_nodes == 4 and sys.platform == "win32":
        pytest.skip("Failing on Windows.")

    @ray.remote
    def get_pids():
        time.sleep(0.25)
        return os.getpid()

    # Discover all worker PIDs by running more tasks than there are workers.
    start_time = time.time()
    pids = set()
    while len(pids) < num_nodes * num_initial_workers:
        new_pids = ray.get([
            get_pids.remote()
            for _ in range(2 * num_nodes * num_initial_workers)
        ])
        for pid in new_pids:
            pids.add(pid)
        if time.time() - start_time > 60:
            raise RayTestTimeoutException(
                "Timed out while waiting to get worker PIDs.")

    @ray.remote
    def f(x):
        time.sleep(0.5)
        return x

    # Submit more tasks than there are workers so that all workers and
    # cores are utilized.
    object_refs = [f.remote(i) for i in range(num_initial_workers * num_nodes)]
    object_refs += [f.remote(object_ref) for object_ref in object_refs]
    # Allow the tasks some time to begin executing.
    time.sleep(0.1)
    # Kill the workers as the tasks execute.
    for pid in pids:
        try:
            os.kill(pid, SIGKILL)
        except OSError:
            # The process may have already exited due to worker capping.
            pass
        time.sleep(0.1)
    # Make sure that we either get the object or we get an appropriate
    # exception.
    for object_ref in object_refs:
        try:
            ray.get(object_ref)
        except (ray.exceptions.RayTaskError,
                ray.exceptions.WorkerCrashedError):
            pass
def _test_component_failed(cluster, component_type):
    """Kill a component on all worker nodes and check workload succeeds."""
    # Submit many tasks with many dependencies.
    @ray.remote
    def f(x):
        return x

    @ray.remote
    def g(*xs):
        return 1

    # Kill the component on all nodes except the head node as the tasks
    # execute. Do this in a loop while submitting tasks between each
    # component failure.
    time.sleep(0.1)
    worker_nodes = get_other_nodes(cluster)
    assert len(worker_nodes) > 0
    for node in worker_nodes:
        process = node.all_processes[component_type][0].process
        # Submit a round of tasks with many dependencies.
        x = 1
        for _ in range(1000):
            x = f.remote(x)          # long serial chain of dependencies
        xs = [g.remote(1)]
        for _ in range(100):
            xs.append(g.remote(*xs))  # fan-in: each task depends on all prior
            xs.append(g.remote(1))
        # Kill a component on one of the nodes.
        process.terminate()
        time.sleep(1)
        process.kill()
        process.wait()
        # poll() is non-None once the process has exited.
        assert not process.poll() is None

    # Make sure that we can still get the objects after the
    # executing tasks died.
    ray.get(x)
    ray.get(xs)
def check_components_alive(cluster, component_type, check_component_alive):
    """Check that a given component type is alive on all worker nodes.

    With check_component_alive=False, instead waits for each component
    process to terminate and asserts that it did.
    """
    worker_nodes = get_other_nodes(cluster)
    assert len(worker_nodes) > 0
    for node in worker_nodes:
        process = node.all_processes[component_type][0].process
        if check_component_alive:
            # poll() returns None while the process is still running.
            assert process.poll() is None
        else:
            # NOTE(review): these messages are missing a space before
            # "to terminate".
            print("waiting for " + component_type + " with PID " +
                  str(process.pid) + "to terminate")
            process.wait()
            print("done waiting for " + component_type + " with PID " +
                  str(process.pid) + "to terminate")
            assert not process.poll() is None
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 8,
        "num_nodes": 4,
        "_system_config": {
            # Short heartbeat timeout so dead raylets are detected quickly.
            "num_heartbeats_timeout": 10
        },
    }],
    indirect=True)
def test_raylet_failed(ray_start_cluster):
    """Workload must survive raylet death on every worker node."""
    cluster = ray_start_cluster
    # Kill all raylets on worker nodes.
    _test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET)
def test_get_node_info_after_raylet_died(ray_start_cluster_head):
    """After the head raylet dies, driver node lookup must raise; after a
    replacement node joins, lookup must return the new raylet socket."""
    cluster = ray_start_cluster_head

    def get_node_info():
        # Resolve which node a driver connecting at the head IP would use.
        return ray._private.services.get_node_to_connect_for_driver(
            cluster.redis_address,
            cluster.head_node.node_ip_address,
            redis_password=cluster.redis_password)

    assert get_node_info(
    ).raylet_socket_name == cluster.head_node.raylet_socket_name

    cluster.head_node.kill_raylet()
    # Wait until the GCS marks the node dead before asserting failure.
    wait_for_condition(
        lambda: not cluster.global_state.node_table()[0]["Alive"], timeout=30)
    with pytest.raises(RuntimeError):
        get_node_info()

    node2 = cluster.add_node()
    assert get_node_info().raylet_socket_name == node2.raylet_socket_name
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
[
"noreply@github.com"
] |
oscarknagg.noreply@github.com
|
08cdc3d43b17366cc761a55a2a4f143677cdb5ab
|
bd300a8b8a0cd370e514580992c9480f64d076da
|
/django_schoolweb/django_schoolweb/urls.py
|
4ee5649975f5eaf43d0a9d52fd0387b89f82197c
|
[] |
no_license
|
33Da/schoolweb
|
546a19046e42da59d082e0f1f492a14a21a17078
|
9d92bab3b1590180231efb8e74a68c375149599e
|
refs/heads/main
| 2023-02-08T12:48:35.250628
| 2021-01-05T06:58:55
| 2021-01-05T06:58:55
| 326,911,447
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
from django.urls import path
from django_schoolweb.settings import MEDIA_ROOT
from django.conf.urls import url,include
from django.views.static import serve
# URL routing: admin app, school app at the site root, and media serving.
urlpatterns = [
    path('admin/', include("adminuser.urls")),
    path('', include("school.urls")),
    # Serve uploaded media directly from Django (fine for development; use
    # a real web server for production media).
    url(r"^media/(?P<path>.*)$", serve, {"document_root": MEDIA_ROOT}),
]
|
[
"764720843@qq.com"
] |
764720843@qq.com
|
35a43d22bca52737c7a041fd115e08514fb30e46
|
e953679220ff59b58eb964b97a98ef026283c8e6
|
/Ch26/2603.py
|
61dc80014e773ba476aeef0ea4b76edae8f61819
|
[] |
no_license
|
lhy0807/A2CS
|
9e440b85b53c79eb0367f3c478f866911422b8d8
|
6d793c1cc4989b123ba8ff1676e376681531c7d2
|
refs/heads/master
| 2021-04-15T06:10:36.178244
| 2018-03-23T02:54:55
| 2018-03-23T02:54:55
| 125,968,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,856
|
py
|
# Chapter 26.03 for normal file
# Exception Handling
# Tianhe Zhang
import pickle
import random
class CarRecord(object):
    """Plain record describing one car; pickled to/from LOGS.car below."""
    def __init__(self):
        self.vhicleID = ""        # NOTE(review): spelling "vhicleID" kept as-is
        self.registration = ""
        self.dateRegist = None
        self.engineSize = 0
        self.purchasePrice = 0.00
    def __repr__(self):
        # The backslash continuations keep the indentation spaces inside the
        # literal, so the rendered string contains long runs of spaces.
        return \
 "vhicleID: {};\
 registration: {};\
 dateRegist: {};\
 engineSize: {};\
 purchasePrice: {}".format(self.vhicleID, self.registration, \
 self.dateRegist, self.engineSize, self.purchasePrice)
def write():
    """Create five randomized CarRecord objects and pickle them to LOGS.car.

    Each record gets a deterministic id (hash of its index) and random
    registration, engine size and price fields.
    """
    cars = []
    for i in range(5):
        car = CarRecord()
        car.vhicleID = str(hash(i))
        car.registration = str(random.randint(0, 1000))
        car.dateRegist = "12/28/1999"
        car.engineSize = random.randrange(5) * 10
        car.purchasePrice = float(random.randint(10000, 99999))
        cars.append(car)
    # 'with' guarantees the file is closed even if pickling raises.
    with open("LOGS.car", "wb") as file:
        for car in cars:
            pickle.dump(car, file)
def read():
    """Load the five pickled CarRecord objects back from LOGS.car.

    Raises NameError("File Not Found") when the file cannot be opened —
    kept (rather than FileNotFoundError) for compatibility with the
    original API.
    """
    try:
        file = open("LOGS.car", "rb")
    except OSError:
        # Narrowed from a bare 'except:' so programming errors propagate.
        raise NameError("File Not Found")
    cars = []
    length = 5
    # The original leaked the handle; 'with' closes it deterministically.
    with file:
        for _ in range(length):
            cars.append(pickle.load(file))
    return cars
def out():
    """Round trip: write the sample records, read them back, print each."""
    write()
    for record in read():
        print(record)
# Demonstrate the write/read round trip when the module is executed.
out()
#############
# TASK 26.02
'''
import pickle
class CarRecord(object):
def __init__(self):
self.vhicleID = ""
self.registration = ""
self.dateRegist = None
self.engineSize = 0
self.purchasePrice = 0.00
def __repr__(self):
return \
"vhicleID: {};\
registration: {};\
dateRegist: {};\
engineSize: {};\
purchasePrice: {}".format(self.vhicleID, self.registration, \
self.dateRegist, self.engineSize, self.purchasePrice)
def createFile():
newCar1 = CarRecord()
newCar1.vhicleID = "499500"
newCar1.registration = "abc"
newCar1.dateRegist = "1999/12/28"
newCar1.engineSize = 100
newCar1.purchasePrice = 1000.02
newCar2 = CarRecord()
newCar2.vhicleID = "100112"
newCar2.registration = "flk"
newCar2.dateRegist = "1989/06/04"
newCar2.engineSize = 200
newCar2.purchasePrice = 13200.02
newCar3 = CarRecord()
newCar3.vhicleID = "549123"
newCar2.registration = "grs"
newCar2.dateRegist = "2001/09/11"
newCar2.engineSize = 400
newCar2.purchasePrice = 4569.78
l1 = [newCar1, newCar2, newCar3]
car_file = open('RAND.car', "wb")###
car_file.close()
car_file = open('RAND.car', "ab+")
for i in range(len(l1)):
cur_car = l1[i]
addr = abs(hash(cur_car.vhicleID))
car_file.seek(addr)
print(len(pickle.dumps(cur_car)))
pickle.dump(cur_car, car_file)
car_file.close()
def find(vhicleID):
r = []
try:
file = open("RAND.car", "rb")
except:
raise KeyError("File Not Found")
for i in range(3):
addr = abs(hash(vhicleID))
print(addr)
file.seek(addr)
cur_car = pickle.load(file)
r.append(cur_car)
file.close()
for i in range(len(r)):
print(r[i])
createFile()
find('499500')
'''
|
[
"lihongyu0807@icloud.com"
] |
lihongyu0807@icloud.com
|
fd95b47ac6d809723b958ffb0b5efb3258c2147e
|
d8edd97f8f8dea3f9f02da6c40d331682bb43113
|
/networks349.py
|
768b477ea4b88f0a007d8e534888113870d95ca9
|
[] |
no_license
|
mdubouch/noise-gan
|
bdd5b2fff3aff70d5f464150443d51c2192eeafd
|
639859ec4a2aa809d17eb6998a5a7d217559888a
|
refs/heads/master
| 2023-07-15T09:37:57.631656
| 2021-08-27T11:02:45
| 2021-08-27T11:02:45
| 284,072,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,512
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__version__ = 205
# Number of wires in the CDC
n_wires = 3606
# Number of continuous features (E, t, dca)
n_features = 3
class Gen(nn.Module):
    """Generator: latent vector -> (continuous features, wire logits).

    Output is the channel-wise concatenation of tanh-squashed features
    (n_features channels) and raw wire logits (n_wires channels), each of
    length seq_len.
    """
    def __init__(self, ngf, latent_dims, seq_len, encoded_dim):
        super().__init__()

        self.ngf = ngf
        self.seq_len = seq_len

        self.version = __version__
        
        # Input: (B, latent_dims, 1)
        self.act = nn.ReLU()

        # Projects the latent vector to a (4096, seq_len//64) feature map.
        self.lin0 = nn.Linear(latent_dims, seq_len//64*4096, bias=True)

        class GBlock(nn.Module):
            """Upsampling block: doubles the sequence length.

            Residual form: 1x1-projected + interpolated skip path added to a
            strided-transposed-conv main path."""
            def __init__(self, in_channels, out_channels):
                super().__init__()
                self.convp = nn.ConvTranspose1d(in_channels, out_channels, 1, 1, 0)
                self.convu = nn.ConvTranspose1d(in_channels, out_channels, 4, 2, 1)
                self.conv1 = nn.ConvTranspose1d(out_channels, out_channels, 3, 1, 1)
                self.bnu = nn.BatchNorm1d(out_channels)
                self.bn1 = nn.BatchNorm1d(out_channels)
                self.act = nn.ReLU()
            def forward(self, x):
                y0 = F.interpolate(self.convp(x), scale_factor=2, mode='linear')
                y = self.act(self.bnu(self.convu(x)))
                y = self.act(y0 + self.bn1(self.conv1(y)))
                return y

        # Six doubling blocks: seq_len//64 -> seq_len.
        self.gb1 = GBlock(4096, 3072)
        self.gb2 = GBlock(3072, 2048)
        self.gb3 = GBlock(2048, 1024)
        self.gb4 = GBlock(1024, 768)
        self.gb5 = GBlock(768, 512)
        self.gb6 = GBlock(512, 256)

        # Wire head: per-position logits over all n_wires wires.
        self.convw1 = nn.ConvTranspose1d(256, n_wires, 1, 1, 0)

        # Feature head, conditioned on the (normalized) wire logits.
        self.bnp0 = nn.BatchNorm1d(n_wires)
        self.convp1 = nn.ConvTranspose1d(n_wires, 256, 3, 1, 1)
        self.bnp1 = nn.BatchNorm1d(256)
        self.convp2 = nn.ConvTranspose1d(256, 64, 3, 1, 1)
        self.bnp2 = nn.BatchNorm1d(64)
        self.convp3 = nn.ConvTranspose1d(64, n_features, 1, 1, 0)

        self.out = nn.Tanh()

    def forward(self, z):
        # z: random point in latent space
        x = self.act(self.lin0(z).view(-1, 4096, self.seq_len // 64))

        x = self.gb1(x)
        x = self.gb2(x)
        x = self.gb3(x)
        x = self.gb4(x)
        x = self.gb5(x)
        x = self.gb6(x)

        w = self.convw1(x)

        p = self.act(self.bnp1(self.convp1(self.act(self.bnp0(w)))))
        p = self.act(self.bnp2(self.convp2(p)))
        p = self.convp3(p)

        # (B, n_features + n_wires, seq_len)
        return torch.cat([self.out(p), w], dim=1)
class Disc(nn.Module):
    """Critic over concatenated (features, encoded-wire) sequences.

    NOTE(review): conv0 takes n_features+2 input channels, so the wire part
    is expected to be 2-channel (presumably the VAE encoding below, not the
    raw n_wires logits Gen emits) — confirm against the training loop.
    """
    def __init__(self, ndf, seq_len, encoded_dim):
        super().__init__()

        self.version = __version__
        
        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)

        class DBlock(nn.Module):
            """Downsampling block: strided conv halves the sequence length."""
            def __init__(self, in_channels, out_channels):
                super().__init__()
                self.convd = nn.Conv1d(in_channels, out_channels, 4, 2, 1)
                self.act = nn.LeakyReLU(0.2)
            def forward(self, x):
                y = self.act(self.convd(x))
                return y

        self.conv0 = nn.Conv1d(n_features+2, 64, 1, 1, 0)
        self.conv1 = nn.Conv1d(64, 128, 9, 1, 4)

        # Three halvings: seq_len -> seq_len // 8 (matches lin0 below).
        self.dbw1 = DBlock(128, 128)
        self.dbw2 = DBlock(128, 256)
        self.dbw3 = DBlock(256, 512)

        self.lin0 = nn.Linear(512 * seq_len // 8, 512, bias=True)
        self.lin1 = nn.Linear(512, 1, bias=True)

        self.out = nn.Identity()

    def forward(self, x_): 
        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        seq_len = x_.shape[2]
        x = x_
        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)

        # p and w are split out but only the concatenation is consumed below.
        p = x[:,:n_features]
        w = x[:,n_features:]
        #x = torch.cat([p, w], dim=1)

        x = self.act(self.conv0(x))
        x = self.act(self.conv1(x))

        x = self.dbw1(x)
        x = self.dbw2(x)
        x = self.dbw3(x)

        # Flatten (channels, length) and score with a 2-layer head.
        x = self.lin0(x.flatten(1,2))
        x = self.lin1(self.act(x))
        
        # (B,) critic scores
        return self.out(x).squeeze(1)
class VAE(nn.Module):
    """Autoencoder mapping one-hot wire vectors (n_wires) to a small
    tanh-bounded code of size encoded_dim and back.

    NOTE(review): despite the name there is no sampling/KL term visible
    here — this is a plain autoencoder.
    """
    def __init__(self, encoded_dim):
        super().__init__()
        class Enc(nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.LeakyReLU(0.2)
                self.lin1 = nn.Linear(n_wires, hidden_size)
                self.lin2 = nn.Linear(hidden_size, encoded_dim)
                self.out = nn.Tanh()
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.out(self.lin2(x))

        class Dec(nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.ReLU()
                self.lin1 = nn.Linear(encoded_dim, hidden_size)
                self.lin2 = nn.Linear(hidden_size, n_wires)
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.lin2(x)

        self.enc_net = Enc(512)
        self.dec_net = Dec(512)

    # enc/dec operate on (B, C, L); permute so Linear acts on the wire axis.
    def enc(self, x):
        return self.enc_net(x.permute(0, 2, 1)).permute(0,2,1)
    def dec(self, x):
        return self.dec_net(x.permute(0, 2, 1)).permute(0,2,1)
    def forward(self, x):
        # Full reconstruction path, channels-last layout (no permutes here).
        y = self.dec_net(self.enc_net(x))
        return y
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
|
[
"m.dubouchet18@imperial.ac.uk"
] |
m.dubouchet18@imperial.ac.uk
|
a72324c97d49222640aa847c71384a651ca6219c
|
d04d73bed28c366712103663d3e3be13622611b9
|
/pactools/dar_model/stable_dar.py
|
30eecbf63154b2b87defb16172559556714ed1c8
|
[] |
no_license
|
EtienneCmb/pactools
|
fcb13cde6f57a5c6abf4b033c24aec72c1201ca7
|
6e5a53deefc4dcede6a4a0293958e39a660dba97
|
refs/heads/master
| 2021-01-21T23:20:18.491959
| 2017-06-08T12:49:55
| 2017-06-08T12:49:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,217
|
py
|
import numpy as np
from .baseLattice import BaseLattice
class StableDAR(BaseLattice):
    """
    A stable driven auto-regressive (DAR) model, as described in [1].

    This model is designed to have an stable instantaneous AR model at each
    time.

    This model uses the parametrization:

    .. math:: y(t) + \\sum_{i=1}^p a_i(t) y(t-i)= \\varepsilon(t)

    with:

    .. math:: a_p^{(p)} = k_p; \;\;\;\;
        a_i^{(p)} = a_i^{(p-1)} + k_p a_{p-i}^{(p-1)}

    .. math:: \\gamma_i = \\log\\left(\\frac{1+k_i}{1-k_i}\\right); \;\;\;\;
        \\gamma_{i}(t)=\\sum_{j=0}^{m}\\gamma_{ij}x(t)^j

    References
    ----------
    [1] Dupre la Tour, T. , Grenier, Y., & Gramfort, A. (2017). Parametric
    estimation of spectrum driven by an exogenous signal. Acoustics, Speech
    and Signal Processing (ICASSP), 2017 IEEE International Conference on,
    4301--4305.
    """
    # ------------------------------------------------ #
    # Functions that overload abstract methods         #
    # ------------------------------------------------ #
    def decode(self, lar):
        """Extracts parcor coefficients from encoded version (e.g. LAR)

        lar : array containing the encoded coefficients

        returns:
        ki : array containing the decoded coefficients (same size as lar)

        """
        # Inverse of encode(); algebraically equal to tanh(lar / 2), which
        # keeps each parcor coefficient in (-1, 1) and the model stable.
        exp_lar = np.exp(lar)
        ki = (exp_lar - 1.0) / (exp_lar + 1.0)
        return ki

    def encode(self, ki):
        """Encodes parcor coefficients to LAR coefficients

        ki : array containing the original parcor coefficients

        returns:
        lar : array containing the encoded coefficients (same size as ki)

        """
        # Log-area-ratio map: bijection from (-1, 1) onto the real line.
        lar = np.log((1.0 + ki) / (1.0 - ki))
        return lar

    def common_gradient(self, p, ki):
        """Compute common factor in gradient. The gradient is computed as
        G[p] = sum from t=1 to T {g[p,t] * F(t)}
        where F(t) is the vector of driving signal and its powers
        g[p,t] = (e_forward[p, t] * e_backward[p-1, t-1]
                + e_backward[p, t] * e_forward[p-1, t]) * phi'[k[p,t]]
        phi is the encoding function, and phi' is its derivative.

        p  : order corresponding to the current lattice cell
        ki : array containing the original parcor coefficients

        returns:
        g : array containing the factors (size (n_epochs, n_points - 1))

        """
        # forward/backward residuals are assumed filled by the base class's
        # lattice recursion before this is called.
        e_forward = self.forward_residual
        e_backward = self.backward_residual
        _, n_epochs, n_points = e_forward.shape

        g = e_forward[p, :, 1:n_points] * e_backward[p - 1, :, 0:n_points - 1]
        g += e_backward[p, :, 1:n_points] * e_forward[p - 1, :, 1:n_points]
        # phi'(k) = (1 - k^2) / 2 for the log-area-ratio encoding above.
        g *= 0.5 * (1.0 - ki[:, 1:n_points] ** 2)  # phi'[k[p,t]])
        return np.reshape(g, (n_epochs, n_points - 1))

    def common_hessian(self, p, ki):
        """Compute common factor in Hessian. The Hessian is computed as
        H[p] = sum from t=1 to T {F(t) * h[p,t] * F(t).T}
        where F(t) is the vector of driving signal and its powers
        h[p,t] = (e_forward[p, t-1]**2 + e_backward[p-1, t-1]**2)
                 * phi'[k[p,t]]**2
                 + (e_forward[p, t] * e_backward[p-1, t-1]
                 e_backward[p, t] * e_forward[p-1, t]) * phi''[k[p,t]]
        phi is the encoding function, phi' is its first derivative,
        and phi'' is its second derivative.

        p  : order corresponding to the current lattice cell
        ki : array containing the original parcor coefficients

        returns:
        h : array containing the factors (size (n_epochs, n_points - 1))

        """
        e_forward = self.forward_residual
        e_backward = self.backward_residual
        _, n_epochs, n_points = e_forward.shape

        # First-derivative-squared term.
        h1 = e_forward[p - 1, :, 1:n_points] ** 2
        h1 += e_backward[p - 1, :, 0:n_points - 1] ** 2
        h1 *= (0.5 * (1.0 - ki[:, 1:n_points] ** 2)) ** 2

        # Second-derivative (curvature) term.
        h2 = e_forward[p, :, 1:n_points] * e_backward[p - 1, :, 0:n_points - 1]
        h2 += e_backward[p, :, 1:n_points] * e_forward[p - 1, :, 1:n_points]
        h2 *= (-0.5 * ki[:, 1:n_points] * (1.0 - ki[:, 1:n_points] ** 2))

        return np.reshape(h1 + h2, (n_epochs, n_points - 1))
|
[
"tom.dupre-la-tour@m4x.org"
] |
tom.dupre-la-tour@m4x.org
|
200563d19042e582fedc874dc3439dc35a2edde5
|
925f2935b34042abc9161795413031ae68f45b9a
|
/multimodel_inference/fold_SC3imlsm.py
|
a33c67d65aca02ed924dbf5fb636eb928ca937a4
|
[] |
no_license
|
Farhad63/AFS-analysis-with-moments
|
7e1d17f47c06ed97ebb7c9ec8245fe52a88622c3
|
7874b1085073e5f62d910ef2d79a22b29ff3be84
|
refs/heads/master
| 2022-04-09T22:11:12.341235
| 2020-03-11T21:15:42
| 2020-03-11T21:15:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,801
|
py
|
#!/usr/bin/env python
# NOTE: Python 2 script (uses the print statement near the bottom).
# split, three epochs in each pop, symmetric migration in middle and late epochs
# genomic islands
# n(para): 13
# NOTE(review): the header says 13 parameters but the model below unpacks
# 12 (nu1_1..nu2_3, T1..T3, m, mi, P) — confirm which is correct.
import matplotlib
matplotlib.use('PDF')
import moments
import pylab
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
# CLI: infile pop1 pop2 proj1 proj2 mu gtime
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
projections=[int(sys.argv[4]),int(sys.argv[5])]
#params=[float(sys.argv[6]),float(sys.argv[7]),float(sys.argv[8]),float(sys.argv[9]),float(sys.argv[10]),float(sys.argv[11])]
params=[1,1,1,1,1,1,1,1,1,1,1,0.5]
# mutation rate per sequenced portion of genome per generation: for A.millepora, 0.02
mu=float(sys.argv[6])
# generation time, in thousand years: 0.005 (5 years)
gtime=float(sys.argv[7])
dd = Misc.make_data_dict(infile)
# set Polarized=False below for folded AFS analysis
data = Spectrum.from_data_dict(dd, pop_ids,projections,polarized=False)
ns=data.sample_sizes
np.set_printoptions(precision=3)

#-------------------
# split into unequal pop sizes with asymmetrical migration

def sc3imsm(params , ns):
    """Two-island, three-epoch model: no migration in epoch 1, symmetric
    migration (rate m) afterwards; a fraction P of the genome ("islands")
    instead migrates at rate mi. Returns the P-weighted mixture spectrum."""
#    p_misid: proportion of misidentified ancestral states
    nu1_1, nu2_1, nu1_2,nu2_2,nu1_3,nu2_3,T1, T2, T3,m,mi, P = params
    # Neutral background portion of the genome (migration rate m).
    sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
    fs = moments.Spectrum(sts)
    fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
    fs.integrate([nu1_1, nu2_1], T1, m = np.array([[0, 0], [0, 0]]))
    fs.integrate([nu1_2, nu2_2], T2, m = np.array([[0, m], [m, 0]]))
    fs.integrate([nu1_3, nu2_3], T3, m = np.array([[0, m], [m, 0]]))
    # Genomic-island portion: identical history, migration rate mi.
    stsi = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
    fsi = moments.Spectrum(stsi)
    fsi = moments.Manips.split_1D_to_2D(fsi, ns[0], ns[1])
    fsi.integrate([nu1_1, nu2_1], T1, m = np.array([[0, 0], [0, 0]]))
    fsi.integrate([nu1_2, nu2_2], T2, m = np.array([[0, mi], [mi, 0]]))
    fsi.integrate([nu1_3, nu2_3], T3, m = np.array([[0, mi], [mi, 0]]))
    fs2=P*fsi+(1-P)*fs
    return fs2
 
func=sc3imsm
upper_bound = [100, 100, 100,100,100, 100, 100, 100,100, 200,200,0.999]
lower_bound = [1e-3,1e-3, 1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-5,1e-5,0.001]
# Randomize the starting point, then optimize the log-likelihood.
params = moments.Misc.perturb_params(params, fold=2, upper_bound=upper_bound,
                              lower_bound=lower_bound)
poptg = moments.Inference.optimize_log(params, data, func,
                                   lower_bound=lower_bound,
                                   upper_bound=upper_bound,
                                   verbose=False, maxiter=30)
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)

# random index for this replicate
ind=str(random.randint(0,999999))

# plotting demographic model
plot_mod = moments.ModelPlot.generate_model(func, poptg, ns)
moments.ModelPlot.plot_model(plot_mod, save_file="sc3imlsm_"+ind+".png", pop_labels=pop_ids, nref=theta/(4*mu), draw_scale=False, gen_time=gtime, gen_time_units="KY", reverse_timeline=True)
 
# bootstrapping for SDs of params and theta
all_boot=moments.Misc.bootstrap(dd,pop_ids,projections)
uncert=moments.Godambe.GIM_uncert(func,all_boot,poptg,data)

# printing parameters and their SDs
print "RESULT","sc3imlsm",ind,len(params),ll_model,sys.argv[1],sys.argv[2],sys.argv[3],poptg,theta,uncert
                                    
# plotting quad-panel figure witt AFS, model, residuals:
moments.Plotting.plot_2d_comp_multinom(model, data, vmin=1, resid_range=3,
                                    pop_ids =pop_ids)
plt.savefig("sc3imlsm_"+ind+"_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]+'.pdf')
|
[
"matz@utexas.edu"
] |
matz@utexas.edu
|
9d8fad03fbcd9b26dbe40081fb48b3d40e173dda
|
6f3a7844321241ab2c46215a1ed3d1a246727c18
|
/MPs/MP1/common_friends.py
|
8e3b328d057d2b475f67ec5bb733adb500384c0c
|
[] |
no_license
|
gouthamp900/cs199-fa17
|
899fa345bf3c1c3d5eb745820a2c884216d7f657
|
4eb635c6a064dd2c61253654ca729769d995f563
|
refs/heads/master
| 2021-06-24T21:44:10.021054
| 2017-09-11T00:58:30
| 2017-09-11T00:58:30
| 103,450,202
| 4
| 0
| null | 2017-09-13T21:03:27
| 2017-09-13T21:03:27
| null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
from map_reducer import MapReduce
def friend_mapper(line):
    ''' write your code here! '''
    # Exercise stub: map one line of the friend graph; presumably should
    # emit key/value pairs for the reducer — see the assignment handout.
    pass
def friend_reducer(friend_tuples):
    ''' write your code here! '''
    # Exercise stub: reduce the grouped tuples for one key; presumably
    # should compute the common friends — see the assignment handout.
    pass
def _run_common_friend_finder(filename):
    # Feed every line of the graph file through the student-supplied
    # mapper/reducer and print one "relationship<TAB>friends" row per result.
    with open(filename) as f:
        lines = f.readlines()
    mr = MapReduce(friend_mapper, friend_reducer)
    common_friends = mr(lines)
    for relationship, friends in common_friends:
        print('{}\t{}'.format(relationship, friends))
if __name__ == '__main__':
    # Run on the small example graph first, then on the full dataset.
    print('friend_graph_example.txt')
    _run_common_friend_finder('friend_graph_example.txt')
    print('friend_graph.txt')
    _run_common_friend_finder('friend_graph.txt')
|
[
"bencongdon96@gmail.com"
] |
bencongdon96@gmail.com
|
8c32175a2770eff6b1c971db34e742e17ff04c5e
|
5d74051293a4740c597abb016870a56a58cecf5b
|
/modules/shared/infrastructure/passwords/django/__init__.py
|
d5c02785b58d9ae5462b7f2646e11fb08dc3e688
|
[
"BSD-3-Clause"
] |
permissive
|
eduardolujan/hexagonal_architecture_django
|
98e707148745f5a36f166c0584cfba21cca473f0
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
refs/heads/develop
| 2023-02-21T22:46:20.614779
| 2021-01-16T02:48:37
| 2021-01-16T02:48:37
| 305,813,872
| 5
| 2
|
BSD-3-Clause
| 2021-01-16T18:00:26
| 2020-10-20T19:32:46
|
Python
|
UTF-8
|
Python
| false
| false
| 171
|
py
|
# -*- coding: utf-8 -*-
from .password_creator import PasswordCreator
from .password_checker import PasswordChecker
__all__ = ('PasswordCreator', 'PasswordChecker', )
|
[
"eduardo.lujan.p@gmail.com"
] |
eduardo.lujan.p@gmail.com
|
bf02de413f3d6e40eb57f713c624fff6d8fbd472
|
87e60b0504be11c6997f1b20b72e9428cc128342
|
/python/cowbells/geom/surfaces.py
|
3e835c423e358ade9f4e13d82437578dd2b758dd
|
[] |
no_license
|
brettviren/cowbells
|
70a85856fdfc54526c847f115d5dc01ec85ec215
|
1ceca86383f4f774d56c3f159658518242875bc6
|
refs/heads/master
| 2021-01-10T18:44:41.531525
| 2014-04-09T15:17:29
| 2014-04-09T15:17:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
#!/usr/bin/env python
'''
Describe optical surfaces
'''
import base, volumes
store = []
class OpticalSurface(base.Base):
    """Named optical surface description; instances self-register in the
    module-level 'store' list. (Python 2 code: uses dict.iteritems.)"""

    # Known parameters
    known_parameters = ['type', 'model', 'finish', 'first', 'second',
                        'polish', 'sigmaalpha']

    # Known properties
    known_properties = ['RINDEX','REALRINDEX','IMAGINARYRINDEX',
                        'REFLECTIVITY','EFFICIENCY','TRANSMITTANCE',
                        'SPECULARLOBECONSTANT','SPECULARSPIKECONSTANT',
                        'BACKSCATTERCONSTANT']

    def __init__(self, name, **parameters):
        # Validate and store each keyword parameter, then register globally.
        self.name = name
        self.parameters = {}
        self.properties = {}
        for k,v in parameters.iteritems():
            self.add_parameter(k,v)
            continue
        store.append(self)
        return

    def add_parameter(self, key, value):
        # Rejects keys outside known_parameters; 'first'/'second' accept a
        # LogicalVolume and are normalized to the volume's name string.
        assert key in self.known_parameters, \
            'Unknown parameter given to surface %s: "%s"' % (self.name, key)
        if key in ['first','second']:
            if isinstance(value, volumes.LogicalVolume):
                value = value.name
        self.parameters[key] = value
        return

    def add_property(self, propname, x, y):
        # Store a tabulated property as parallel x/y arrays.
        self.properties[propname] = {'x':x, 'y':y}
        return
    pass
def get(surf):
    """Resolve *surf* to an OpticalSurface.

    Instances pass through unchanged; otherwise *surf* is treated as a name
    and looked up in the module store. Returns None when nothing matches.
    """
    if isinstance(surf, OpticalSurface):
        return surf
    return next((entry for entry in store if entry.name == surf), None)
# Serialize every registered surface to plain-old-data via the base helper.
def pod(): return base.pod(store)
|
[
"brett.viren@gmail.com"
] |
brett.viren@gmail.com
|
8210e6228b034876d6073be5b96b8126496060ab
|
1af78033850e5bbe7a66ad83a238b96e7e2f2778
|
/app/models/post.py
|
fd56443c27624b53a2c22c678039e88e9560e1e4
|
[
"MIT"
] |
permissive
|
Sean10/flask_demo
|
e7c0aed4a0633f03ded079cadec322dc4bdc6076
|
a04b284a1e812f5d291b67fbd04e3073063003f1
|
refs/heads/master
| 2020-03-27T22:22:30.677486
| 2018-09-03T15:55:10
| 2018-09-03T15:55:10
| 147,225,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,971
|
py
|
import datetime
import html
from bson.objectid import ObjectId
from ..utils import *
class Post:
    """Data-access layer for blog posts stored in MongoDB.

    All public methods return self.response, a dict with keys 'error'
    and 'data'; callers check 'error' instead of catching exceptions.
    NOTE(review): uses legacy pymongo APIs (insert/update/remove and
    cursor.count()) that were removed in pymongo 4 -- verify the pinned
    driver version.
    """
    def __init__(self, default_config):
        # Collection handle and debug switch come from the app config dict.
        self.collection = default_config['POSTS_COLLECTION']
        self.response = {'error': None, 'data': None}
        self.debug_mode = default_config['DEBUG']
    def get_posts(self, limit, skip, tag=None, search=None):
        '''
        List posts, newest first.

        :param limit: maximum number of posts to return (page size)
        :param skip: number of posts to skip (paging offset)
        :param tag: restrict to posts carrying this tag, if given
        :param search: case-insensitive regex matched against title,
                       body and preview, if given
        :return: self.response with 'data' holding a list of post dicts
        '''
        self.response['error'] = None
        cond = {}
        if tag is not None:
            cond = {'tags': tag}
        elif search is not None:
            cond = {'$or': [
                {'title': {'$regex': search, '$options': 'i'}},
                {'body': {'$regex': search, '$options': 'i'}},
                {'preview': {'$regex': search, '$options': 'i'}}]}
        try:
            cursor = self.collection.find(cond).sort(
                'date', direction=-1).skip(skip).limit(limit)
            self.response['data'] = []
            for post in cursor:
                # Backfill fields missing from older documents.
                if 'tags' not in post:
                    post['tags'] = []
                if 'comments' not in post:
                    post['comments'] = []
                if 'preview' not in post:
                    post['preview'] = ''
                self.response['data'].append({'id': post['_id'],
                                              'title': post['title'],
                                              'body': post['body'],
                                              'preview': post['preview'],
                                              'date': post['date'],
                                              'permalink': post['permalink'],
                                              'tags': post['tags'],
                                              'author': post['author'],
                                              'comments': post['comments']})
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Posts not found..'
        return self.response
    def get_post_by_permalink(self, permalink):
        '''Fetch a single post by permalink ('data' is None when absent).'''
        self.response['error'] = None
        try:
            self.response['data'] = self.collection.find_one(
                {'permalink': permalink})
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Post not found..'
        return self.response
    def get_post_by_id(self, post_id):
        '''Fetch a post by ObjectId string, reshaping it for the edit form:
        tags become a comma-joined string, preview defaults to ''.'''
        self.response['error'] = None
        try:
            self.response['data'] = self.collection.find_one(
                {'_id': ObjectId(post_id)})
            if self.response['data']:
                if 'tags' not in self.response['data']:
                    self.response['data']['tags'] = ''
                else:
                    self.response['data']['tags'] = ','.join(
                        self.response['data']['tags'])
                if 'preview' not in self.response['data']:
                    self.response['data']['preview'] = ''
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Post not found..'
        return self.response
    def get_total_count(self, tag=None, search=None):
        '''Count posts matching the same tag/search filter as get_posts.'''
        cond = {}
        if tag is not None:
            cond = {'tags': tag}
        elif search is not None:
            cond = {'$or': [
                {'title': {'$regex': search, '$options': 'i'}},
                {'body': {'$regex': search, '$options': 'i'}},
                {'preview': {'$regex': search, '$options': 'i'}}]}
        return self.collection.find(cond).count()
    def get_tags(self):
        '''Return the ten most used tags as [{'title': ..., 'count': ...}].'''
        self.response['error'] = None
        try:
            self.response['data'] = list(self.collection.aggregate([
                {'$unwind': '$tags'},
                {'$group': {'_id': '$tags', 'count': {'$sum': 1}}},
                {'$sort': {'count': -1}},
                {'$limit': 10},
                {'$project': {'title': '$_id', 'count': 1, '_id': 0}}
            ]))
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Get tags error..'
        return self.response
    def create_new_post(self, post_data):
        '''Insert a post document; 'data' receives the new _id on success.'''
        self.response['error'] = None
        try:
            self.response['data'] = self.collection.insert(post_data)
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Adding post error..'
        return self.response
    def edit_post(self, post_id, post_data):
        '''Update a post in place; date and permalink are never changed.'''
        self.response['error'] = None
        del post_data['date']
        del post_data['permalink']
        try:
            self.collection.update(
                {'_id': ObjectId(post_id)}, {"$set": post_data}, upsert=False)
            self.response['data'] = True
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Post update error..'
        return self.response
    def delete_post(self, post_id):
        '''Delete a post by id; 'data' is True on success.
        NOTE(review): get_post_by_id() returns the response dict, which is
        always truthy, so the guard effectively hinges on remove() alone.'''
        self.response['error'] = None
        try:
            if self.get_post_by_id(post_id) and self.collection.remove({'_id': ObjectId(post_id)}):
                self.response['data'] = True
            else:
                self.response['data'] = False
        except Exception as e:
            self.print_debug_info(e, self.debug_mode)
            self.response['error'] = 'Deleting post error..'
        return self.response
    @staticmethod
    def validate_post_data(post_data):
        '''HTML-escape user-supplied fields and stamp date/permalink.
        The permalink is a random 12-char string, not derived from the title
        (the title-slug code below is intentionally commented out).'''
        print("while")
        permalink = random_string(12)
        print(permalink)
        #exp = re.compile('\W')
        #whitespace = re.compile('\s')
        #temp_title = whitespace.sub("_", post_data['title'])
        #permalink = exp.sub('', temp_title)
        post_data['title'] = html.escape(post_data['title'])
        post_data['preview'] = html.escape(post_data['preview'], quote=True)
        post_data['body'] = html.escape(post_data['body'], quote=True)
        post_data['date'] = datetime.datetime.utcnow()
        post_data['permalink'] = permalink
        return post_data
    @staticmethod
    def print_debug_info(msg, show=False):
        '''If show is True, print exception type/file/line plus msg (green ANSI).'''
        if show:
            import sys
            import os
            error_color = '\033[32m'
            error_end = '\033[0m'
            error = {'type': sys.exc_info()[0].__name__,
                     'file': os.path.basename(sys.exc_info()[2].tb_frame.f_code.co_filename),
                     'line': sys.exc_info()[2].tb_lineno,
                     'details': str(msg)}
            print(error_color)
            print('\n\n---\nError type: %s in file: %s on line: %s\nError details: %s\n---\n\n'\
                % (error['type'], error['file'], error['line'], error['details']))
            print(error_end)
|
[
"sean10reborn@gmail.com"
] |
sean10reborn@gmail.com
|
ee8ca2cdad8861221f07769c684b849247fb52ab
|
5e20e9281c15587e8de2cce5b8eb342cae6b8645
|
/astrohut/examples/collision3d.py
|
3c0b384bc89b24bcdcfb12a8ce04744406d87dc6
|
[] |
no_license
|
jsbarbosa/astrohut
|
b6d0a76328d09f205a711b607e7fca4e9a51e178
|
c2f8b721ec3ea8396ce321d44d881aa92dfa94f3
|
refs/heads/master
| 2021-09-18T11:51:10.142641
| 2018-07-13T18:50:18
| 2018-07-13T18:50:18
| 85,311,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
py
|
import numpy as np
import astrohut as ah
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Two N-body clusters set on a collision course, integrated with
# astrohut's Barnes-Hut solver and animated in 3D.
G = 1.0
m = 1.0
N = 50
# Cluster positions: both sampled in the z=0 plane; the second cluster is
# offset to (3, 3) in x/y and lifted to z = 5.
pos1 = np.zeros((N, 3))
pos2 = np.zeros_like(pos1)
pos1[:, :2] = np.random.normal(size = (N, 2))
pos2[:, :2] = np.random.normal(loc = 3.0, size = (N, 2))
pos2[:, 2] = 5.0
# Initial velocities generated per cluster from its own configuration.
speeds1 = ah.generateSpeeds(pos1, G, m)
speeds2 = ah.generateSpeeds(pos2, G, m)
pos = np.vstack((pos1, pos2))
speeds = np.vstack((speeds1, speeds2))
system = ah.createArray(pos, speeds)
# 3D simulation; epsilon is the softening length for close encounters.
sim = ah.Simulation(system, dim = 3, dt = 1e-3, G = G, mass_unit = m, epsilon = 1e-2)
# 5000 steps, keeping a snapshot every 125 steps for the animation.
sim.start(5000, save_to_array_every = 125, print_progress = True)
# if boxes are wanted: boxed = True
ani = sim.makeAnimation()
sim.ax.set_xlim(-3, 5)
sim.ax.set_ylim(-3, 5)
# ani.save("collision3d.gif", writer="imagemagick", dpi = 72, fps = 12)
plt.show()
|
[
"js.barbosa10@uniandes.edu.co"
] |
js.barbosa10@uniandes.edu.co
|
71e5f0f9c68ae0973f094f30416e50780b207773
|
dea85fb5330baf4ed9a185f040b258ef02bfa844
|
/projecteuler/problem_005.py
|
813f0d0ce716ad19ed0a0c6c0d538c799d34d4af
|
[] |
no_license
|
icejoywoo/school
|
595018d08bb971171106101dcd463bc435f29eff
|
bfc9ac94d67e02264da28055a932a86602efb2dc
|
refs/heads/master
| 2022-07-24T02:43:37.747264
| 2016-04-29T07:14:42
| 2016-04-29T07:14:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
#!/usr/bin/env python2.7
# encoding: utf-8
from __future__ import division
import math
def prime(end):
    '''Yield the primes in [2, end] by trial division (Python 2: xrange).'''
    for i in xrange(2, end+1):
        flag = True
        for j in xrange(2, int(math.sqrt(i))+1):
            if i % j == 0:
                flag = False
        if flag:
            yield i
def prime_factor(number):
    '''Yield the prime factorisation of `number`, with multiplicity.'''
    n = number
    k = 1
    while n != 1:
        # Restart from the smallest prime each time a factor is divided out.
        for k in prime(number):
            if n % k == 0:
                yield k
                n = n / k
                break
if __name__ == '__main__':
    # Project Euler 5: smallest number evenly divisible by all of 1..20.
    # Keep, for each prime, the highest power seen in any factorisation.
    all_prime = {}
    for i in range(2, 21):
        prime_counter = {}
        for j in prime_factor(i):
            prime_counter.setdefault(j, 0)
            prime_counter[j] += 1
        for k, v in prime_counter.items():
            if all_prime.get(k, 0) < v:
                all_prime[k] = v
    print all_prime
    # The answer is the product of those maximal prime powers.
    r = reduce(lambda x, y: x * y, [k**v for k, v in all_prime.items()])
    for i in range(1, 21):
        print i, r / i
    print r
|
[
"icejoywoo@gmail.com"
] |
icejoywoo@gmail.com
|
b8ae343b776a8e117360d6f81ec56f8bb36bde1c
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/sARz4TDdxCuqK6pja_12.py
|
60b354a533deca46411ecac892304d607e16cdd0
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
"""
**Mubashir** needs your help to identify the spread of a deadly virus. He can
provide you with the following parameters:
* A two-dimensional array `persons`, containing **affected persons 'V'** and **unaffected persons 'P'**.
* Number of hours `n`, each infected person is spreading the virus to one person _up, down, left and right_ **each hour**.
Your function should return the updated array containing affected and
unaffected persons after `n` hours.
### Examples
persons = [
["P", "P", "P", "P", "P"],
["V", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"]
]
deadly_virus(persons, 0) ➞ [
["P", "P", "P", "P", "P"],
["V", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"]
]
deadly_virus(persons, 1) ➞ [
["V", "P", "P", "P", "P"],
["V", "V", "P", "P", "P"],
["V", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"]
]
deadly_virus(persons, 2) ➞ [
["V", "V", "P", "P", "P"],
["V", "V", "V", "P", "P"],
["V", "V", "P", "P", "P"],
["V", "P", "P", "P", "P"],
["P", "P", "P", "P", "P"]
]
### Notes
N/A
"""
def deadly_virus(people, n):
    """Simulate n hours of infection spread on a 2D grid.

    Each hour, every infected cell 'V' infects its four orthogonal
    neighbours.  The grid is mutated in place and also returned.
    """
    rows, cols = len(people), len(people[0])
    for _ in range(n):
        newly_infected = set()
        for r in range(rows):
            for c in range(cols):
                if people[r][c] != 'V':
                    continue
                for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < rows and 0 <= nc < cols:
                        newly_infected.add((nr, nc))
        # Apply the whole wave at once so this hour's infections do not
        # themselves spread until the next hour.
        for r, c in newly_infected:
            people[r][c] = 'V'
    return people
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0d32643ef83c8d8d718272d65f217f90ed5bc4bf
|
9505e191cb287507c7df05212ab562bea1eda553
|
/python_fishc/14.0.py
|
a439b8f61d38e52515082a5498ca97088d36971d
|
[
"MIT"
] |
permissive
|
iisdd/Courses
|
c7a662305f3efe7d61eb23f766381290b1107bb8
|
a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c
|
refs/heads/main
| 2023-04-15T17:40:36.474322
| 2021-04-27T14:31:42
| 2021-04-27T14:31:42
| 316,904,233
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
'''0. Write a password-strength checker. Code: check.py'''
# Password strength check
#
# Low-strength password:
# 1. consists solely of digits or solely of letters
# 2. length of 8 characters or fewer
#
# Medium-strength password:
# 1. must combine any two of: digits, letters, special characters (only: ~!@#$%^&*()_=-/,.?<>;:[]{}|\)
# 2. length must be at least 8 characters
#
# High-strength password:
# 1. must combine all three of: digits, letters and special characters (only: ~!@#$%^&*()_=-/,.?<>;:[]{}|\)
# 2. must start with a letter
# 3. length must be at least 16 characters
def check():
    '''Prompt for a password and grade it (prompts/verdicts are Chinese).

    Low: all-alphanumeric or length <= 8.  High: starts with a letter,
    length >= 16 and contains a special character.  Everything else is
    medium.  Returns True only for a high grade, which is what ends the
    retry loop below.
    '''
    symbol = '~!@#$%^&*()_=-/,.?<>;:[]{}|\\'
    test = input('请输入需要检查的密码组合:')
    length = len(test)
    flag = 0
    notice = '''请按以下方式提升宁的密码安全级别:
    1.密码必须由数字、字母及特殊字符三种组合
    2.密码只能由字母开头
    3.密码长度不能低于16位'''
    print('宁的密码安全级别评定为:' , end ='')
    # flag records whether any special character is present.
    for each in test:
        if each in symbol:
            flag = 1
            break
    if test.isalnum() or length <= 8:
        print('低')
        print(notice)
    elif test[0].isalpha() and length >= 16 and flag == 1 :
        print('高')
        print('请继续保持')
        return True
    else:
        print('中')
        print(notice)
# Keep asking until a high-strength password is entered.
while 1 :
    if check():
        break
|
[
"noreply@github.com"
] |
iisdd.noreply@github.com
|
6b2fb25453491c119aaf9cf115995aecb0ca1840
|
c077ee590d003ebada9e292bed0de8cc27fc1c7b
|
/other/sqllite.py
|
dccef0327b666dbc654e328b29879e2898378103
|
[] |
no_license
|
pwchen21/pros
|
bb744bc451a0ede2a31a6a5c74f7cda7f5abf206
|
9b48e2ec40a3eea12c79b89a00b2be60d65cc8d1
|
refs/heads/master
| 2023-07-07T11:50:52.599589
| 2023-06-24T19:46:02
| 2023-06-24T19:46:02
| 140,359,861
| 0
| 0
| null | 2020-10-19T17:53:52
| 2018-07-10T01:17:30
|
Python
|
UTF-8
|
Python
| false
| false
| 786
|
py
|
# One-off SQLite scratch script: connect to a local DB, (optionally) create
# and seed a USER table, then print matching rows.
import sqlite3
conn=sqlite3.connect(r'D:\se\py\db\test.db')
# Create Table
#conn.execute('CREATE TABLE USER ( `ID` INTEGER PRIMARY KEY AUTOINCREMENT, `NAME` TEXT NOT NULL, `NICKNAME` TEXT, `PASSWORD` TEXT, `MAIL` TEXT )')
# Insert Data
'''
conn.execute('INSERT INTO USER (NAME, NICKNAME) VALUES ("Tester1", "N1");')
conn.execute('INSERT INTO USER (NAME, NICKNAME) VALUES ("Tester2", "N2");')
conn.execute('INSERT INTO USER (NAME, NICKNAME) VALUES ("Tester2", "N2");')
'''
# Commit Insert
conn.commit()
# Get User Data
# NOTE(review): 'SELECT P from USER' names a column `P` that the schema above
# does not define, while the loop below indexes x[1]/x[2] as if several
# columns were selected -- this was probably meant to be 'SELECT * FROM USER'.
# Confirm against the actual database before relying on this script.
cursor=conn.execute('SELECT P from USER')
# Print Data in ROW
for x in cursor:
    #print('ID: ', x[0],' ','NAME:', x[1],' ', 'NICKNAME: ', x[2])
    if x[1] == 'Tester1':
        print('Nickname:', x[2])
conn.close()
#, (idr.get(), nic.get(), mailr.get(), pwr.get()))
|
[
"pwchen21@gmail.com"
] |
pwchen21@gmail.com
|
3321af51db5e0bf76d7c034134aa3971bf647c1d
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/__init__.py
|
126fc2da1e4c41925328ad15223c68599c5addbe
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406
| 2017-05-04T13:51:43
| 2017-05-04T13:51:43
| 90,268,530
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 6,865
|
py
|
# 2017.05.04 15:23:45 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/__init__.py
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.framework import ScopeTemplates
from gui.Scaleform.framework import ViewSettings, GroupedViewSettings, ViewTypes
from gui.Scaleform.framework.package_layout import PackageBusinessHandler
from gui.Scaleform.genConsts.CONTEXT_MENU_HANDLER_TYPE import CONTEXT_MENU_HANDLER_TYPE
from gui.Scaleform.genConsts.PREBATTLE_ALIASES import PREBATTLE_ALIASES
from gui.app_loader.settings import APP_NAME_SPACE
from gui.shared import EVENT_BUS_SCOPE
from gui.shared.utils.functions import getViewName
def getContextMenuHandlers():
    '''Return (handler-type, handler-class) pairs for prebattle-user context menus.'''
    from gui.Scaleform.daapi.view.lobby.prb_windows.PrebattleUserCMHandler import PrebattleUserCMHandler
    return ((CONTEXT_MENU_HANDLER_TYPE.PREBATTLE_USER, PrebattleUserCMHandler),)
def getViewSettings():
    '''Return the view/window registration tuples for this package.

    Imports are local to avoid import-time cycles (decompiled client code).
    '''
    from gui.Scaleform.daapi.view.lobby.prb_windows import invite_windows
    from gui.Scaleform.daapi.view.lobby.prb_windows.BattleSessionList import BattleSessionList
    from gui.Scaleform.daapi.view.lobby.prb_windows.BattleSessionWindow import BattleSessionWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.CompanyListView import CompanyListView
    from gui.Scaleform.daapi.view.lobby.prb_windows.CompanyMainWindow import CompanyMainWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.CompanyRoomView import CompanyRoomView
    from gui.Scaleform.daapi.view.lobby.SendInvitesWindow import SendInvitesWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.SquadPromoWindow import SquadPromoWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.squad_view import SquadView, FalloutSquadView
    from gui.Scaleform.daapi.view.lobby.prb_windows.squad_view import EventSquadView
    from gui.Scaleform.daapi.view.lobby.prb_windows.squad_window import SquadWindow, FalloutSquadWindow, EventSquadWindow
    from gui.Scaleform.daapi.view.lobby.prb_windows.SwitchPeripheryWindow import SwitchPeripheryWindow
    return (GroupedViewSettings(PREBATTLE_ALIASES.SEND_INVITES_WINDOW_PY, SendInvitesWindow, 'sendInvitesWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.SEND_INVITES_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY, invite_windows.AutoInviteWindow, 'receivedInviteWindow.swf', ViewTypes.WINDOW, 'receivedInviteWindow', None, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.SQUAD_WINDOW_PY, SquadWindow, 'squadWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.SQUAD_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.FALLOUT_SQUAD_WINDOW_PY, FalloutSquadWindow, 'squadWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.FALLOUT_SQUAD_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.EVENT_SQUAD_WINDOW_PY, EventSquadWindow, 'squadWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.EVENT_SQUAD_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.COMPANY_WINDOW_PY, CompanyMainWindow, 'companyMainWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.COMPANY_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.BATTLE_SESSION_ROOM_WINDOW_PY, BattleSessionWindow, 'battleSessionWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.BATTLE_SESSION_ROOM_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(PREBATTLE_ALIASES.BATTLE_SESSION_LIST_WINDOW_PY, BattleSessionList, 'battleSessionList.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.BATTLE_SESSION_LIST_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),
     GroupedViewSettings(VIEW_ALIAS.SQUAD_PROMO_WINDOW, SquadPromoWindow, 'squadPromoWindow.swf', ViewTypes.WINDOW, '', None, ScopeTemplates.DEFAULT_SCOPE),
     GroupedViewSettings(VIEW_ALIAS.SWITCH_PERIPHERY_WINDOW, SwitchPeripheryWindow, 'switchPeripheryWindow.swf', ViewTypes.TOP_WINDOW, '', None, ScopeTemplates.DEFAULT_SCOPE),
     ViewSettings(PREBATTLE_ALIASES.SQUAD_VIEW_PY, SquadView, None, ViewTypes.COMPONENT, None, ScopeTemplates.DEFAULT_SCOPE),
     ViewSettings(PREBATTLE_ALIASES.EVENT_SQUAD_VIEW_PY, EventSquadView, None, ViewTypes.COMPONENT, None, ScopeTemplates.DEFAULT_SCOPE),
     ViewSettings(PREBATTLE_ALIASES.FALLOUT_SQUAD_VIEW_PY, FalloutSquadView, None, ViewTypes.COMPONENT, None, ScopeTemplates.DEFAULT_SCOPE),
     ViewSettings(PREBATTLE_ALIASES.COMPANY_LIST_VIEW_PY, CompanyListView, None, ViewTypes.COMPONENT, None, ScopeTemplates.DEFAULT_SCOPE),
     ViewSettings(PREBATTLE_ALIASES.COMPANY_ROOM_VIEW_PY, CompanyRoomView, None, ViewTypes.COMPONENT, None, ScopeTemplates.DEFAULT_SCOPE))
def getBusinessHandlers():
    '''Return the business handlers instantiated for this view package.'''
    return (_PrbPackageBusinessHandler(),)
class _PrbPackageBusinessHandler(PackageBusinessHandler):
    '''Routes prebattle window events (SF lobby app, LOBBY bus scope) to loaders.'''
    def __init__(self):
        listeners = ((PREBATTLE_ALIASES.SQUAD_WINDOW_PY, self.__showPrebattleWindow),
         (PREBATTLE_ALIASES.EVENT_SQUAD_WINDOW_PY, self.__showPrebattleWindow),
         (PREBATTLE_ALIASES.FALLOUT_SQUAD_WINDOW_PY, self.__showPrebattleWindow),
         (PREBATTLE_ALIASES.COMPANY_WINDOW_PY, self.__showCompanyMainWindow),
         (PREBATTLE_ALIASES.BATTLE_SESSION_ROOM_WINDOW_PY, self.__showPrebattleWindow),
         (PREBATTLE_ALIASES.BATTLE_SESSION_LIST_WINDOW_PY, self.__showPrebattleWindow),
         (PREBATTLE_ALIASES.SEND_INVITES_WINDOW_PY, self.__showPrebattleWindow),
         (PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY, self.__showAutoInviteWindow),
         (VIEW_ALIAS.SQUAD_PROMO_WINDOW, self.loadViewByCtxEvent),
         (VIEW_ALIAS.SWITCH_PERIPHERY_WINDOW, self.loadViewByCtxEvent))
        super(_PrbPackageBusinessHandler, self).__init__(listeners, APP_NAME_SPACE.SF_LOBBY, EVENT_BUS_SCOPE.LOBBY)
    def __showPrebattleWindow(self, event):
        '''Load a window whose alias and view name both equal the event type.'''
        alias = name = event.eventType
        self.loadViewWithDefName(alias, name, event.ctx)
    def __showAutoInviteWindow(self, event):
        '''Load the auto-invite window, keyed per prebattle id so several can coexist.'''
        alias = PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY
        name = getViewName(PREBATTLE_ALIASES.AUTO_INVITE_WINDOW_PY, event.ctx.get('prbID'))
        self.loadViewWithDefName(alias, name, event.ctx)
    def __showCompanyMainWindow(self, event):
        '''Reuse an already-open company window if present, else load a new one.'''
        alias = name = PREBATTLE_ALIASES.COMPANY_WINDOW_PY
        window = self.findViewByAlias(ViewTypes.WINDOW, alias)
        if window is not None:
            window.updateWindowState(event.ctx)
        else:
            self.loadViewWithDefName(alias, name, event.ctx if event else None)
        return
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\prb_windows\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:23:45 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
ab5ccec07088f8d9d3787c3d31cfc69fcc04f54f
|
bb8838e3eec624fd35a61d6d646f941eac1b266a
|
/saga/utils/threads.py
|
69f6f1c07f22481e262743a916595b2708709f1f
|
[
"MIT"
] |
permissive
|
agrill/saga-python
|
55087c03e72635ffbb2fe1ca56b5cc02b7ff2094
|
35101e3a40d3cfcb39cb9f0d0c5f64c6f8de5930
|
refs/heads/master
| 2021-01-22T10:14:11.922145
| 2013-11-19T14:38:50
| 2013-11-19T14:38:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,604
|
py
|
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import sys
import threading
import saga.exceptions as se
import saga.utils.misc as sumisc
_out_lock = threading.RLock ()
# ------------------------------------------------------------------------------
#
NEW = 'New'
RUNNING = 'Running'
FAILED = 'Failed'
DONE = 'Done'
# ------------------------------------------------------------------------------
#
def lout (txt, stream=sys.stdout) :
    '''Thread-safe write-and-flush of txt to stream (serialized via _out_lock).'''
    with _out_lock :
        stream.write (txt)
        stream.flush ()
# ------------------------------------------------------------------------------
#
class Thread (threading.Thread) : pass  # subclassable alias for threading.Thread
def Event (*args, **kwargs) :
    '''Factory wrapper: create and return a threading.Event.'''
    return threading.Event (*args, **kwargs)
# ------------------------------------------------------------------------------
#
class RLock (object) :
    '''
    Thin wrapper around threading.RLock which supports subclassing and the
    context-manager protocol.  The commented-out lines are leftover debug
    tracing of lock acquire/release ordering.
    '''
    # see http://stackoverflow.com/questions/6780613/
    # is-it-possible-to-subclass-lock-objects-in-python-if-not-other-ways-to-debug
    # --------------------------------------------------------------------------
    #
    def __init__ (self, obj=None) :
        self._lock = threading.RLock ()
        # with self._lock :
        #     self._obj = obj
        #     self._cnt = 0
    # --------------------------------------------------------------------------
    #
    def acquire (self) :
        # ind = (self._cnt)*' '+'>'+(30-self._cnt)*' '
        # lout ("%s -- %-10s %50s acquire  - %s\n" % (ind, threading.current_thread().name, self, self._lock))
        self._lock.acquire ()
        # self._cnt += 1
        # ind = (self._cnt)*' '+'|'+(30-self._cnt)*' '
        # lout ("%s    %-10s %50s acquired - %s\n" % (ind, threading.current_thread().name, self, self._lock))
    # --------------------------------------------------------------------------
    #
    def release (self) :
        # ind = (self._cnt)*' '+'-'+(30-self._cnt)*' '
        # lout ("%s    %-10s %50s release  - %s\n" % (ind, threading.current_thread().name, self, self._lock))
        self._lock.release ()
        # self._cnt -= 1
        # ind = (self._cnt)*' '+'<'+(30-self._cnt)*' '
        # lout ("%s -- %-10s %50s released - %s\n" % (ind, threading.current_thread().name, self, self._lock))
    # --------------------------------------------------------------------------
    #
    def __enter__ (self) : self.acquire ()
    def __exit__  (self, type, value, traceback) : self.release ()
# ------------------------------------------------------------------------------
#
class SagaThread (Thread) :
    '''Run a single callable in a thread and record its outcome.

    State moves NEW -> RUNNING -> DONE (or FAILED).  `result` is only
    available once DONE, `exception` only once FAILED.
    '''

    def __init__ (self, call, *args, **kwargs) :
        '''Wrap `call`; it is invoked with *args/**kwargs when the thread starts.'''
        if not callable (call) :
            raise se.BadParameter ("Thread requires a callable to function, not %s" \
                                 % (str(call)))

        Thread.__init__ (self)

        self._call      = call
        self._args      = args
        self._kwargs    = kwargs
        self._state     = NEW
        self._result    = None
        self._exception = None
        self.daemon     = True  # do not block interpreter exit

    @classmethod
    def Run (self, call, *args, **kwargs) :
        '''Convenience constructor: create the thread and start it immediately.'''
        t = self (call, *args, **kwargs)
        t.start ()
        return t

    @property
    def tid (self) :
        '''OS-level id of the underlying thread (None before start()).

        BUGFIX: the original returned `self.tid`, which re-enters this
        property and recurses forever -- delegate to Thread.ident instead.
        '''
        return self.ident

    def run (self) :
        '''Thread main: invoke the wrapped callable and capture result/exception.'''
        try :
            self._state  = RUNNING
            self._result = self._call (*self._args, **self._kwargs)
            self._state  = DONE

        except Exception as e :
            # print-function form behaves identically under Python 2 and 3
            print (' ========================================== ')
            print (repr(e))
            print (' ========================================== ')
            print (str(e))
            print (' ========================================== ')
            print (sumisc.get_trace ())
            print (' ========================================== ')
            self._exception = e
            self._state     = FAILED

    def wait (self) :
        '''Block until the thread has finished (no-op if it never started).'''
        if self.is_alive () :
            self.join ()

    def cancel (self) :
        # FIXME: this is not really implementable generically, so we ignore
        # cancel requests for now.
        pass

    def get_state (self) :
        '''Current lifecycle state: NEW, RUNNING, DONE or FAILED.'''
        return self._state

    state = property (get_state)

    def get_result (self) :
        '''Return the callable's return value, or None unless state is DONE.'''
        if not self._state == DONE :
            return None
        return self._result

    result = property (get_result)

    def get_exception (self) :
        '''Return the captured exception, or None unless state is FAILED.'''
        if not self._state == FAILED :
            return None
        return self._exception

    exception = property (get_exception)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[
"andre@merzky.net"
] |
andre@merzky.net
|
d2712d6ad0380ba56c92dbda2082dd9c9a137afa
|
c2f35e5d3cfbbb73188a0cd6c43d161738e63bd1
|
/07-mini-web框架/06-通过传递字典实现浏览器请求的资源不一样得到响应不一样/web_server.py
|
d55807d19181b3f74fd963dd53e7597e6e885e4b
|
[] |
no_license
|
yangh-zzf-itcast/Python_heima_Study
|
2a7cd0d801d9d6f49548905d373bb409efc4b559
|
7d753c1cdd5c46a0e78032e12b1d2f5d9be0bf68
|
refs/heads/master
| 2020-04-30T06:59:04.000451
| 2019-04-19T12:15:30
| 2019-04-19T12:15:30
| 176,670,172
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,940
|
py
|
import socket
import re
import multiprocessing
import mini_frame # 逻辑处理代码模块
class WSGIServer(object):
    """Minimal WSGI-style web server: one child process per client connection."""
    def __init__(self):
        # 1. Create the listening socket
        self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restart: reuse the address even while in TIME_WAIT
        self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 2. Bind
        self.tcp_server_socket.bind(("", 7890))
        # 3. Listen
        self.tcp_server_socket.listen(128)
    def service_client(self, tcp_client_socket):
        """Serve one HTTP request on an accepted client socket, then close it."""
        # 1. Receive the HTTP request sent by the browser, e.g.
        # GET /index.html HTTP/1.1
        # ......
        #
        # Decode the raw request bytes
        request = tcp_client_socket.recv(1024).decode("utf-8")
        print(request)
        try:
            # Split the request into lines; the first line is the request
            # line and names the resource the browser wants
            request_lines = request.splitlines()
            ret = re.match(r"[^/]+(/[^ ]*)", request_lines[0])
            # Extract the path, e.g. /index.html
            if ret:
                file_name = ret.group(1)
                if file_name == "/":
                    file_name = "/index.html"
                else:
                    pass
        except IndexError:
            file_name = "/index.html"
        # 2. Send an HTTP response back to the browser.
        # Anything not ending in .py is treated as a static resource
        # (html/css/js/png/jpg, ...)
        if not file_name.endswith(".py"):
            try:
                f = open("./html" + file_name, "rb")
            except:
                response = "HTTP/1.1 404 NOT FOUND\r\n"
                response += "\r\n"
                response += "------file not found------"
                tcp_client_socket.send(response.encode("utf-8"))
            else:
                html_content = f.read()
                f.close()
                # 2.1 Response header; the bare \r\n separates header from body
                response = "HTTP/1.1 200 OK\r\n"
                response += "\r\n"
                # 2.2 Response body
                # response += "<h1>YangHang love ZhangZifan</h1>"
                # Send the header, then the requested file's content
                tcp_client_socket.send(response.encode("utf-8"))
                tcp_client_socket.send(html_content)
        else:
            # A .py path is a dynamic request handled by the mini framework
            # body = "hhhh"
            # if file_name == "/login.py":
            #     body = mini_frame.login()
            # WSGI protocol: pass request info and a start-response callback
            env = dict()  # dict describing what the browser asked for
            env['PATH_INFO'] = file_name
            body = mini_frame.application(env, self.set_response_header)
            header = "HTTP/1.1 %s\r\n" % self.status
            # Serialize the (name, value) response-header tuples
            for temp in self.headers:
                header +="%s:%s\r\n" % (temp[0], temp[1])
            header += "\r\n"
            response = header + body
            tcp_client_socket.send(response.encode("utf-8"))
        # Close the per-client socket
        tcp_client_socket.close()
    # start_response callback handed to the framework's application();
    # stores the status line and headers on the instance for later use.
    def set_response_header(self, status, headers):
        self.status = status
        # Server-specific headers are added here, kept separate from the
        # framework's own headers, which are appended afterwards.
        self.headers = [('server:','mini_web v1.0')]
        self.headers += headers
    def run_forever(self):
        """Accept loop: fork one child process per incoming connection."""
        while True:
            # 4. Wait for a new client connection
            new_socket, client_addr = self.tcp_server_socket.accept()
            # 5. Serve this client in a child process
            p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))
            p.start()
            # The parent's copy of the connection socket must be closed
            new_socket.close()
        # Close the listening socket (unreachable while the loop runs)
        self.tcp_server_socket.close()
def main():
    """Create a WSGIServer object and run its accept loop forever."""
    wsgi_server = WSGIServer()
    wsgi_server.run_forever()
if __name__ == "__main__":
    main()
|
[
"2459846416@qq.com"
] |
2459846416@qq.com
|
470f082d4e4f775112b238965cc902c710b8d8b6
|
521efcd158f4c69a686ed1c63dd8e4b0b68cc011
|
/airflow/api_connexion/endpoints/version_endpoint.py
|
077d7f8a1cfe4dcc05b12aafbc14528af5d0c696
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
coutureai/RaWorkflowOrchestrator
|
33fd8e253bfea2f9a82bb122ca79e8cf9dffb003
|
cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f
|
refs/heads/main
| 2022-10-01T06:24:18.560652
| 2021-12-29T04:52:56
| 2021-12-29T04:52:56
| 184,547,783
| 5
| 12
|
Apache-2.0
| 2022-11-04T00:02:55
| 2019-05-02T08:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import NamedTuple, Optional
import airflow
from airflow.api_connexion.schemas.version_schema import version_info_schema
from airflow.api_connexion.types import APIResponse
from airflow.utils.platform import get_airflow_git_version
class VersionInfo(NamedTuple):
    """Version information"""
    version: str  # value of airflow.__version__
    git_version: Optional[str]  # value from get_airflow_git_version(), may be None
def get_version() -> APIResponse:
    """Get version information.

    Combines the running Airflow version and the recorded git version
    into a VersionInfo and serializes it via version_info_schema.
    """
    airflow_version = airflow.__version__
    git_version = get_airflow_git_version()
    version_info = VersionInfo(version=airflow_version, git_version=git_version)
    return version_info_schema.dump(version_info)
|
[
"noreply@github.com"
] |
coutureai.noreply@github.com
|
a5fdff54dca575404713d802a9baf77ff4c1e16c
|
72579db4299be6d512a766ce38ae50e3c7753368
|
/.history/Pythonlearning/day9_20200802095738.py
|
8bf2b42a5e3793092b534eb4601b7c71e24450db
|
[] |
no_license
|
moteily/Python_Learning
|
f0d1abf360ad417112051ba52f32a141452adb2d
|
c294aa1e373254739fb372918507cd7dbe12c999
|
refs/heads/master
| 2022-11-26T11:09:48.145308
| 2020-08-04T08:47:15
| 2020-08-04T08:47:15
| 284,379,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
# Continuing chapter 9 from the previous day.
# Static methods and class methods:
# Definition and representation: static methods and class methods are wrapped
# in staticmethod and classmethod objects respectively.  A static method has
# no `self` parameter and can be called directly on the class.  A class
# method takes a self-like parameter conventionally named `cls`; it can also
# be called on an instance, in which case `cls` is still bound to the class:
class Myclass:
    def smeth():
        print('This is a static method')
    smeth = staticmethod(smeth)
    def cmeth(cls):  # the class-method-specific parameter
        print('This is a class method of ', cls)
    cmeth = classmethod(cmeth)
# Wrapping and rebinding methods by hand like this is tedious.  Decorators
# were introduced to wrap methods (in fact any callable, methods and
# functions alike) declaratively: list one or more decorators with the @
# operator above the definition (multiple decorators are applied in reverse
# order of listing).
class Myclass:
    @staticmethod
    def smeth():
        print('This is a static method')
    @classmethod
    def cmeth(cls):
        print('This is a class method of',cls)
# With these definitions in place, call them on the class (no instance needed):
Myclass.smeth()
Myclass.cmeth()
# __getattr__, __setattr__ and related methods can intercept every attempt
# to access an object's attributes.  One use is implementing properties in
# old-style classes (where the property function may not behave as
# expected); running code on attribute access requires these magic methods.
|
[
"994283977@qq.com"
] |
994283977@qq.com
|
9838043a9799e8e36f6c7fa133e5dbde8d92d24a
|
34ef7e2955415e92806dd918df0013e39539b99c
|
/tests/test_scheduler.py
|
919cedf0d2555b0e2251e3e1afc728d549fe3981
|
[] |
no_license
|
MasoniteFramework/scheduler
|
d2e843ba845e2cfe97197dbc50093f0b1ac9a07f
|
d783bef47df49893fdfc6dc761197fd5f1bb047c
|
refs/heads/3.0
| 2021-06-14T23:21:33.951389
| 2020-09-18T15:09:34
| 2020-09-18T15:09:34
| 135,189,744
| 1
| 2
| null | 2023-08-21T05:57:56
| 2018-05-28T17:17:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,189
|
py
|
import pytest
import pendulum
from src.masonite.scheduler.Task import Task
class MockTask(Task):
    """Minimal Task subclass used as the scheduler test fixture (every 5 minutes, New York time)."""
    run_every = "5 minutes"
    timezone = "America/New_York"
class TestScheduler:
    """Tests for Task.should_run across run_every / run_at / twice_daily options."""
    def setup_method(self):
        # Fresh task per test so per-test option mutations do not leak.
        self.task = MockTask()
    def test_scheduler_should_run(self):
        assert self.task.run_every == "5 minutes"
        time = pendulum.now().on(2018, 5, 21).at(22, 5, 5)
        self.task._date = time
        assert self.task.should_run(time) == True
        time = pendulum.now().on(2018, 5, 21).at(22, 6, 5)
        self.task._date = time
        assert self.task.should_run(time) == False
    def test_scheduler_should_run_every_minute(self):
        # With "1 minute" every minute qualifies.
        self.task.run_every = "1 minute"
        time = pendulum.now().on(2018, 5, 21).at(22, 5, 5)
        self.task._date = time
        assert self.task.should_run(time) == True
        time = pendulum.now().on(2018, 5, 21).at(22, 6, 5)
        self.task._date = time
        assert self.task.should_run(time) == True
    def test_scheduler_should_run_every_2_minutes(self):
        self.task.run_every = "2 minutes"
        time = pendulum.now().on(2018, 5, 21).at(14, 56, 5)
        self.task._date = time
        assert self.task.should_run(time) == True
        time = pendulum.now().on(2018, 5, 21).at(14, 58, 5)
        self.task._date = time
        assert self.task.should_run(time) == True
    def test_scheduler_should_run_every_hour(self):
        self.task.run_every = "1 hour"
        time = pendulum.now().on(2018, 5, 21).at(2, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == True
        time = pendulum.now().on(2018, 5, 21).at(3, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == True
        # "2 hours" only fires on even hours.
        self.task.run_every = "2 hours"
        time = pendulum.now().on(2018, 5, 21).at(2, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == True
        self.task.run_every = "2 hours"
        time = pendulum.now().on(2018, 5, 21).at(3, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == False
        time = pendulum.now().on(2018, 5, 21).at(4, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == True
    def test_scheduler_should_run_every_days(self):
        self.task.run_every = "2 days"
        time = pendulum.now().on(2018, 5, 21).at(0, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == False
        time = pendulum.now().on(2018, 5, 23).at(0, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == False
        # run_at narrows the qualifying time of day.
        self.task.run_at = "5:30"
        time = pendulum.now().on(2018, 5, 22).at(5, 30, 0)
        self.task._date = time
        assert self.task.should_run(time) == True
        self.task.run_at = "5:35"
        time = pendulum.now().on(2018, 5, 22).at(5, 30, 0)
        self.task._date = time
        assert self.task.should_run(time) == False
    def test_scheduler_should_run_every_months(self):
        self.task.run_every = "2 months"
        time = pendulum.now().on(2018, 1, 1).at(0, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == False
        time = pendulum.now().on(2018, 2, 1).at(0, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == True
        time = pendulum.now().on(2018, 2, 1).at(10, 0, 1)
        self.task._date = time
        assert self.task.should_run(time) == False
        self.task.run_at = "5:30"
        time = pendulum.now().on(2018, 2, 1).at(5, 30, 0)
        self.task._date = time
        assert self.task.should_run(time) == False
    def test_twice_daily_at_correct_time(self):
        time = pendulum.now().on(2018, 1, 1).at(1, 20, 5)
        self.task.run_every = ""
        self.task.twice_daily = (1, 13)
        self.task._date = time
        assert self.task.should_run()
        time = pendulum.now().on(2018, 1, 1).at(13, 20, 5)
        self.task._date = time
        assert self.task.should_run()
    def test_twice_daily_at_incorrect_time(self):
        # 12:20 is neither of the (1, 13) hours.
        time = pendulum.now().on(2018, 1, 1).at(12, 20, 5)
        self.task.run_every = ""
        self.task.twice_daily = (1, 13)
        self.task._date = time
        assert self.task.should_run() is False
    def test_run_at(self):
        self.task.run_every = ""
        self.task.run_at = None
        self.task.run_at = "13:00"
        time = pendulum.now().on(2018, 1, 1).at(13, 0, 5)
        self.task._date = time
        self.task.run_at = "13:05"
        time = pendulum.now().on(2018, 1, 1).at(13, 5, 5)
        self.task._date = time
        assert self.task.should_run() is True
        time = pendulum.now().on(2018, 1, 1).at(13, 6, 5)
        self.task._date = time
        assert self.task.should_run() is False
    def test_method_calls(self):
        # at()/every_minute() are the fluent equivalents of run_at/run_every.
        task = MockTask()
        task.at("13:00")
        time = pendulum.now().on(2018, 1, 1).at(13, 0, 5)
        task._date = time
        task = MockTask()
        task.every_minute()
        time = pendulum.now().on(2018, 5, 21).at(22, 5, 5)
        task._date = time
        assert task.should_run(time) == True
|
[
"idmann509@gmail.com"
] |
idmann509@gmail.com
|
052968e050a51a8a22ec5d942182c99cb8f68f01
|
3d96cee3f0c986c7195e7677d85e91dc837d8dd4
|
/Web/E/4/4.9/sql.py
|
709d060270f8e011120ba344fdb61e742a11439c
|
[] |
no_license
|
dannycrief/full-stack-web-dev-couse
|
7faffe1c9e6c39baf03d6ee54f716e4f8b4c8733
|
0b22bc84742d8e78bd6a2e03adfbc44137f3d607
|
refs/heads/master
| 2023-01-12T09:25:16.378035
| 2021-03-21T16:51:18
| 2021-03-21T16:51:18
| 220,825,261
| 0
| 1
| null | 2023-01-05T12:57:14
| 2019-11-10T17:34:02
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
from sqlalchemy import create_engine, MetaData, Table, and_, or_, asc
# Connect to the local PostgreSQL "movies" database via SQLAlchemy.
# NOTE(review): credentials are hard-coded in the URL — move them to an
# environment variable or config file before sharing/deploying.
engine = create_engine('postgresql+psycopg2://postgres:211217ns@localhost:5433/movies')
conn = engine.connect()
|
[
"step.kozbvb@gmail.com"
] |
step.kozbvb@gmail.com
|
e223be854296cb648b6cd4f1db9b6eb064402213
|
d780df6e068ab8a0f8007acb68bc88554a9d5b50
|
/python/foreman/tests/testdata/path1/pkg1/pkg2/build.py
|
cda68e9a8ee9ba90409e8ffba39667c3487d8d67
|
[
"MIT"
] |
permissive
|
clchiou/garage
|
ed3d314ceea487b46568c14b51e96b990a50ed6f
|
1d72863d3a5f5d620b170f4dd36f605e6b72054f
|
refs/heads/master
| 2023-08-27T13:57:14.498182
| 2023-08-15T07:09:57
| 2023-08-15T19:53:52
| 32,647,497
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
from foreman import define_parameter, define_rule, get_relpath
# Test-data build file: sanity-checks that foreman loaded this module with
# the expected package name, file path, and relative path.
if __name__ != 'pkg1.pkg2':
    raise AssertionError('incorrect __name__: %s' % __name__)
if not __file__.endswith('tests/testdata/path1/pkg1/pkg2/build.py'):
    raise AssertionError('incorrect __file__: %s' % __file__)
if str(get_relpath()) != 'pkg1/pkg2':
    raise AssertionError('incorrect relpath: %s' % get_relpath())
# Guard against the loader executing this module body more than once.
# NOTE(review): COUNT is re-bound to 0 on every execution, so this only fires
# if the loader re-runs the body in the *same* namespace — confirm intended.
COUNT = 0
if COUNT > 0:
    raise AssertionError('load more than once')
COUNT += 1
define_parameter('par_x')
define_rule('rule_x').depend('//pkg1:pkg1')
|
[
"clchiou@gmail.com"
] |
clchiou@gmail.com
|
1c7e24b97e0bbeab4768fbcfa5cbbc723708b0a6
|
a8fffbce7bd4d4e7e91f07b7aaaf0801ca64686e
|
/0x0F-python-object_relational_mapping/11-model_state_insert.py
|
f8589486bf72442bd5c4c25a9548a01450e5c593
|
[] |
no_license
|
bmuha1/holbertonschool-higher_level_programming
|
8f603c07e4b3cb87d89c3a1fff9fd5cdef5bc9f5
|
79cca6ecb77ed8de65b55bcdd715a3a923c5cb3a
|
refs/heads/master
| 2020-07-22T15:52:04.069523
| 2020-02-13T23:29:50
| 2020-02-13T23:29:50
| 207,251,416
| 2
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/python3
"""
Write a script that adds the State object “Louisiana” to the database
hbtn_0e_6_usa
"""
if __name__ == "__main__":
    # Imports are local to the main guard so importing this module has no
    # side effects.
    from sys import argv
    from model_state import Base, State
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy import create_engine
    # argv: [script, mysql_user, mysql_password, database_name]
    engine = create_engine(
        'mysql+mysqldb://{}:{}@localhost/{}'.format(argv[1], argv[2], argv[3]),
        pool_pre_ping=True)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # Insert the new State row and print its auto-generated primary key.
    louisiana = State(name='Louisiana')
    session.add(louisiana)
    session.commit()
    print(louisiana.id)
    session.close()
|
[
"800@holbertonschool.com"
] |
800@holbertonschool.com
|
aa20facdce0abd3184f7a0b97c113bf2ae0b90f4
|
11656c882c83bb5ea364b7c92f763788fa4db5ae
|
/Pokhi/Pokhi/Rest/config.py
|
3a97163750987d4bda5f85416a3424376b97920f
|
[] |
no_license
|
abhijeetdtu/pokhiin
|
e28d22bd38975a1d25c5425c34a1ce6dce79b65e
|
1eb512924da7c59e18dcf0c95819fd8d9e85c03d
|
refs/heads/master
| 2021-05-14T04:03:07.621127
| 2018-04-28T18:08:51
| 2018-04-28T18:08:51
| 116,633,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import os
class Config:
    """Environment-style configuration paths, resolved at import time.

    ENV keys:
        DATA_DIR      -- data directory (<two levels above this file>/static/Data,
                         overridable via the OPENSHIFT_DATA_DIR env var)
        UPLOAD_FOLDER -- directory for uploaded files, created if missing
        OPEN_CV_HOME  -- path to OpenCV's bundled data files
    """
    ENV = {}
    ENV["DATA_DIR"] = os.path.abspath(os.path.join(os.path.realpath(__file__), "../../static/Data/"))
    print(ENV["DATA_DIR"])
    # Allow OpenShift deployments to relocate the data directory.
    if('OPENSHIFT_DATA_DIR' in os.environ):
        ENV["DATA_DIR"] = os.environ['OPENSHIFT_DATA_DIR']
    ENV["UPLOAD_FOLDER"] = os.path.abspath(os.path.join( ENV["DATA_DIR"], "UploadedFiles/"))
    print(ENV["DATA_DIR"])
    print(ENV["UPLOAD_FOLDER"])
    # NOTE(review): machine-specific Windows path — presumably only valid on the
    # original developer's box; confirm before relying on it elsewhere.
    ENV["OPEN_CV_HOME"] = "C:\\Users\\Abhijeet\\Downloads\\OpenCv\\opencv\\sources\\data"
    # Bug fix: os.mkdir raised FileNotFoundError when the intermediate
    # directories (e.g. static/Data) did not exist yet. makedirs creates the
    # whole chain, and exist_ok avoids a race with concurrent creation.
    os.makedirs(ENV["UPLOAD_FOLDER"], exist_ok=True)
|
[
"abhijeetdtu@gmail.com"
] |
abhijeetdtu@gmail.com
|
d1da6360b081ce9719d4418c3a06f2e027120c06
|
e44ff4069f5b559954e7a66685c86b054a70de7a
|
/MockVita 2/digit_pairs.py
|
72aa08d4b84cb9d5d1e7233c8d5b7013e00f0f86
|
[] |
no_license
|
SayanDutta001/Competitive-Programming-Codes
|
2912985e037f83bcde8e7fcb0036f1e31fa626df
|
6dac061c0a4b1c5e82b99ec134e9e77606508e15
|
refs/heads/master
| 2023-03-17T04:25:47.507594
| 2021-03-05T16:23:09
| 2021-03-05T16:23:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
def bitscore(s):
    """Return the (at most two-digit) "bit score" of digit string s.

    The score is 11 * (largest digit) + 7 * (smallest digit); when the result
    reaches three digits (>= 100) the leading digit is dropped, keeping only
    the trailing two digits. Always returned as a string.
    """
    # Fix: removed the unused local `l = list(s)` — max/min work on the
    # string directly.
    score = int(max(s))*11 + int(min(s))*7
    if score >= 100:
        # Maximum possible score is 9*11 + 9*7 = 162, so [1:] keeps the
        # last two digits.
        return str(score)[1:]
    return str(score)
def pairs(a):
    """Count "pairs" among the bit-score strings in a.

    Two entries pair when their (1-based) positions share the same parity,
    their leading characters match, and that leading character has appeared
    in fewer than two previous pairs (tracked via t).
    """
    count = 0
    t = []  # leading characters already consumed by pairs (each may recur)
    for i in range(len(a)):
        for j in range(i+1, len(a)):
            # Both positions even (1-based) with matching leading char.
            if(((i+1)%2==0) and ((j+1)%2==0) and (t.count(a[i][0])<2) and (a[i][0] == a[j][0])):
                count += 1
                t.append(a[i][0])
            # Both positions odd (1-based) with matching leading char.
            elif(((i+1)%2==1) and ((j+1)%2==1) and (t.count(a[i][0])<2) and (a[i][0] == a[j][0])):
                count += 1
                t.append(a[i][0])
    return count
# Read n (consumed but otherwise unused) and the numbers, convert each to
# its bit score, then count and print the pairs.
n = int(input())
a = []
s = list(input().split())
for i in s:
    a.append(bitscore(i))
#print(a)
print(pairs(a))
|
[
"khanujabhupinder09@gmail.com"
] |
khanujabhupinder09@gmail.com
|
e080a1ca9f234923883169d8071f48e08ec53e81
|
f6078890ba792d5734d289d7a0b1d429d945a03a
|
/hw4/submissions/duongmatthew/duongmatthew_24972_1303227_HW_4_3-1.py
|
5dd52c46487e761c59542a30f9077f7a76a40c30
|
[] |
no_license
|
huazhige/EART119_Lab
|
1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0
|
47931d6f6a2c7bc053cd15cef662eb2f2027712c
|
refs/heads/master
| 2020-05-04T23:40:53.709217
| 2019-06-11T18:30:45
| 2019-06-11T18:30:45
| 179,552,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 13:35:08 2019
- A new function, "my_Newton", that solves for a root depending on how small
the difference between the current and last fct value is, rather than how small
the fct value, itself, is.
author: maduong
"""
import numpy as np
#===================================================================================
# Fct Definitions
#===================================================================================
def my_Newton(fct, df_dx, x0):
    """Newton's method that stops when successive *function values* are close.

    Unlike textbook Newton (which stops when |f(x)| is small), this variant
    converges when |f(x_new) - f(x_old)| drops below the tolerance.

    :param fct:   the function f whose root is sought
    :param df_dx: the derivative f'
    :param x0:    initial guess
    :return: the approximate root, or np.nan if no convergence in 20 steps
    """
    tol = 1e-6        # tolerance on |f(x_new) - f(x_old)|
    max_iter = 20     # iteration cap
    x_old = float(x0)
    # First Newton step, taken up-front so the loop condition is defined.
    x_new = x_old - fct(x_old)/df_dx(x_old)
    print(0, 'fct_(x_next) -', abs(fct(x_new) - fct(x_old)), x_new)
    step = 1
    while abs(fct(x_new) - fct(x_old)) > tol and step < max_iter:
        x_old = x_new
        x_new = x_old - fct(x_old)/df_dx(x_old)
        print(step, 'fct_(x_next) -', abs(fct(x_new) - fct(x_old)), x_new)
        step += 1
    # Converged iff the last two function values ended up close enough;
    # otherwise the iteration cap was hit.
    if abs(fct(x_new) - fct(x_old)) < tol:
        return x_new
    return np.nan
|
[
"hge2@ucsc.edu"
] |
hge2@ucsc.edu
|
d02de539b71e1698a057d12c5f6f979c6ccada0e
|
98a1c37ccda91f2c4be14683f5899393f6b97d29
|
/04-Pygame/飞机大战.py
|
57d629dd0b7fc805a276a487a6064cf6f7a621b2
|
[] |
no_license
|
yeshixuan/Python
|
1b564d9e46b469f62f59a3a04b340abd68ea4419
|
98ba749ca9ea12004cdff1fdb7e002dea2f42096
|
refs/heads/master
| 2020-04-29T22:17:01.014401
| 2019-05-14T05:15:29
| 2019-05-14T05:15:29
| 176,442,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,413
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/3 22:00
# @Author : Yebiyun
# @Site :
# @File : 飞机大战.py
# @Software: PyCharm
import pygame
from pygame.locals import *
from sys import exit
from random import randint
# Window resolution.
SCREEN_WIDTH = 480
SCREEN_HEIGHT = 640
hero_is_hit = False  # set once the hero collides with an enemy
ticks = 0            # frame counter driving animations and spawn timing
# Per-frame displacement for each held arrow key (0 when released).
offset = {pygame.K_LEFT:0, pygame.K_RIGHT:0, pygame.K_UP:0, pygame.K_DOWN:0}
# Frame rate.
FRAME_RATE = 60
# Animation cycle length, in frames.
ANIMATE_CYCLE = 30
pos = [200, 500]     # hero start position (top-left)
# Hero (player) class.
class Hero(pygame.sprite.Sprite):
    """The player's plane: movable within the screen, owns its bullet group."""
    def __init__(self, hero_surface, hero_init_pos):
        super(Hero, self).__init__()
        self.image = hero_surface
        self.rect = self.image.get_rect()
        self.rect.topleft = hero_init_pos
        self.speed = 6       # movement speed in pixels per frame
        self.down_index = 0  # current frame of the crash animation
        self.bullet_sprite = pygame.sprite.Group()  # bullets fired by this hero
    def move(self, offset):
        # offset maps arrow-key constants to per-frame displacement;
        # clamp the resulting position to the screen bounds.
        x = self.rect[0] + offset[pygame.K_RIGHT] - offset[pygame.K_LEFT]
        y = self.rect[1] + offset[pygame.K_DOWN] - offset[pygame.K_UP]
        if x < 0:
            self.rect[0] = 0
        elif x > SCREEN_WIDTH - self.rect.width:
            self.rect[0] = SCREEN_WIDTH - self.rect.width
        else:
            self.rect[0] = x
        if y < 0:
            self.rect[1] = 0
        elif y > SCREEN_HEIGHT - self.rect.height:
            self.rect[1] = SCREEN_HEIGHT - self.rect.height
        else:
            self.rect[1] = y
# Bullet class: projectile fired upward from the hero's position.
class Bullet(pygame.sprite.Sprite):
    """A bullet sprite that travels straight up and removes itself off-screen."""
    def __init__(self, bullet_surface, bullet_init_pos):
        super().__init__()
        self.image = bullet_surface
        self.rect = self.image.get_rect()
        self.rect.topleft = bullet_init_pos
        self.speed = 8  # pixels moved up per frame
    def update(self):
        # Move upward; once fully above the screen, drop out of all groups.
        new_top = self.rect.top - self.speed
        self.rect.top = new_top
        if new_top < -self.rect.height:
            self.kill()
# Enemy class: plane that scrolls down from the top of the screen.
class Enemy(pygame.sprite.Sprite):
    """An enemy sprite that descends and removes itself below the screen."""
    def __init__(self, enemy_surface, enemy_init_pos):
        super().__init__()
        self.image = enemy_surface
        self.rect = self.image.get_rect()
        self.rect.topleft = enemy_init_pos
        self.speed = 2       # descent speed, pixels per frame
        self.down_index = 0  # current frame of the destruction animation
    def update(self):
        # Descend in place; once past the bottom edge, remove from all groups.
        self.rect.move_ip(0, self.speed)
        if self.rect.top > SCREEN_HEIGHT:
            self.kill()
# Start: initialize pygame, create the window, and load all image assets.
pygame.init()
screen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])
pygame.display.set_caption("飞机大战")
background = pygame.image.load("images/background.gif")
hero_surface = pygame.image.load("images/hero.gif")
hero_down_surface = []
hero_down_surface.append(pygame.image.load("images/hero1.gif"))
hero_down_surface.append(pygame.image.load("images/hero2.gif"))
hero_down_surface.append(pygame.image.load("images/hero3.gif"))
hero_down_surface.append(pygame.image.load("images/hero4.gif"))
bullet_surface = pygame.image.load("images/bullet.gif")
enemy_surface = pygame.image.load("images/smallplane.gif")
enemy_down_surface = []
enemy_down_surface.append(pygame.image.load("images/smallplane1.gif"))
enemy_down_surface.append(pygame.image.load("images/smallplane2.gif"))
enemy_down_surface.append(pygame.image.load("images/smallplane3.gif"))
enemy_down_surface.append(pygame.image.load("images/smallplane4.gif"))
gameover = pygame.image.load("images/gameover.gif")
hero = Hero(hero_surface, pos)
enemy_sprite = pygame.sprite.Group()
# Enemies currently playing their destruction animation.
enemy_down_group = pygame.sprite.Group()
clock = pygame.time.Clock()
print(len(hero_down_surface))
# Main game loop: spawn bullets/enemies, move sprites, resolve collisions,
# and run the crash animations until the hero's own animation finishes.
while True:
    clock.tick(FRAME_RATE)
    screen.blit(background,(0,0))
    screen.blit(hero.image, hero.rect)
    # Fire a bullet every 10 frames from the hero's nose.
    if ticks % 10 == 0:
        hero.bullet_sprite.add(Bullet(bullet_surface, hero.rect.midtop))
    hero.bullet_sprite.update()
    hero.bullet_sprite.draw(screen)
    # Spawn a new enemy at a random x every ANIMATE_CYCLE frames.
    if ticks % ANIMATE_CYCLE == 0:
        enemy_sprite.add(Enemy(enemy_surface, (randint(0,SCREEN_WIDTH-enemy_surface.get_width()), -enemy_surface.get_height())))
    enemy_sprite.update()
    enemy_sprite.draw(screen)
    # Bullet/enemy hits: both sprites are removed, enemy enters its
    # destruction animation group.
    enemy_down_group.add(pygame.sprite.groupcollide(enemy_sprite, hero.bullet_sprite, True, True))
    for enemy in enemy_down_group:
        if ticks % (ANIMATE_CYCLE//2) != 0:
            screen.blit(enemy_down_surface[enemy.down_index], enemy.rect)
        else:
            if enemy.down_index < 3:
                enemy.down_index += 1
            else:
                enemy_down_group.remove(enemy)
    # Hero/enemy collision: mark the hero as hit and animate the enemy down.
    enemy_list = pygame.sprite.spritecollide(hero, enemy_sprite, True)
    if len(enemy_list):
        hero_is_hit = True
        enemy_down_group.add(enemy_list)
    # Once hit, step through the hero crash animation; leave the loop when done.
    if hero_is_hit:
        if ticks % (ANIMATE_CYCLE//2) != 0:
            hero.image = hero_down_surface[hero.down_index]
        else:
            if hero.down_index < 3:
                hero.down_index += 1
            else:
                break
    pygame.display.update()
    ticks += 1
    # Input handling: track held arrow keys via the offset table.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
        if event.type == pygame.KEYDOWN:
            if event.key in offset:
                offset[event.key] = hero.speed
        if event.type == pygame.KEYUP:
            if event.key in offset:
                offset[event.key] = 0
    hero.move(offset)
# Game over: show the end screen for 5 animation cycles, then exit the loop.
screen.blit(gameover,(0,0))
ticks = 0
while True:
    clock.tick(FRAME_RATE)
    ticks += 1
    pygame.display.update()
    if ticks % (5*ANIMATE_CYCLE) == 0:
        break
|
[
"979697327@qq.com"
] |
979697327@qq.com
|
b1322642c22f16e262b114f04965e50a992a34ee
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/devices/v20200801/list_iot_hub_resource_keys_for_key_name.py
|
3135b5f75e15d808bf5dedb68d251d50c673538c
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 4,003
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'ListIotHubResourceKeysForKeyNameResult',
    'AwaitableListIotHubResourceKeysForKeyNameResult',
    'list_iot_hub_resource_keys_for_key_name',
]
@pulumi.output_type
class ListIotHubResourceKeysForKeyNameResult:
    """
    The properties of an IoT hub shared access policy.
    """
    # NOTE: this file is generated by the Pulumi SDK Generator (see file
    # header) — keep changes to comments only.
    def __init__(__self__, key_name=None, primary_key=None, rights=None, secondary_key=None):
        # Each field is type-checked and registered via pulumi.set so the
        # output_type machinery can expose it as an output property.
        if key_name and not isinstance(key_name, str):
            raise TypeError("Expected argument 'key_name' to be a str")
        pulumi.set(__self__, "key_name", key_name)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if rights and not isinstance(rights, str):
            raise TypeError("Expected argument 'rights' to be a str")
        pulumi.set(__self__, "rights", rights)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)
    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> str:
        """
        The name of the shared access policy.
        """
        return pulumi.get(self, "key_name")
    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[str]:
        """
        The primary key.
        """
        return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter
    def rights(self) -> str:
        """
        The permissions assigned to the shared access policy.
        """
        return pulumi.get(self, "rights")
    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[str]:
        """
        The secondary key.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListIotHubResourceKeysForKeyNameResult(ListIotHubResourceKeysForKeyNameResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable without ever suspending: the dead
        # `if False: yield` turns __await__ into a generator, whose return
        # value becomes the awaited result.
        if False:
            yield self
        return ListIotHubResourceKeysForKeyNameResult(
            key_name=self.key_name,
            primary_key=self.primary_key,
            rights=self.rights,
            secondary_key=self.secondary_key)
def list_iot_hub_resource_keys_for_key_name(key_name: Optional[str] = None,
                                            resource_group_name: Optional[str] = None,
                                            resource_name: Optional[str] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotHubResourceKeysForKeyNameResult:
    """
    The properties of an IoT hub shared access policy.


    :param str key_name: The name of the shared access policy.
    :param str resource_group_name: The name of the resource group that contains the IoT hub.
    :param str resource_name: The name of the IoT hub.
    """
    # Marshal arguments into the wire-format dict expected by the provider.
    __args__ = dict()
    __args__['keyName'] = key_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and re-wrap the result as
    # the awaitable variant.
    __ret__ = pulumi.runtime.invoke('azure-native:devices/v20200801:listIotHubResourceKeysForKeyName', __args__, opts=opts, typ=ListIotHubResourceKeysForKeyNameResult).value
    return AwaitableListIotHubResourceKeysForKeyNameResult(
        key_name=__ret__.key_name,
        primary_key=__ret__.primary_key,
        rights=__ret__.rights,
        secondary_key=__ret__.secondary_key)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
1da89dbcd832978c8723bebd5c2fe3a26ce58426
|
61ff94d2987b3bc95f82c5a58897f50d1efa1db8
|
/hive/db/query_stats.py
|
68a1b207535d39dcfcfcad00384343b91a5c69cf
|
[
"MIT"
] |
permissive
|
arpwv/hivemind
|
ee77c9805731fda2bb95e1127a56152fe53b707a
|
a87e5578f9020be02c867021a8acdfff41f06777
|
refs/heads/master
| 2021-01-24T03:43:46.507207
| 2018-02-23T22:18:56
| 2018-02-23T22:18:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
import time
import re
import atexit
class QueryStats:
    """Collects per-query SQL timing stats via a decorator.

    Stats accumulate in class-level state and are printed at process exit
    (atexit) or once roughly 30 minutes of SQL time have accumulated.
    """
    stats = {}      # normalized SQL -> [total ms, call count]
    ttl_time = 0.0  # total ms across all queries since the last clear()
    def __init__(self):
        # Dump accumulated stats when the process exits.
        atexit.register(QueryStats.print)
    def __call__(self, fn):
        # Decorator: time fn and log under its SQL string.
        # NOTE(review): assumes the SQL text is fn's second positional
        # argument (args[1]) — confirm against the decorated call sites.
        def wrap(*args, **kwargs):
            time_start = time.perf_counter()
            result = fn(*args, **kwargs)
            time_end = time.perf_counter()
            QueryStats.log(args[1], (time_end - time_start) * 1000)
            return result
        return wrap
    @classmethod
    def log(cls, sql, ms):
        # Record one query; print-and-reset after 30 minutes of SQL time.
        nsql = cls.normalize_sql(sql)
        cls.add_nsql_ms(nsql, ms)
        cls.check_timing(nsql, ms)
        if cls.ttl_time > 30 * 60 * 1000:
            cls.print()
    @classmethod
    def add_nsql_ms(cls, nsql, ms):
        if nsql not in cls.stats:
            cls.stats[nsql] = [ms, 1]
        else:
            cls.stats[nsql][0] += ms
            cls.stats[nsql][1] += 1
        cls.ttl_time += ms
    @classmethod
    def normalize_sql(cls, sql):
        # Collapse whitespace, truncate to 256 chars, and fold VALUES
        # tuples together so parameterized inserts aggregate to one key.
        nsql = re.sub(r'\s+', ' ', sql).strip()[0:256]
        nsql = re.sub(r'VALUES (\s*\([^)]+\),?)+', 'VALUES (...)', nsql)
        return nsql
    @classmethod
    def check_timing(cls, nsql, ms):
        # Warn (yellow ANSI) about any single query slower than 100 ms.
        if ms > 100:
            print("\033[93m[SQL][%dms] %s\033[0m" % (ms, nsql[:250]))
    @classmethod
    def print(cls):
        # Print the top 40 queries by total time, then reset all counters.
        if not cls.stats:
            return
        ttl = cls.ttl_time
        print("[DEBUG] total SQL time: {}s".format(int(ttl / 1000)))
        for arr in sorted(cls.stats.items(), key=lambda x: -x[1][0])[0:40]:
            sql, vals = arr
            ms, calls = vals
            print("% 5.1f%% % 7dms % 9.2favg % 8dx -- %s"
                  % (100 * ms/ttl, ms, ms/calls, calls, sql[0:180]))
        cls.clear()
    @classmethod
    def clear(cls):
        cls.stats = {}
        cls.ttl_time = 0
|
[
"roadscape@users.noreply.github.com"
] |
roadscape@users.noreply.github.com
|
fa36261172803b39dd43394343057295f6489945
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007-EOL/desktop/kde/transKode/actions.py
|
19d1559602adac4f930b844d24d29a64980cf019
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import kde
# Directory inside the source archive that the build steps run in.
WorkDir = 'transkode'
def setup():
    # Configure via pisi's KDE build helper.
    kde.configure()
def build():
    kde.make()
def install():
    kde.install()
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
3db4dccc53642460bd253d657b3e1e6860dc134f
|
0fdb402809188c34702bc70e4d106e56ca8e2bd0
|
/Algorithms/mobile.py
|
7cc1afd0b2f037442a49adc901644212600de01c
|
[] |
no_license
|
the07/Python
|
356f2018a85caeb9dd6ccb251636ff697eb613b6
|
af34cf3ffe01504632cf3654a0a5f89653e163cb
|
refs/heads/master
| 2021-01-06T20:36:33.718087
| 2017-11-24T06:58:32
| 2017-11-24T06:58:32
| 90,789,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# Read N phone numbers, keep only the last 10 digits of each (stripping any
# +91/0091/0 prefix), then print them sorted in "+91 xxxxx xxxxx" form.
phone_numbers = []
for _ in range(int(input())):
    S = input()
    phone_numbers.append(S[-10:])
for i in sorted(phone_numbers):
    print('+91 {} {}'.format(i[:5], i[5:]))
|
[
"thegauravks@gmail.com"
] |
thegauravks@gmail.com
|
d174932faeffa0b07a3e4466044164eb769e3dc1
|
609ec378fadcbd81a8307064cd11c0e27b585cca
|
/setup.py
|
57138ace5c78f29d4f361b9eb6d70b4b692207ea
|
[] |
no_license
|
oboberg/QuickReduce
|
b2184c212774e61f430ba62dda024ce672bd6dca
|
19f42ed8105a24b4191066915543ee70022b5bfb
|
refs/heads/master
| 2021-01-19T07:02:45.877576
| 2016-08-09T19:15:58
| 2016-08-09T19:15:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy
# Build the podi_cython extension: Cython entry point plus C helper sources,
# linked against the GSL numeric library (gsl/gslcblas) and libm.
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [
        Extension("podi_cython",
                  sources=['cython_src/podi_cython.pyx',
                           "cython_src/sigma_clip_mean.c",
                           "cython_src/sigma_clip_median.c",
                           "cython_src/lacosmics.c",
                       ],
                  include_dirs=["cython_src", numpy.get_include()],
                  libraries=['gslcblas', "gsl", "m"]
                  )
    ]
)
|
[
"kotulla@wisc.edu"
] |
kotulla@wisc.edu
|
c7b577d0088d48af19791f2282ca10b34db9bfcc
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/65/usersdata/201/32254/submittedfiles/investimento.py
|
57cbb58258e023fea00db2eb9598bfa4a3514d35
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
#COMECE SEU CODIGO AQUI
# Read the initial investment and growth rate, apply one growth step, and
# print the resulting total with two decimals.
a=float(input('Investimento inicial:'))
b=float(input('Taxa de crescimento percentual:'))
# NOTE(review): b is used as a raw multiplier, not divided by 100 — a rate of
# "5%" must be entered as 0.05 despite the prompt saying "percentual"; confirm.
x=(a*b)
Total=(a+x)
print('%.2f' %Total)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
bbca92b38602a503a8e5e884d77be44b3b03e2c0
|
dd745566ceee1760c714b17cabd50d2a0a919747
|
/Stream-Three/django_todo_project/env/lib/python3.5/site-packages/corsheaders/conf.py
|
e5b964ccffb7dcd50b8ca4ad678d32401bd435c0
|
[] |
no_license
|
hyohannesgithub/full_stack_solutions
|
668fc9de020aa8aa18c64d38d13ca6bfcac12278
|
e572d6154c3d63681c124698d7962905f1384671
|
refs/heads/master
| 2021-01-13T02:53:27.623809
| 2017-01-04T02:49:30
| 2017-01-04T02:49:30
| 77,099,093
| 1
| 0
| null | 2016-12-22T01:24:02
| 2016-12-22T01:24:01
| null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
from django.conf import settings
from .defaults import default_headers, default_methods # Kept here for backwards compatibility
class Settings(object):
    """
    Shadow Django's settings with a little logic
    """
    # Each property proxies the identically-named Django setting, falling
    # back to a library default when the project does not define it.
    @property
    def CORS_ALLOW_HEADERS(self):
        return getattr(settings, 'CORS_ALLOW_HEADERS', default_headers)
    @property
    def CORS_ALLOW_METHODS(self):
        return getattr(settings, 'CORS_ALLOW_METHODS', default_methods)
    @property
    def CORS_ALLOW_CREDENTIALS(self):
        return getattr(settings, 'CORS_ALLOW_CREDENTIALS', False)
    @property
    def CORS_PREFLIGHT_MAX_AGE(self):
        # Seconds browsers may cache the preflight response (default 1 day).
        return getattr(settings, 'CORS_PREFLIGHT_MAX_AGE', 86400)
    @property
    def CORS_ORIGIN_ALLOW_ALL(self):
        return getattr(settings, 'CORS_ORIGIN_ALLOW_ALL', False)
    @property
    def CORS_ORIGIN_WHITELIST(self):
        return getattr(settings, 'CORS_ORIGIN_WHITELIST', ())
    @property
    def CORS_ORIGIN_REGEX_WHITELIST(self):
        return getattr(settings, 'CORS_ORIGIN_REGEX_WHITELIST', ())
    @property
    def CORS_EXPOSE_HEADERS(self):
        return getattr(settings, 'CORS_EXPOSE_HEADERS', ())
    @property
    def CORS_URLS_REGEX(self):
        # By default CORS headers apply to every URL.
        return getattr(settings, 'CORS_URLS_REGEX', r'^.*$')
    @property
    def CORS_MODEL(self):
        return getattr(settings, 'CORS_MODEL', None)
    @property
    def CORS_REPLACE_HTTPS_REFERER(self):
        return getattr(settings, 'CORS_REPLACE_HTTPS_REFERER', False)
# Module-level singleton used throughout the package.
conf = Settings()
|
[
"aaronsnig@gmail.com"
] |
aaronsnig@gmail.com
|
e8fc466e3e5524ecb40f9b242aa5198d18146f25
|
02ce6d29fec0d68ca2a2a778d37d2f2cff1a590e
|
/Old/PythonOne/18.2.4-tcp-client.py
|
786c1fd2479c368035b73888d47e2d549a49b923
|
[] |
no_license
|
CalvinCheungCoder/Python-100-Days
|
605045122e40c119abc32466c32479559a4d4b9b
|
0f9bec8893954d4afbe2037dad92885c7d4d31f8
|
refs/heads/master
| 2020-04-17T11:49:42.148478
| 2019-09-19T10:22:37
| 2019-09-19T10:22:37
| 166,556,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
import socket
# Minimal TCP client: connect to the local server, send a greeting, and
# print the reply (the print text is user-facing Chinese:
# "message received from the server").
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 8880))
s.send(b'hello')
data = s.recv(1024)
print('从服务器接收的消息:{0}'.format(data.decode()))
s.close()
|
[
"984382258@qq.com"
] |
984382258@qq.com
|
6bcf2b63c59525f3e7ccc1b4759864d27f05aae2
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5751500831719424_0/Python/Bremsstrahlung/repeater.py
|
d918e0d1d86b831dda2d3f18915cfd9f173d497b
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
# Code Jam-style "repeater" solver: for each case, N strings are equal-able
# iff they share the same run-compressed sequence; when possible, the answer
# is the minimum number of single-character insert/delete moves to equalize
# the run lengths. Prints "Fegla Won" when impossible.
# NOTE(review): `input` and `iter` shadow the builtins for the rest of the
# script — intentional here since builtin input() is never used afterwards.
input = open("A-small-attempt1.in")
output = open("output.out","w")
t = int(input.readline())
for i in range(t):
    n = int(input.readline())
    r = 0
    impossible = False
    lines = []
    iter = []  # per-string cursor into lines[y] while consuming runs
    for x in range(n):
        lines.append(input.readline().strip())
        iter.append(0)
    # Run-compress every string; all must compress to the same sequence.
    seq = ""
    for x in range(len(lines)):
        lineseq = ""
        for y in range(len(lines[x])):
            if len(lineseq) == 0 or lines[x][y] != lineseq[-1]:
                lineseq += lines[x][y]
        if x == 0:
            seq = lineseq
        elif lineseq != seq:
            impossible = True
    # For each run, count its length in every string (ns) and add the
    # cheapest cost of making all lengths equal to one of them.
    for x in range(len(seq)):
        ns = []
        for y in range(len(lines)):
            ns.append(0)
            if iter[y] >= len(lines[y]):
                impossible = True
                break
            if lines[y][iter[y]] == seq[x]:
                while True:
                    if lines[y][iter[y]] == seq[x]:
                        ns[y] += 1
                        iter[y] += 1
                    else:
                        break
                    if iter[y] >= len(lines[y]):
                        break
            else:
                impossible = True
                break
        if not impossible:
            # Try each string's run length as the target; keep the minimum
            # total adjustment.
            op = []
            for y in range(len(ns)):
                q = 0
                for z in range(len(ns)):
                    if z != y:
                        q += abs(ns[y] - ns[z])
                op.append(q)
            r += min(op)
    if impossible:
        output.write("Case #{}: Fegla Won\n".format(i + 1))
        print("Case #{}: Fegla Won".format(i + 1))
    else:
        output.write("Case #{}: {}\n".format(i + 1,r))
        print("Case #{}: {}".format(i + 1,r))
output.close();
input.close()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
6569fbcc6eb836ab5ada7f0d7b0beac36b3a8ac8
|
0f47b8b3775e1730f92141128491b0bbfe3d89e0
|
/OOP/SOLID/examples/open_closed/after/character.py
|
d9d293c50eead1ee7650593e0d7a14a6d3fdb875
|
[] |
no_license
|
hongmin0907/CS
|
1d75c38da98c6174ea19de163c850d0f3bac22e3
|
697e8e1a5bde56a7588381a12f74bbb0e3aee3e8
|
refs/heads/master
| 2020-06-23T20:10:22.051477
| 2019-07-15T00:20:09
| 2019-07-15T00:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,170
|
py
|
# Program against the abstraction (abstract type), not concrete classes.
# CLOSED FOR MODIFICATION
# Extending the set of attack kinds does not change the character's attack code.
from abc import ABCMeta, abstractmethod
from attack_kind import (AttackKindFactory, FireAttackKind, IceAttackKind,
StoneAttackKind, KungfuAttackKind)
class Character(metaclass=ABCMeta):
    """Abstract base for all combatants (players and monsters)."""
    def __init__(self, name, hp, power):
        self.name=name    # display name
        self.hp=hp        # hit points
        self.power=power  # attack power
    @abstractmethod
    def attack(self, other, kind):
        """Attack `other` with the given attack kind."""
        pass
    @abstractmethod
    def get_damage(self, power, attack_kind):
        """Receive an attack of `attack_kind` dealing up to `power` damage."""
        pass
    def __str__(self):
        return f'{self.name} : {self.hp}'
class Player(Character):
    """A player character that can learn any number of attack kinds (skills)."""
    def __init__(self, name='player', hp=100, power=10, *attack_kinds):
        super().__init__(name, hp, power)
        self.skills=[]  # attack-kind objects this player can use
        for attack_kind in attack_kinds:
            self.skills.append(attack_kind)
    def attack(self, other, a_kind):
        # Only attack when the requested kind is among the learned skills.
        for attack_kind in self.skills:
            if a_kind==attack_kind.get_kind():
                other.get_damage(self.power, a_kind)
                attack_kind.attack()
    def get_damage(self, power, a_kind):
        # Attacks of a kind the player knows deal half damage; others full.
        for attack_kind in self.skills:
            if attack_kind.get_kind()==a_kind:
                self.hp-=(power//2)
                return
        self.hp-=power
class Monster(Character):
    """Base monster: its attack kind is derived from the class-name prefix."""
    @classmethod
    def get_monster_kind(cls):
        # 'FireMonster' -> 'Fire', 'IceMonster' -> 'Ice', ...
        return cls.__name__.replace('Monster', '')
    def __init__(self, name='Monster', hp=50, power=5):
        super().__init__(name, hp, power)
        self.name=self.get_monster_kind()+name
        self.attack_kind=AttackKindFactory(self.get_monster_kind())
    def attack(self, other, a_kind):
        # A monster can only attack with its own kind.
        if self.attack_kind.get_kind()==a_kind:
            other.get_damage(self.power, a_kind)
            self.attack_kind.attack()
    def get_damage(self, power, a_kind):
        # Healed by attacks of its own kind, hurt by any other kind.
        if a_kind==self.attack_kind.get_kind():
            self.hp+=power
        else:
            self.hp-=power
    def get_attack_kind(self):
        return self.attack_kind.get_kind()
    @abstractmethod
    def generate_gold(self):
        """Gold dropped by this monster; defined per concrete subclass."""
        pass
# The two monster kinds that existed in early development.
class FireMonster(Monster):
    """Fire monster: drops 10 gold."""
    def generate_gold(self):
        return 10
class IceMonster(Monster):
    """Ice monster: tougher than the default (hp 100) and drops 20 gold."""
    def __init__(self):
        super().__init__()
        self.hp=100  # override the default 50 hp set by Monster.__init__
    def generate_gold(self):
        return 20
# Monsters added later as the game grew.
class StoneMonster(Monster):
    """Stone monster: drops no gold."""
    def generate_gold(self):
        return 0
class KungfuMonster(Monster):
    """Kungfu monster: drops 1000 gold."""
    def generate_gold(self):
        return 1000
if __name__=="__main__":
fm=FireMonster()
im=IceMonster()
sm=StoneMonster()
kfm=KungfuMonster()
monsters=[]
monsters.extend((fm, im, sm, kfm))
player=Player('john', 120, 20, IceAttackKind(), FireAttackKind())
print(player)
for mon in monsters:
player.attack(mon, 'Fire')
for mon in monsters:
print(mon)
for mon in monsters:
print(mon.get_attack_kind())
mon.attack(player, mon.get_attack_kind())
print(player)
|
[
"ythwork83@gmail.com"
] |
ythwork83@gmail.com
|
3a2d8d4fd3ae54ef5535a568c0501b0c2090940f
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/quota/latency.py
|
c637d96fd9df153845b45061d928eece3556b401
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flops and Parameters Filter."""
import logging
import vega
from vega.metrics import calc_forward_latency_on_host
from vega.model_zoo import ModelZoo
from .quota_item_base import QuotaItemBase
class LatencyVerification(QuotaItemBase):
    """Quota filter that rejects sampled models whose forward latency
    on the host falls outside a configured range."""
    def __init__(self, latency_range):
        # Two-element sequence: (lower_ms, upper_ms) inclusive bounds.
        self.latency_range = latency_range
    def verify_on_host(self, model_desc):
        """Measure the model's host forward latency; True when it is in range."""
        model = ModelZoo.get_model(model_desc)
        count_input = self.get_input_data()
        trainer = vega.get_trainer(model_desc=model_desc)
        # TF needs an explicit session config; other backends do not.
        sess_config = trainer._init_session_config() if vega.is_tf_backend() else None
        latency = calc_forward_latency_on_host(model, count_input, sess_config)
        logging.info(f"Sampled model's latency: {latency}ms")
        lower, upper = self.latency_range[0], self.latency_range[1]
        if lower <= latency <= upper:
            return True
        logging.info(f"The latency ({latency}) is out of range. Skip this network.")
        return False
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
2f30b44e7c114f7510f263f5587bfd1d560d6815
|
1e5f6ac1590fe64e2d5a2d8b036c0948847f668d
|
/codes/Module_2/lecture_7/lecture_7_16.py
|
f535e827c618a0f328550cad12cb40cbcb93bd19
|
[] |
no_license
|
Gedanke/Reptile_study_notes
|
54a4f48820586b1784c139716c719cc9d614c91b
|
a9705ebc3a6f95160ad9571d48675bc59876bd32
|
refs/heads/master
| 2022-07-12T23:43:24.452049
| 2021-08-09T12:54:18
| 2021-08-09T12:54:18
| 247,996,275
| 5
| 1
| null | 2022-06-26T00:21:48
| 2020-03-17T14:50:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 535
|
py
|
# -*- coding: utf-8 -*-
import requests

# Browser-like request headers (sent with both requests below).
headers = {
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'en-US,en;q=0.8',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Connection': 'keep-alive',
}

# First request asks httpbin to set a cookie; note that without a Session
# the cookie is NOT carried over to the second request.
requests.get("http://httpbin.org/cookies/set/number/123456789", headers=headers)
response = requests.get("http://httpbin.org/cookies")
print(response.text)
|
[
"13767927306@163.com"
] |
13767927306@163.com
|
0121fb0c0c35e0b76606d6c0541c3178447f1eed
|
d7faf47825b6f8e5abf9a9587f1e7248c0eed1e2
|
/rllib/tests/test_gpus.py
|
8a1f24311a4d3ac2d53928ee1f8f93bb19544e3e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ggdupont/ray
|
7d7c7f39a8f99a09199fab60897da9e48b8e2645
|
15391026c19f1cbbb8d412e46b01f7998e42f2b9
|
refs/heads/master
| 2023-03-12T06:30:11.428319
| 2021-12-07T05:34:27
| 2021-12-07T05:34:27
| 165,058,028
| 0
| 0
|
Apache-2.0
| 2023-03-04T08:56:50
| 2019-01-10T12:41:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer, DEFAULT_CONFIG
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.test_utils import framework_iterator
from ray import tune
torch, _ = try_import_torch()
class TestGPUs(unittest.TestCase):
    """Exercises RLlib's num_gpus / num_gpus_per_worker / _fake_gpus handling
    both with and without physically available GPUs."""
    def test_gpus_in_non_local_mode(self):
        """Unsatisfiable GPU requests must raise; _fake_gpus must not."""
        # Non-local mode.
        ray.init()
        actual_gpus = torch.cuda.device_count()
        print(f"Actual GPUs found (by torch): {actual_gpus}")
        config = DEFAULT_CONFIG.copy()
        config["num_workers"] = 2
        config["env"] = "CartPole-v0"
        # Expect errors when we run a config w/ num_gpus>0 w/o a GPU
        # and _fake_gpus=False.
        for num_gpus in [0, 0.1, 1, actual_gpus + 4]:
            # Only allow possible num_gpus_per_worker (so test would not
            # block infinitely due to a down worker).
            per_worker = [0] if actual_gpus == 0 or actual_gpus < num_gpus \
                else [0, 0.5, 1]
            for num_gpus_per_worker in per_worker:
                for fake_gpus in [False] + ([] if num_gpus == 0 else [True]):
                    config["num_gpus"] = num_gpus
                    config["num_gpus_per_worker"] = num_gpus_per_worker
                    config["_fake_gpus"] = fake_gpus
                    print(f"\n------------\nnum_gpus={num_gpus} "
                          f"num_gpus_per_worker={num_gpus_per_worker} "
                          f"_fake_gpus={fake_gpus}")
                    # NOTE(review): tf2 is skipped for num_gpus > 1 --
                    # presumably multi-GPU is unsupported there; confirm.
                    frameworks = ("tf", "torch") if num_gpus > 1 else \
                        ("tf2", "tf", "torch")
                    for _ in framework_iterator(config, frameworks=frameworks):
                        # Expect that trainer creation causes a num_gpu error.
                        if actual_gpus < num_gpus + 2 * num_gpus_per_worker \
                                and not fake_gpus:
                            # "Direct" RLlib (create Trainer on the driver).
                            # Cannot run through ray.tune.run() as it would
                            # simply wait infinitely for the resources to
                            # become available.
                            print("direct RLlib")
                            self.assertRaisesRegex(
                                RuntimeError,
                                "Found 0 GPUs on your machine",
                                lambda: PGTrainer(config, env="CartPole-v0"),
                            )
                        # If actual_gpus >= num_gpus or faked,
                        # expect no error.
                        else:
                            print("direct RLlib")
                            trainer = PGTrainer(config, env="CartPole-v0")
                            trainer.stop()
                            # Cannot run through ray.tune.run() w/ fake GPUs
                            # as it would simply wait infinitely for the
                            # resources to become available (even though, we
                            # wouldn't really need them).
                            if num_gpus == 0:
                                print("via ray.tune.run()")
                                tune.run(
                                    "PG",
                                    config=config,
                                    stop={"training_iteration": 0})
        ray.shutdown()
    def test_gpus_in_local_mode(self):
        """In local mode every num_gpus setting must be accepted silently."""
        # Local mode.
        ray.init(local_mode=True)
        actual_gpus_available = torch.cuda.device_count()
        config = DEFAULT_CONFIG.copy()
        config["num_workers"] = 2
        config["env"] = "CartPole-v0"
        # Expect no errors in local mode.
        for num_gpus in [0, 0.1, 1, actual_gpus_available + 4]:
            print(f"num_gpus={num_gpus}")
            for fake_gpus in [False, True]:
                print(f"_fake_gpus={fake_gpus}")
                config["num_gpus"] = num_gpus
                config["_fake_gpus"] = fake_gpus
                frameworks = ("tf", "torch") if num_gpus > 1 else \
                    ("tf2", "tf", "torch")
                for _ in framework_iterator(config, frameworks=frameworks):
                    print("direct RLlib")
                    trainer = PGTrainer(config, env="CartPole-v0")
                    trainer.stop()
                    print("via ray.tune.run()")
                    tune.run(
                        "PG", config=config, stop={"training_iteration": 0})
        ray.shutdown()
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
[
"noreply@github.com"
] |
ggdupont.noreply@github.com
|
c15d9a7f00b2603e79791d3b4d43209b20ff32db
|
cb305a20202cd381af979702950311a1b92319f2
|
/Flask/Project/setup.py
|
2bcadb4c2cff7569f2e7e0c66fa9475e9af3c831
|
[] |
no_license
|
ShanjinurIslam/The-Stack
|
93a9bafb7355c471e2363bacddc0cfae5c5ae1c1
|
2d31ae8cf37dd9aceef06e067756e716a225f23b
|
refs/heads/master
| 2022-12-08T03:35:01.397484
| 2020-08-26T20:21:13
| 2020-08-26T20:21:13
| 287,058,289
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from setuptools import find_packages, setup
# Packaging metadata for the "flaskr" application (Flask tutorial layout).
setup(
    name='flaskr',
    version='1.0.0',
    packages=find_packages(),
    include_package_data=True,  # also ship non-.py package files
    zip_safe=False,  # install unpacked so templates/static files are readable
    install_requires=['flask','flask_wtf'],
)
|
[
"spondoncsebuet@gmail.com"
] |
spondoncsebuet@gmail.com
|
cc45d0ee2e3786742bdb6ce3f8e243e4832d6541
|
2976433a213f354b6d387e1d957192a9871f7e40
|
/JavaScript/reactjs/basic01/testcode.py
|
f9087ac3d31b1989bf0f6a5a225583d5f66980a2
|
[] |
no_license
|
saurabh-kumar88/Coding-Practice-
|
90a6f6b8feb7a1d2316451b31c646a48dc6f9bf9
|
48f0bac728745c8978468974d55025da86f29486
|
refs/heads/master
| 2023-02-15T00:19:47.411973
| 2020-10-06T15:48:46
| 2020-10-06T15:48:46
| 280,220,900
| 0
| 1
| null | 2021-01-06T09:15:21
| 2020-07-16T17:51:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
class Solution:
    def fizzBuzz(self, n: int):
        """Return the FizzBuzz sequence for 1..n as a list of strings.

        Multiples of both 3 and 5 map to "FizzBuzz", multiples of 3 to
        "Fizz", multiples of 5 to "Buzz", everything else to str(count).
        Returns an empty list for n <= 0.
        """
        ans = []
        for count in range(1, n + 1):
            # A number divisible by both 3 and 5 is exactly a multiple of 15.
            # (Also removed two locals the original set but never used.)
            if count % 15 == 0:
                ans.append("FizzBuzz")
            elif count % 3 == 0:
                ans.append("Fizz")
            elif count % 5 == 0:
                ans.append("Buzz")
            else:
                ans.append(str(count))
        return ans
if __name__ == "__main__":
    obj = Solution()
    print(obj.fizzBuzz(15))
|
[
"ykings.saurabh@gmail.com"
] |
ykings.saurabh@gmail.com
|
856571450b135b64c9414883a6320d798601aeae
|
aac9fd4a281ffac37fe8b2087f720001b5bcad7a
|
/mnist_sync_sharding_greedy/worker.py
|
cb909a02fe9d1d064155aaff4dfee04ee14db282
|
[
"MIT"
] |
permissive
|
epikjjh/Distributed-Deep-Learning
|
8d77875e9aa74855b29ac5bb8860b987ef798ec1
|
9762b99306771c0f7dadc58abe6bf7ebe5ed468f
|
refs/heads/master
| 2023-08-14T18:27:14.144482
| 2021-10-19T02:13:17
| 2021-10-19T02:13:17
| 266,505,936
| 1
| 3
|
MIT
| 2020-10-14T22:06:16
| 2020-05-24T09:09:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,193
|
py
|
from model import Model
from mpi4py import MPI
from typing import List
import numpy as np
import tensorflow as tf
import time,sys
from functools import reduce
class SyncWorker(Model):
    """Data-parallel worker that computes gradients on its mini-batch and
    exchanges them synchronously with the parameter servers over MPI.
    Parameters are re-ordered "greedily" (largest and smallest alternating)
    so each parameter server receives a balanced amount of data."""
    def __init__(self, batch_size, rank, num_ps, num_workers):
        super().__init__()
        ''' Modify var_bucket & var_shape for greedy ordering '''
        # Sort parameters (by total element count, ascending)
        tmp = {i: reduce(lambda x, y: x*y, self.var_shape[i].as_list()) for i in range(self.var_size)}
        tmp = sorted(tmp, key=tmp.get)
        # Reorder parameters: interleave smallest and largest
        self.greedy_order = []
        i = 0
        j = len(tmp) - 1
        while i < j:
            self.greedy_order.append(tmp[i])
            self.greedy_order.append(tmp[j])
            i += 1
            j -= 1
        # Add mid value if the number of parameters is odd
        if len(tmp) % 2:
            self.greedy_order.append(tmp[i])
        # Modify var_bucket to follow the greedy order
        with tf.compat.v1.variable_scope("mnist", reuse=tf.compat.v1.AUTO_REUSE):
            self.var_bucket = [tf.compat.v1.get_variable("v{}".format(i), shape=self.var_shape[i], dtype=tf.float32) for i in self.greedy_order]
        # Modify var_shape to follow the greedy order
        self.var_shape = [self.var_shape[i] for i in self.greedy_order]
        # Set rank of worker
        # rank: number of parameter servers ~ number of parameter servers + number of workers - 1
        self.rank = rank
        # Set number of parameter servers & workers
        self.num_workers = num_workers
        self.num_ps = num_ps
        # Each PS handles avg_var_size params; the last PS takes the remainder.
        self.avg_var_size = self.var_size // self.num_ps
        self.local_var_size = self.avg_var_size + self.var_size % self.num_ps
        self.batch_size = batch_size
        self.grad_buckets = [tf.compat.v1.placeholder(shape=self.var_shape[i], dtype=tf.float32) for i in range(self.var_size)]
        self.senders = [tf.py_function(func=self.wrap_send(i), inp=[self.grad_buckets[i]], Tout=[]) for i in range(self.var_size)]
    def wrap_send(self, num):
        # Build a tf.py_function body that ships gradient `num` to its PS.
        # NOTE(review): `comm` is the module-level MPI communicator created in
        # the __main__ block below; this closure depends on that global.
        def send(grad):
            # Send data to parameter server that owns parameter `num`
            ind = num // self.avg_var_size
            if num >= self.var_size - self.local_var_size:
                ind = self.num_ps-1
            comm.Send([grad, MPI.FLOAT], dest=ind, tag=num-(ind*self.avg_var_size))
            return None
        return send
    def work(self, cnt):
        """Run one training step on batch `cnt` and push all gradients out."""
        x_batch = self.x_train[self.batch_size*cnt:self.batch_size*(cnt+1)]
        y_batch = self.y_train[self.batch_size*cnt:self.batch_size*(cnt+1)]
        ret, = self.sess.run([self.grads], feed_dict={self.x: x_batch, self.y_: y_batch, self.keep_prob: 0.5})
        grads = [grad for grad, var in ret] # gradient tuple
        # Send gradients to each parameter server (in greedy order)
        for i in range(self.var_size):
            self.sess.run([self.senders[i]], feed_dict={self.grad_buckets[i]: grads[self.greedy_order[i]]})
if __name__ == "__main__":
    # Training hyper-parameters for this demo run.
    epoch = 1
    batch_size = 100
    comm = MPI.COMM_WORLD
    # Set rank of worker
    # rank: number of parameter servers ~ number of parameter servers + number of workers - 1
    rank = comm.Get_rank()
    # Set number of parameter servers & workers (worker count from argv[2])
    num_workers = int(sys.argv[2])
    num_ps = comm.Get_size() - num_workers
    # Fix: time.clock() was removed in Python 3.8; time.perf_counter() is the
    # documented replacement for measuring elapsed wall-clock time.
    start = time.perf_counter()
    worker = SyncWorker(batch_size, rank, num_ps, num_workers)
    # The first worker sends the parameter layout to all parameter servers
    if worker.rank == worker.num_ps:
        data = {"size": worker.var_size, "shape": worker.var_shape, "total_batch": worker.x_train.shape[0]}
        for i in range(worker.num_ps):
            comm.send(data, dest=i, tag=0)
    # Buffers and assign ops for receiving the broadcasted parameters
    bucket = [np.empty(worker.var_shape[i], dtype=np.float32) for i in range(worker.var_size)]
    ph_bucket = [tf.compat.v1.placeholder(shape=worker.var_shape[i], dtype=tf.float32) for i in range(worker.var_size)]
    bucket_assign = [tf.compat.v1.assign(worker.var_bucket[i], ph_bucket[i]) for i in range(worker.var_size)]
    for step in range(epoch):
        batch_num = int(worker.x_train.shape[0]/batch_size)
        for batch_cnt in range(batch_num):
            # Calculate gradients then send them to the parameter servers
            worker.work(batch_cnt)
            # Receive the updated parameters from their owning server
            for i in range(worker.var_size):
                ind = i // worker.avg_var_size
                if i >= worker.var_size - worker.local_var_size:
                    ind = worker.num_ps-1
                comm.Recv([bucket[i], MPI.FLOAT], source=ind, tag=i-(ind*worker.avg_var_size))
            # Assign broadcasted values into the TF variables
            worker.sess.run(bucket_assign, feed_dict={ph_bucket[i]:bucket[i] for i in range(worker.var_size)})
            if batch_cnt % 10 == 0:
                print("Worker{} epoch: {} batch: {} accuracy: {}".format(rank,step,batch_cnt,worker.sess.run(worker.accuracy, feed_dict={worker.x: worker.x_test, worker.y_: worker.y_test, worker.keep_prob: 1.0})))
    end = time.perf_counter()
    print("Worker{} final accuracy: {}".format(rank,worker.sess.run(worker.accuracy, feed_dict={worker.x: worker.x_test, worker.y_: worker.y_test, worker.keep_prob: 1.0})))
    print("Time: {}".format(end-start))
|
[
"epikjjh@gmail.com"
] |
epikjjh@gmail.com
|
e31ffe1f95b4ccb8bfc800dd1d762b33eea9a203
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractInacloudspaceWordpressCom.py
|
d57d99f589b5ed6165f014a0a818e2dcb1ef10e2
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
def extractInacloudspaceWordpressCom(item):
    '''
    Parser for 'inacloudspace.wordpress.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Ignore previews and posts with neither a chapter nor a volume number.
    if "preview" in title.lower() or not (chp or vol):
        return None
    # (tag to match, release name, translation type)
    tagmap = [
        ('Drunken Exquisiteness', 'Drunken Exquisiteness', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
e6368bdc60f7acd094e96b47d3e1dccfe59f0286
|
3e0a2a0e489f41a5b6b8afb1c09227ae2b4a5c92
|
/picarx.py
|
01beed36417eb096f35725c41f2d1aaa437169f5
|
[] |
no_license
|
mlowell28/RobotSystems
|
0d82f2a9509dd0842be4c71a66182c90478092e7
|
8407c91044c1db002c8ddd097730d07e8892b96e
|
refs/heads/main
| 2023-05-26T05:48:53.198947
| 2021-06-09T21:18:48
| 2021-06-09T21:18:48
| 353,469,095
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,354
|
py
|
from ezblock import *
from ezblock import __reset_mcu__
import time
import atexit
__reset_mcu__()
time.sleep(0.01)
# PWM timer configuration shared by both motor channels.
PERIOD = 4095
PRESCALER = 10
TIMEOUT = 0.02
# Steering and camera (pan/tilt) servos.
dir_servo_pin = Servo(PWM('P2'))
camera_servo_pin1 = Servo(PWM('P0'))
camera_servo_pin2 = Servo(PWM('P1'))
# Rear-wheel motors: a PWM pin for speed, a GPIO pin for direction.
left_rear_pwm_pin = PWM("P13")
right_rear_pwm_pin = PWM("P12")
left_rear_dir_pin = Pin("D4")
right_rear_dir_pin = Pin("D5")
# Grayscale sensor ADC channels.
S0 = ADC('A0')
S1 = ADC('A1')
S2 = ADC('A2')
Servo_dir_flag = 1
dir_cal_value = 0
cam_cal_value_1 = 0
cam_cal_value_2 = 0
motor_direction_pins = [left_rear_dir_pin, right_rear_dir_pin]
motor_speed_pins = [left_rear_pwm_pin, right_rear_pwm_pin]
# Per-motor direction sign and speed offset used by set_motor_speed().
cali_dir_value = [1, -1]
cali_speed_value = [0, 0]
# Initialize the PWM pins
for pin in motor_speed_pins:
    pin.period(PERIOD)
    pin.prescaler(PRESCALER)
def set_motor_speed(motor, speed):
    """Drive one rear motor.

    motor: 1 (left) or 2 (right); speed: signed value, the sign selects the
    rotation direction (after the per-motor calibration sign is applied).
    """
    global cali_speed_value,cali_dir_value
    motor -= 1  # 1-based motor number -> list index
    if speed >= 0:
        direction = 1 * cali_dir_value[motor]
    elif speed < 0:
        direction = -1 * cali_dir_value[motor]
    speed = abs(speed)
    if speed != 0:
        # Map the commanded speed onto the upper half of the duty range,
        # presumably so the motor overcomes static friction -- confirm.
        speed = int(speed /2 ) + 50
    speed = speed - cali_speed_value[motor]
    if direction < 0:
        motor_direction_pins[motor].high()
        motor_speed_pins[motor].pulse_width_percent(speed)
    else:
        motor_direction_pins[motor].low()
        motor_speed_pins[motor].pulse_width_percent(speed)
def motor_speed_calibration(value):
    """Store a speed offset so both rear wheels turn at the same real speed.

    A non-negative `value` is applied as an offset to motor 1 (index 0), a
    negative `value` (by magnitude) to motor 2 (index 1) -- sign convention
    kept from the original code; confirm against the upstream picar-x lib.
    """
    global cali_speed_value, cali_dir_value
    # Bug fix: the original rebound cali_speed_value to the scalar argument
    # and then indexed into it (TypeError) and called abs() on it as if it
    # were still the offset list. Keep cali_speed_value a two-element list
    # and store the per-motor offset derived from `value`.
    if value < 0:
        cali_speed_value[0] = 0
        cali_speed_value[1] = abs(value)
    else:
        cali_speed_value[0] = abs(value)
        cali_speed_value[1] = 0
def motor_direction_calibration(motor, value):
    # Flip the cached rotation sign for motor 1 or 2.
    # 0: positive direction
    # 1:negative direction
    global cali_dir_value
    motor -= 1
    if value == 1:
        cali_dir_value[motor] = -1*cali_dir_value[motor]
def dir_servo_angle_calibration(value):
    # Store the steering-servo zero offset and apply it immediately.
    global dir_cal_value
    dir_cal_value = value
    set_dir_servo_angle(dir_cal_value)
    # dir_servo_pin.angle(dir_cal_value)
def set_dir_servo_angle(value):
    # Steering angle in degrees; the calibration offset is always added.
    global dir_cal_value
    dir_servo_pin.angle(value+dir_cal_value)
def camera_servo1_angle_calibration(value):
    # Zero offset for camera servo 1; applied immediately.
    global cam_cal_value_1
    cam_cal_value_1 = value
    set_camera_servo1_angle(cam_cal_value_1)
    # camera_servo_pin1.angle(cam_cal_value)
def camera_servo2_angle_calibration(value):
    # Zero offset for camera servo 2; applied immediately.
    global cam_cal_value_2
    cam_cal_value_2 = value
    set_camera_servo2_angle(cam_cal_value_2)
    # camera_servo_pin2.angle(cam_cal_value)
def set_camera_servo1_angle(value):
    global cam_cal_value_1
    # The commanded angle is sign-inverted before being sent to the servo.
    camera_servo_pin1.angle(-1 *(value+cam_cal_value_1))
def set_camera_servo2_angle(value):
    global cam_cal_value_2
    camera_servo_pin2.angle(-1 * (value+cam_cal_value_2))
def get_adc_value():
    """Read the three grayscale ADC channels and return them as [S0, S1, S2]."""
    return [S0.read(), S1.read(), S2.read()]
def set_power(speed):
    # Drive both rear motors with the same signed speed.
    set_motor_speed(1, speed)
    set_motor_speed(2, speed)
def backward(speed):
    # Identical to set_power(); kept as a readable alias for reversing.
    set_motor_speed(1, speed)
    set_motor_speed(2, speed)
def forward(speed):
    # Forward negates the commanded speed -- presumably because of how the
    # motors are mounted; confirm against the chassis wiring.
    set_motor_speed(1, -1*speed)
    set_motor_speed(2, -1*speed)
def stop():
    # Cut power to both motors.
    set_motor_speed(1, 0)
    set_motor_speed(2, 0)
def Get_distance():
    """Measure distance in cm with the ultrasonic sensor on D8 (trig)/D9 (echo).

    Returns -1 when waiting for the echo to start times out, -2 when waiting
    for the echo to end times out, otherwise the distance in centimetres.
    """
    timeout=0.01
    trig = Pin('D8')
    echo = Pin('D9')
    # Emit a short trigger pulse to start a measurement.
    trig.low()
    time.sleep(0.01)
    trig.high()
    time.sleep(0.000015)
    trig.low()
    pulse_end = 0
    pulse_start = 0
    timeout_start = time.time()
    # Time the echo pulse: its width is the sound's round-trip time.
    while echo.value()==0:
        pulse_start = time.time()
        if pulse_start - timeout_start > timeout:
            return -1
    while echo.value()==1:
        pulse_end = time.time()
        if pulse_end - timeout_start > timeout:
            return -2
    during = pulse_end - pulse_start
    # Speed of sound ~340 m/s; halve for the round trip, convert m -> cm.
    cm = round(during * 340 / 2 * 100, 2)
    #print(cm)
    return cm
def test():
    # Smoke test: drive forward for one second (other checks left disabled).
    # set_dir_servo_angle(0)
    #time.sleep(1)
    forward(50)
    time.sleep(1)
    # set_dir_servo_angle(0)
    # time.sleep(1)
    # set_motor_speed(1, 1)
    # set_motor_speed(2, 1)
    # camera_servo_pin.angle(0)
if __name__ == "__main__":
    # Make sure the motors are stopped even if the loop below is interrupted.
    atexit.register(stop)
    __reset_mcu__()
    time.sleep(0.01)
    dir_servo_angle_calibration(-10)
    time.sleep(.1)
    while 1:
        test()
        time.sleep(1)
|
[
"you@example.com"
] |
you@example.com
|
4516a4a31e687c163b02622a904cae6e349a07f4
|
bf9ae1e4269952622c7f03dc86c418d21eb20ec7
|
/PythonCode-FatherandSon/示例代码/TIO_CH22_2.py
|
1956584d2e89471f7f30eefe14a58c4e5d5668e8
|
[] |
no_license
|
ChuixinZeng/PythonStudyCode
|
5692ca7cf5fe9b9ca24e9f54f6594f3a79b0ffb5
|
2986c83c804da51ef386ca419d0c4ebcf194cf8f
|
refs/heads/master
| 2021-01-21T16:09:58.622069
| 2019-12-01T14:30:36
| 2019-12-01T14:30:36
| 91,876,874
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
# TIO_CH22_2.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Answer to Try It Out, Question 2, Chapter 22
# Save some data to a text file
# NOTE(review): this is Python 2 code -- raw_input() does not exist in
# Python 3 (where it became input()).
name = raw_input("Enter your name: ")
age = raw_input("Enter your age: ")
color = raw_input("Enter your favorite color: ")
food = raw_input("Enter your favorite food: ")
# Write one answer per line; the final answer has no trailing newline.
my_data = open("my_data_file.txt", 'w')
my_data.write(name + "\n")
my_data.write(age + "\n")
my_data.write(color + "\n")
my_data.write(food)
my_data.close()
|
[
"zengchuixin@126.com"
] |
zengchuixin@126.com
|
9b8eaf92a3384ae848cae589c602cbf9bb952432
|
ff93e108a358a40d71b426bb9615587dfcab4d03
|
/Python_Basic/5_Dictionaries/basics_of_dict.py
|
4b5cd56fb88e023dbfd19e8c705493c7c71ddf15
|
[] |
no_license
|
soumya9988/Python_Machine_Learning_Basics
|
074ff0e8e55fd925ca50e0f9b56dba76fc93d187
|
3711bc8e618123420985d01304e13051d9fb13e0
|
refs/heads/master
| 2020-03-31T14:31:49.217429
| 2019-11-16T21:55:54
| 2019-11-16T21:55:54
| 152,298,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# A dict with mixed key and value types (str and int keys are both allowed).
spam = {'Alice' : 30,
        'planets' : ['mars', 'venus', 'earth', 'pluto'],
        'pi' : 3.14,
        1: 13}
# Key, values and items in dictionary
print(spam.keys())
print(spam.values())
print(spam.items())
# setdefault method in dict: inserts only when the key is absent.
spam.setdefault('colour', 'black')
print(spam)
spam.setdefault('colour', 'pink')  # no effect: 'colour' already exists
print(spam)
# get() method in dict with default value (default used only for missing keys)
print(spam.get('Alice', 50))
print(spam.get('Alan', 50))
|
[
"soumya.9988@gmail.com"
] |
soumya.9988@gmail.com
|
bb36fa74e3222d89bf01c2cafbdbe15c907ad403
|
30227ff573bcec32644fca1cca42ef4cdd612c3e
|
/leetcode/linkedList/problems/tests/test_list_deep_copy.py
|
ebe297cf39216dcc76a910b90e47673193f9a26c
|
[] |
no_license
|
saurabh-pandey/AlgoAndDS
|
bc55864422c93e6c93b8432e483394f286ce8ef2
|
dad11dedea9ceb4904d6c2dea801ce0172abfc81
|
refs/heads/master
| 2023-07-01T09:12:57.951949
| 2023-06-15T12:16:36
| 2023-06-15T12:16:36
| 88,239,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,691
|
py
|
import pytest
import problems.list_deep_copy as prob
def toList(head):
    """Flatten a random-pointer list into [[value, random_value_or_None], ...].

    Walks the `next` chain from `head`; for each node records its value and
    the value of the node its `random` pointer targets (None if unset).
    """
    flattened = []
    node = head
    while node is not None:
        random_val = node.random.val if node.random is not None else None
        flattened.append([node.val, random_val])
        node = node.next
    return flattened
def createRandomList(input):
    """Build a random-pointer list from [[value, random_index_or_None], ...].

    Returns the head node, or None for an empty spec. `random` is linked
    only when the index is not None and within bounds.
    """
    nodes = [prob.Node(spec[0]) for spec in input]
    # Chain the `next` pointers in order.
    for earlier, later in zip(nodes, nodes[1:]):
        earlier.next = later
    # Resolve `random` pointers by index into the node list.
    for node, spec in zip(nodes, input):
        target = spec[1]
        if target is not None and target < len(nodes):
            node.random = nodes[target]
    return nodes[0] if nodes else None
class TestListDeepCopy:
    """copyRandomList must produce a structurally identical deep copy."""
    def _assert_copy_matches(self, spec):
        # Build a list from the spec, deep-copy it, compare flattened forms.
        head = createRandomList(spec)
        copiedList = prob.copyRandomList(head)
        assert toList(copiedList) == toList(head)
    def test_example1(self):
        self._assert_copy_matches([[7,None],[13,0],[11,4],[10,2],[1,0]])
    def test_example2(self):
        self._assert_copy_matches([[1,1],[2,1]])
    def test_example3(self):
        self._assert_copy_matches([[3,None],[3,0],[3,None]])
    def test_example4(self):
        # Empty list: copying None must yield None.
        self._assert_copy_matches([])
|
[
"saurabhpandey85@gmail.com"
] |
saurabhpandey85@gmail.com
|
643dfe06feab3a458e55f0b9b5cf060e9f8d5409
|
75f5767b35095d0afcc616925bf6768ec32cb79f
|
/old/src/coc.py
|
017875a415bb4f9f88eb95fdeb801f756b6fa62e
|
[] |
no_license
|
ai-se/cocreport
|
ca1832d013c45fd908d92de650ac7bc3b5a3d47a
|
102b9240fdd640ee55564a7d44504b0f29f22add
|
refs/heads/master
| 2020-04-06T09:47:50.280628
| 2016-11-26T18:29:59
| 2016-11-26T18:29:59
| 30,427,607
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,322
|
py
|
# COCOMO II tuning table: one row per attribute, columns are the six
# rating levels; `_` (None) marks a level undefined for that attribute.
_ = None; Coc2tunings = [[
# vlow low nom high vhigh xhigh
# scale factors:
'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _],[
'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _],[
'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _],[
'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _],[
'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _],[
# effort multipliers:
'acap', 1.42, 1.19, 1.00, 0.85, 0.71, _],[
'aexp', 1.22, 1.10, 1.00, 0.88, 0.81, _],[
'cplx', 0.73, 0.87, 1.00, 1.17, 1.34, 1.74],[
'data', _, 0.90, 1.00, 1.14, 1.28, _],[
'docu', 0.81, 0.91, 1.00, 1.11, 1.23, _],[
'ltex', 1.20, 1.09, 1.00, 0.91, 0.84, _],[
'pcap', 1.34, 1.15, 1.00, 0.88, 0.76, _],[
'pcon', 1.29, 1.12, 1.00, 0.90, 0.81, _],[
'plex', 1.19, 1.09, 1.00, 0.91, 0.85, _],[
'pvol', _, 0.87, 1.00, 1.15, 1.30, _],[
'rely', 0.82, 0.92, 1.00, 1.10, 1.26, _],[
'ruse', _, 0.95, 1.00, 1.07, 1.15, 1.24],[
'sced', 1.43, 1.14, 1.00, 1.00, 1.00, _],[
'site', 1.22, 1.09, 1.00, 0.93, 0.86, 0.80],[
'stor', _, _, 1.00, 1.05, 1.17, 1.46],[
'time', _, _, 1.00, 1.11, 1.29, 1.63],[
'tool', 1.17, 1.09, 1.00, 0.90, 0.78, _]]
def COCOMO2(project, a = 2.94, b = 0.91, # defaults
            tunes= Coc2tunings):# defaults
    """Return the COCOMO II effort estimate for `project`.

    `project` holds a rating-column index into `tunes` for each of the 5
    scale factors (entries 0-4) and 17 effort multipliers (entries 5-21),
    plus the size in KLOC at index 22.
    """
    scaleFactors, effortMultipliers = 5, 17
    kloc = 22  # index of the KLOC entry in a project row
    # Scale factors feed the exponent of the size term...
    sfs = sum(tunes[i][project[i]] for i in range(scaleFactors))
    # ...while effort multipliers scale the estimate linearly.
    # NOTE(review): `ems` starts at 5 in the original code (not 1);
    # preserved as-is to keep behavior identical.
    ems = 5
    for j in range(scaleFactors, scaleFactors + effortMultipliers):
        ems *= tunes[j][project[j]]
    return a * ems * project[kloc] ** (b + 0.01 * sfs)
def COCONUT(training, # list of projects
            a=10, b=1, # initial (a,b) guess
            deltaA = 10, # range of "a" guesses
            deltaB = 0.5, # range of "b" guesses
            depth = 10, # max recursive calls
            constricting=0.66):# next time,guess less
    """Recursively shrink the (a, b) search window around the best guess
    found so far; returns the tuned (a, b) pair for COCOMO2."""
    if depth > 0:
        useful,a1,b1= GUESSES(training,a,b,deltaA,deltaB)
        if useful: # only continue if something useful
            return COCONUT(training,
                           a1, b1, # our new next guess
                           deltaA * constricting,
                           deltaB * constricting,
                           depth - 1)
    return a,b
def GUESSES(training, a,b, deltaA, deltaB,
            repeats=20): # number of guesses
    """Sample `repeats` random (a, b) pairs within the current window and
    keep the one with the lowest training error.
    NOTE(review): `rand` is not defined in this file -- presumably
    `from random import random as rand` upstream; confirm."""
    useful, a1,b1,least,n = False, a,b, 10**32, 0
    while n < repeats:
        n += 1
        aGuess = a1 - deltaA + 2 * deltaA * rand()
        bGuess = b1 - deltaB + 2 * deltaB * rand()
        error = ASSESS(training, aGuess, bGuess)
        if error < least: # found a new best guess
            useful,a1,b1,least = True,aGuess,bGuess,error
    return useful,a1,b1
def ASSESS(training, aGuess, bGuess):
    """Mean magnitude of relative error of COCOMO2(aGuess, bGuess) over the
    training projects. NOTE(review): `effort` is not defined in this file."""
    error = 0.0
    for project in training: # find error on training
        predicted = COCOMO2(project, aGuess, bGuess)
        actual = effort(project)
        error += abs(predicted - actual) / actual
    return error / len(training) # mean training error
def RIG():
    """Sketch of the leave-one-out experiment rig.
    NOTE(review): pseudocode -- COC81/NASA83/etc., `LEARNERS`, `learn`,
    `guess`, and `rank` are undefined here, and the `print` statement below
    is Python 2 syntax (this module will not parse under Python 3)."""
    DATA = { COC81, NASA83, COC05, NASA10 }
    for data in DATA: # e.g. data = COC81
        mres= {}
        for learner in LEARNERS: # e.g. learner = COCONUT
            for n in range(10): #10 times repeat
                for project in DATA: # e.g. one project
                    training = data - project # leave-one-out
                    model = learn(training)
                    estimate = guess(model, project)
                    actual = effort(project)
                    mre = abs(actual - estimate)/actual
                    mres[learner][n] = mre
        print rank(mres) # some statistical tests
def demo():
    """Print effort at the least/nominal/most ratings for growing KLOC.
    Uses Python 2 print syntax (see NOTE on RIG)."""
    most , least, mid = {},{},{}
    for i,x in enumerate(Coc2tunings):
        ranges = x[1:]
        hi, lo = -1, 10**32
        jhi, jlo= -1, 10
        for j,y in enumerate(ranges):
            k = j+1
            if y == _:
                continue
            if y > hi:
                jhi,hi = k,y
            if y < lo:
                jlo,lo = k,y
        most[i] = jhi
        least[i] = jlo
        mid[i] = 4
    for k in range(10,1000,10):
        least[22] = most[22] = mid[22] = k
        print k,COCOMO2(least), COCOMO2(mid), COCOMO2(most)
demo()
|
[
"tim.menzies@gmail.com"
] |
tim.menzies@gmail.com
|
08b9326e06cca6119034079ff245832c668e5a0b
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/surface/compute/networks/subnets/describe.py
|
aa960587e544f5f40969d3afadff5c479fd18533
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Command for describing subnetworks."""
from googlecloudsdk.api_lib.compute import base_classes
class Describe(base_classes.RegionalDescriber):
  """Describe a Google Compute Engine subnetwork.
  *{command}* displays all data associated with a Google Compute
  Engine subnetwork.
  """
  @staticmethod
  def Args(parser):
    # Register the regional-describe arguments and a fields-selection flag.
    base_classes.RegionalDescriber.Args(parser, 'compute.subnetworks')
    base_classes.AddFieldsFlag(parser, 'subnetworks')
  @property
  def service(self):
    # API service the base describer should call.
    return self.compute.subnetworks
  @property
  def resource_type(self):
    # Resource collection name within the compute API.
    return 'subnetworks'
|
[
"joe@longreen.io"
] |
joe@longreen.io
|
d45044c57759e27116e80ecbd70cd22d4cb3dac8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03418/s290294208.py
|
0dc31ee21ead32a26ad160f8881c6dff33c39726
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
n, k = map(int, input().split())
# Start from all n*n ordered pairs and subtract, for every divisor b,
# the pairs that do NOT satisfy the remainder condition.
ans = n * n
for b in range(1, n + 1):
    if b > k:
        ans -= (n // b) * k + min(n % b, max(k - 1, 0))
    else:
        # When b <= k no pair with this b qualifies; remove all n of them.
        ans -= n
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9b27a14f61da3ca6e8ef0f45c05e2a1affff2547
|
6ac2c27121d965babbb4bcbc7c479c26bf60bdf5
|
/pymatex/search/IndexCreatorVisitor.py
|
4fe3d2d775c13e181728520af7fba07bb55e3a94
|
[
"MIT"
] |
permissive
|
Gawaboumga/PyMatex
|
5a2e18c3e17d3b76e814492f7e2ca63a57d720e9
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
refs/heads/master
| 2020-03-28T01:40:32.341723
| 2018-12-20T13:49:12
| 2018-12-20T13:49:12
| 147,521,693
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,268
|
py
|
from pymatex.listener import MatexASTVisitor
from pymatex.node import *
class IndexCreatorVisitor(MatexASTVisitor.MatexASTVisitor):
    """AST visitor that builds a search index for one math expression.

    Every visited node is recorded in the shared ``data`` dict, keyed by the
    node's depth and :class:`NodeType`, mapping to the set of expression
    primary keys (``pk``) containing such a node.  Each ``visit_*`` method
    returns the depth of the visited subtree (leaves are depth 0).
    """
    def __init__(self, data: dict, pk: int):
        # Shared index: {depth: {NodeType: set(pk)}} or, for typed nodes
        # (constants/variables), {depth: {NodeType: {value: set(pk)}}}.
        self.data = data
        # Primary key of the expression currently being indexed.
        self.pk = pk
        # {depth: set(NodeType)} — distinct node types seen per depth.
        self.nodes_seen = {}
        # Names of variables currently bound by an enclosing sum/product/etc.
        self.bound_variables = set()
    def get_number_of_nodes_of_different_nodes(self):
        # Number of distinct depths at which at least one node was recorded.
        return len(self.nodes_seen)
    def visit_addition(self, addition_node: Addition):
        """Index an addition; depth = max(child depths) + 1."""
        depth_lhs = addition_node.lhs.accept(self)
        depth_rhs = addition_node.rhs.accept(self)
        node_depth = max(depth_lhs, depth_rhs) + 1
        self.add(node_depth, NodeType.ADDITION)
        return node_depth
    def visit_constant(self, constant_node: Constant):
        """Index a constant leaf, keyed by its literal value."""
        node_depth = 0
        self.add(node_depth, NodeType.CONSTANT, constant_node.value)
        return node_depth
    def visit_division(self, division_node: Division):
        """Index a division; depth = max(child depths) + 1."""
        depth_lhs = division_node.lhs.accept(self)
        depth_rhs = division_node.rhs.accept(self)
        node_depth = max(depth_lhs, depth_rhs) + 1
        self.add(node_depth, NodeType.DIVISION)
        return node_depth
    def visit_exponentiation(self, exponentiation_node: Exponentiation):
        """Index an exponentiation; depth = max(base, exponent depths) + 1."""
        depth_expr = exponentiation_node.lhs.accept(self)
        depth_exponent = exponentiation_node.rhs.accept(self)
        node_depth = max(depth_expr, depth_exponent) + 1
        self.add(node_depth, NodeType.EXPONENTIATION)
        return node_depth
    def visit_fraction(self, fraction_node: Fraction):
        """Index a fraction-like node with an optional bound variable/range."""
        if fraction_node.variable:
            fraction_node.variable.accept(self)
        fraction_node.start_range.accept(self)
        if fraction_node.end_range:
            fraction_node.end_range.accept(self)
        # Mark the variable as bound while visiting the body so occurrences
        # inside are indexed as BOUNDVARIABLE rather than VARIABLE.
        if fraction_node.variable:
            self.add_bound_variable(fraction_node.variable)
        depth_expression = fraction_node.expression.accept(self)
        if fraction_node.variable:
            self.remove_bound_variable(fraction_node.variable)
        node_depth = depth_expression + 1
        self.add(node_depth, NodeType.FRACTION)
        return node_depth
    def visit_function(self, function_node: Function):
        """Index a function application.

        NOTE(review): combines argument depths with ``min`` although every
        other n-ary node uses ``max`` — confirm this asymmetry is intended.
        """
        first_argument = function_node.argument(0)
        depth = first_argument.accept(self)
        for i in range(1, function_node.number_of_arguments()):
            depth = min(depth, function_node.argument(i).accept(self))
        node_depth = depth + 1
        self.add(node_depth, NodeType.FUNCTION)
        return node_depth
    def visit_indexed_variable(self, indexed_variable_node: IndexedVariable):
        """Index a subscripted variable, keyed by its base variable name."""
        depth = indexed_variable_node.index.accept(self)
        node_depth = depth + 1
        self.add(node_depth, NodeType.INDEXEDVARIABLE, indexed_variable_node.variable)
        return node_depth
    def visit_integral(self, integral_node: Integral):
        """Index an integral with its bound integration variable.

        NOTE(review): records the node as ``NodeType.SUMMATION`` — confirm
        whether a dedicated INTEGRAL node type was intended.
        """
        integral_node.variable.accept(self)
        integral_node.start_range.accept(self)
        integral_node.end_range.accept(self)
        self.add_bound_variable(integral_node.variable)
        depth_expression = integral_node.expression.accept(self)
        self.remove_bound_variable(integral_node.variable)
        node_depth = depth_expression + 1
        self.add(node_depth, NodeType.SUMMATION)
        return node_depth
    def visit_multiplication(self, multiplication_node: Multiplication):
        """Index a multiplication; depth = max(child depths) + 1."""
        depth_lhs = multiplication_node.lhs.accept(self)
        depth_rhs = multiplication_node.rhs.accept(self)
        node_depth = max(depth_lhs, depth_rhs) + 1
        self.add(node_depth, NodeType.MULTIPLICATION)
        return node_depth
    def visit_negate(self, negate_node: Negate):
        """Index a unary negation.

        NOTE(review): the node is recorded at ``depth + 1`` but the method
        returns ``depth`` (not ``depth + 1``), unlike the other unary
        visitors — confirm whether negation is meant to be depth-neutral.
        """
        depth = negate_node.node.accept(self)
        self.add(depth + 1, NodeType.NEGATE)
        return depth
    def visit_product(self, product_node: Product):
        """Index a big-Pi product with an optional bound variable/range."""
        if product_node.variable:
            product_node.variable.accept(self)
        product_node.start_range.accept(self)
        if product_node.end_range:
            product_node.end_range.accept(self)
        if product_node.variable:
            self.add_bound_variable(product_node.variable)
        depth_expression = product_node.expression.accept(self)
        if product_node.variable:
            self.remove_bound_variable(product_node.variable)
        node_depth = depth_expression + 1
        self.add(node_depth, NodeType.PRODUCT)
        return node_depth
    def visit_set(self, set_node: Set):
        """Index a set node; depth = max(child depths) + 1."""
        depth_lhs = set_node.lhs.accept(self)
        depth_rhs = set_node.rhs.accept(self)
        node_depth = max(depth_lhs, depth_rhs) + 1
        self.add(node_depth, NodeType.SET)
        return node_depth
    def visit_set_difference(self, set_difference: SetDifference):
        """Index a set difference; depth = max(child depths) + 1."""
        depth_lhs = set_difference.lhs.accept(self)
        depth_rhs = set_difference.rhs.accept(self)
        node_depth = max(depth_lhs, depth_rhs) + 1
        self.add(node_depth, NodeType.SET_DIFFERENCE)
        return node_depth
    def visit_subtraction(self, subtraction_node: Subtraction):
        """Index a subtraction; depth = max(child depths) + 1."""
        depth_lhs = subtraction_node.lhs.accept(self)
        depth_rhs = subtraction_node.rhs.accept(self)
        node_depth = max(depth_lhs, depth_rhs) + 1
        self.add(node_depth, NodeType.SUBTRACTION)
        return node_depth
    def visit_summation(self, summation_node: Summation):
        """Index a big-Sigma summation with an optional bound variable/range."""
        if summation_node.variable:
            summation_node.variable.accept(self)
        summation_node.start_range.accept(self)
        if summation_node.end_range:
            summation_node.end_range.accept(self)
        if summation_node.variable:
            self.add_bound_variable(summation_node.variable)
        depth_expression = summation_node.expression.accept(self)
        if summation_node.variable:
            self.remove_bound_variable(summation_node.variable)
        node_depth = depth_expression + 1
        self.add(node_depth, NodeType.SUMMATION)
        return node_depth
    def visit_variable(self, variable_node: Variable):
        """Index a variable leaf as BOUNDVARIABLE or VARIABLE depending on scope."""
        node_depth = 0
        if str(variable_node.variable) in self.bound_variables:
            self.add(node_depth, NodeType.BOUNDVARIABLE, variable_node.variable)
        else:
            self.add(node_depth, NodeType.VARIABLE, variable_node.variable)
        return node_depth
    def add(self, node_depth: int, node_type: NodeType, external_data=None):
        """Record ``self.pk`` under (depth, type[, external_data]) in the index."""
        nodes = self.data.get(node_depth, dict())
        if external_data is None:
            # Untyped node: depth -> type -> set of pks.
            objects = nodes.get(node_type, set())
            objects.add(self.pk)
        else:
            # Typed node: depth -> type -> value -> set of pks.
            objects = nodes.get(node_type, dict())
            associated = objects.get(external_data, set())
            associated.add(self.pk)
            objects[external_data] = associated
        nodes[node_type] = objects
        self.data[node_depth] = nodes
        # Track distinct node types per depth.
        # NOTE(review): .get(..., set()) never returns None, so the first
        # branch below is dead; net effect is simply nodes_depth.add(node_type).
        nodes_depth = self.nodes_seen.get(node_depth, set())
        if nodes_depth is None:
            nodes_depth.add(node_type)
        else:
            if node_type not in nodes_depth:
                nodes_depth.add(node_type)
        self.nodes_seen[node_depth] = nodes_depth
    def add_bound_variable(self, variable: Variable):
        # Enter a binder scope: occurrences of this name become BOUNDVARIABLE.
        self.bound_variables.add(str(variable))
    def remove_bound_variable(self, variable: Variable):
        # Leave the binder scope (raises KeyError if not previously added).
        self.bound_variables.remove(str(variable))
|
[
"yourihubaut@hotmail.com"
] |
yourihubaut@hotmail.com
|
276069bda00ba209a3738e7f975ad78e5243e7ac
|
a6155458f58f2e40e2583557cf807eda52a0013b
|
/catalog/database_helpers.py
|
e58588c048dacef8e37b8ccd0c9d2a8d74ce96d9
|
[] |
no_license
|
georgeplusplus-ZZ/udacity-project-2
|
ab6c80052cc601508743fd5003ae5d09103d8fbb
|
5442f1f99808af2f8663d59fdbd02be7dd7e425a
|
refs/heads/master
| 2021-10-26T02:47:28.841918
| 2019-04-10T01:52:40
| 2019-04-10T01:52:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
#George Haralampopoulos 2019
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from catalog import app
from catalog.database_setup import Base
import requests
def connect_to_database():
    """Connect to the sqlite catalog database and return a new SQLAlchemy session."""
    engine = create_engine('sqlite:///nycattractions.db')
    Base.metadata.bind = engine
    session_factory = sessionmaker(bind=engine)
    return session_factory()
def token_still_valid(access_token):
    """Return True if the Google OAuth2 access token has not expired yet.

    Queries Google's tokeninfo endpoint. Any non-200 response, a missing
    "expires_in" field, or a non-positive remaining lifetime counts as invalid.
    """
    gapi_request = "https://www.googleapis.com/oauth2/v1/tokeninfo?access_token="
    gapi_request += access_token
    resp = requests.get(gapi_request)
    if resp.status_code == 200:
        # Default to 0 when "expires_in" is absent: the previous
        # resp_json.get("expires_in") > 0 raised TypeError on a missing key
        # (None > 0 is illegal in Python 3).
        if resp.json().get("expires_in", 0) > 0:
            return True
    return False
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
5ad8c85c4220faba9ed2da5a89e7b73fe36a248d
|
e4ab984c6d27167849f6c6e2d8ced3c0ee167c7c
|
/Edabit/Combinations.py
|
3a5bf494a7486fc769a4c9d00ba532a776266274
|
[] |
no_license
|
ravalrupalj/BrainTeasers
|
b3bc2a528edf05ef20291367f538cf214c832bf9
|
c3a48453dda29fe016ff89f21f8ee8d0970a3cf3
|
refs/heads/master
| 2023-02-10T02:09:59.443901
| 2021-01-06T02:03:34
| 2021-01-06T02:03:34
| 255,720,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
#Combinations
#Create a function that takes a variable number of groups of items, and returns the number of ways the items can be arranged, with one item from each group. Order does not matter.
def combinations(*items):
    """Return the number of ways to pick one item from each group.

    Each positional argument is a group size; the answer is the product of
    the sizes. Groups of size 0 are ignored (they contribute no choice), so
    combinations(6, 7, 0) == 42, matching the documented examples.
    With no arguments the result is the empty product, 1.
    """
    # Local import keeps the module's import surface unchanged.
    from math import prod
    # The original built str(t) for every item and compared against '0';
    # comparing the numbers directly is equivalent for ints and simpler.
    return prod(x for x in items if x != 0)
# Exercise combinations() against the documented examples
# (expected values shown alongside each case).
example_groups = [
    (6, 7, 0),                    # 42
    (2, 3, 4, 5, 6, 7, 8, 9, 10), # 3628800
    (2, 3),                       # 6
    (3, 7, 4),                    # 84
    (2, 3, 4, 5),                 # 120
]
for groups in example_groups:
    print(combinations(*groups))
|
[
"63676082+ravalrupalj@users.noreply.github.com"
] |
63676082+ravalrupalj@users.noreply.github.com
|
f0bb0595cc4ae45f13b3ffda4adab054d0aab904
|
71efd37d485c43f5872bf35a3fde45ba7aa7d91e
|
/flask_server_side/app.py
|
ba35e9257a2ee933810273b47cc01df0f388470f
|
[] |
no_license
|
jreiher2003/push-notifications
|
e6f90cb056aad726a6f5049139b36a6dd5368aff
|
d59ae39929ad0d6fce71ae2ca5b255d940530d62
|
refs/heads/master
| 2021-01-11T12:03:42.209063
| 2016-12-15T14:16:00
| 2016-12-15T14:16:00
| 76,565,602
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
from flask import Flask, render_template, request, redirect, url_for, Response, session
from flask_sse import sse
import redis
import datetime
app = Flask(__name__)
# app.config["REDIS_URL"] = "redis://localhost"
# app.register_blueprint(sse, url_prefix='/stream')
app.secret_key = 'asdf'  # NOTE(review): hard-coded secret key — not safe for production
red = redis.StrictRedis()  # shared redis client used for the chat pub/sub channel
# @app.route('/')
# def index():
# return render_template("index.html")
def event_stream():
    # Generator backing the SSE endpoint: blocks on the redis 'chat'
    # pubsub channel and yields each message as a server-sent event.
    # NOTE(review): `print message` is Python 2 statement syntax — this
    # module predates Python 3.
    pubsub = red.pubsub()
    pubsub.subscribe('chat')
    for message in pubsub.listen():
        print message
        yield 'data: %s\n\n' % message['data']
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Minimal login form: store the submitted name in the session, then go home."""
    if request.method != 'POST':
        # GET: show the bare form.
        return '<form action="" method="post">user: <input name="user">'
    session['user'] = request.form['user']
    return redirect('/')
@app.route("/logout")
def logout():
    # Drop the session user and send the client back to the home page.
    # NOTE(review): session.pop('user') raises KeyError when no user is
    # logged in — confirm whether that is intended.
    session.pop('user')
    return redirect('/')
@app.route('/')
def home():
    """Chat home page; requires a logged-in session user."""
    if 'user' not in session:
        return redirect('/login')
    # Fix: render_template accepts template context only as keyword
    # arguments; the original positional call raised
    # "TypeError: render_template() takes 1 positional argument".
    return render_template('index2.html', user=session['user'])
@app.route('/post', methods=['POST'])
def post():
    """Publish the submitted chat message on the redis 'chat' channel."""
    body = request.form['message']
    author = session.get('user', 'anonymous')
    stamp = datetime.datetime.now().replace(microsecond=0).time()
    red.publish('chat', u'[%s] %s: %s' % (stamp.isoformat(), author, body))
    return "print message"
@app.route('/stream')
def stream():
    # Server-sent-events endpoint: streams redis chat messages to the browser.
    return Response(event_stream(), mimetype="text/event-stream")
# @app.route('/new')
# def new():
# return render_template("message.html")
# @app.route('/send', methods=['POST'])
# def send():
# data = {"message": request.form.get('message')}
# sse.publish(type="testevent", data=data, channel='test')
# return redirect(url_for('new'))
# @app.route('/hello')
# def publish_hello():
# data = {"message": "Hello!"}
# sse.publish(data=data, type='greeting', channel='test2')
# return "Message sent!"
if __name__ == "__main__":
    # threaded=True so the blocking /stream generator does not starve other requests.
    app.run(host="0.0.0.0", port=5020, debug=True, threaded=True)
|
[
"jreiher2003@yahoo.com"
] |
jreiher2003@yahoo.com
|
b8f3957b6f14d803419ff4d6519073e3f1c398a8
|
489da428bc0e1ab8f5117c0f8ba5ddb7aff05360
|
/scripts/motors1.py
|
7d42b31c04843bce8fcf8b7e1775bd5f39123592
|
[
"BSD-3-Clause"
] |
permissive
|
norihisayamada/pimouse_ros
|
4f77e769b7ac9cbfc4af6e703764af1d2df56b30
|
3b07880a6ceb584d92cf640c1a38864130d44189
|
refs/heads/master
| 2020-04-17T03:03:49.424738
| 2019-02-23T11:05:38
| 2019-02-23T11:05:38
| 166,164,916
| 1
| 0
|
BSD-3-Clause
| 2019-01-17T05:18:41
| 2019-01-17T05:18:41
| null |
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
#!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
class Motor():
    """ROS node wrapper around the Raspberry Pi Mouse motor device files.

    Subscribes to:
      * ``motor_raw`` (MotorFreqs): raw left/right stepper frequencies.
      * ``cmd_vel`` (Twist): converted to wheel frequencies.
    """
    def __init__(self):
        # Power the motor driver on; exit if the device file is unusable.
        if not self.set_power(True): sys.exit(1)
        # Cut motor power (default onoff=False) when the node shuts down.
        rospy.on_shutdown(self.set_power)
        self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
        self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
        self.last_time = rospy.Time.now()
        self.using_cmd_vel = False
    def set_power(self,onoff=False):
        """Write the motor-enable device file; return True on success."""
        en = "/dev/rtmotoren0"
        try:
            with open(en,'w') as f:
                f.write("1\n" if onoff else "0\n")
            self.is_on = onoff
            return True
        except (IOError, OSError):
            # Narrowed from a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit; only file errors are expected here.
            rospy.logerr("cannot write to " + en)
            return False
    def set_raw_freq(self,left_hz,right_hz):
        """Write left/right stepper frequencies (Hz) to the raw motor devices."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return
        try:
            with open("/dev/rtmotor_raw_l0",'w') as lf,\
                 open("/dev/rtmotor_raw_r0",'w') as rf:
                lf.write(str(int(round(left_hz))) + "\n")
                rf.write(str(int(round(right_hz))) + "\n")
        except (IOError, OSError):
            # Narrowed from a bare ``except:`` — see set_power.
            rospy.logerr("cannot write to rtmotor_raw_*")
    def callback_raw_freq(self,message):
        # Pass raw frequencies straight through to the devices.
        self.set_raw_freq(message.left_hz,message.right_hz)
    def callback_cmd_vel(self,message):
        # Convert a Twist (m/s, rad/s) into per-wheel stepper frequencies.
        # Constants encode the wheel geometry of the Raspberry Pi Mouse.
        forward_hz = 80000.0*message.linear.x/(9*math.pi)
        rot_hz = 400.0*message.angular.z/math.pi
        self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
        self.using_cmd_vel = True
        self.last_time = rospy.Time.now()
if __name__ == '__main__':
    rospy.init_node('motors')
    m = Motor()
    rate = rospy.Rate(10)  # 10 Hz watchdog loop
    while not rospy.is_shutdown():
        # Safety stop: halt the motors if no cmd_vel arrived for 1 second.
        if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
            m.set_raw_freq(0,0)
            m.using_cmd_vel = False
        rate.sleep()
# Copyright 2016 Ryuichi Ueda
# Released under the BSD License.
# To make line numbers be identical with the book, this statement is written here. Don't move it to the header.
|
[
"ryuichiueda@gmail.com"
] |
ryuichiueda@gmail.com
|
de1581f90bcd424674cf7ab97354de05f7ccfff9
|
b69e78b6757d7e9ca90272391116fa8c197d9d53
|
/testEfficientDockSize.py
|
57604164ef8abdcf8b573be965f813d415cdcd17
|
[] |
no_license
|
wangyingtaodeepin/autotest-dde-dock
|
d1cd3146c42d026e9a2f70205a72c65fb1927c7d
|
89e37500f6ba994df482599a0eeb3f1c175d51de
|
refs/heads/master
| 2021-01-09T20:39:48.989833
| 2016-08-10T01:33:48
| 2016-08-10T01:33:48
| 64,373,794
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,697
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from lib import utils
from lib import runner
result = True
class MyTestResult(runner.MyTextTestResult):
    """Test result that records overall pass/fail into the module-global ``result``."""
    def addError(self, test, err):
        super(MyTestResult, self).addError(test, err)
        global result
        # ``result = result and False`` always evaluated to False; say so directly.
        result = False
    def addFailure(self, test, err):
        super(MyTestResult, self).addFailure(test, err)
        global result
        result = False
class EfficientDockSize(unittest.TestCase):
    """Checks dock icon sizing in efficient mode (case all-2493 / 68490)."""
    @classmethod
    def setUpClass(cls):
        # Remember the user's settings so tearDownClass can restore them,
        # then force efficient mode with the dock at the bottom.
        cls.caseid = '68490'
        cls.casename = "all-2493:高效模式大图标显示"
        cls.ddedockobject = utils.getDdeDockObject()
        cls.defaultdisplaymode = utils.getDdeDockDisplayMode()
        cls.defaultposition = utils.getDdeDockPosition()
        if utils.dock.displaymode_efficient != cls.defaultdisplaymode:
            utils.setDdeDockDisplayMode(utils.dock.displaymode_efficient)
        if utils.dock.position_bottom != cls.defaultposition:
            utils.setDdeDockPosition(utils.dock.position_bottom)
    @classmethod
    def tearDownClass(cls):
        # Report the aggregated result, then restore the original settings.
        global result
        utils.commitresult(cls.caseid, result)
        if utils.getDdeDockDisplayMode() != cls.defaultdisplaymode:
            utils.setDdeDockDisplayMode(cls.defaultdisplaymode)
        if utils.getDdeDockPosition() != cls.defaultposition:
            utils.setDdeDockPosition(cls.defaultposition)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def _select_icon_size(self, extra_downs):
        """Open the dock context menu and pick an icon-size entry.

        Navigation is: down, left, down, left, then ``extra_downs`` downs
        (1 = large, 2 = medium, 3 = small), then enter. Extracted from the
        three testChangeIconSizeTo* methods, which only differed in the
        number of trailing down presses.
        """
        utils.m.click(int(utils.resolution.width/2), utils.resolution.height, 2)
        utils.dockmenu.findMainWindow()
        utils.keySingle(utils.k.down_key)
        utils.keySingle(utils.k.left_key)
        utils.keySingle(utils.k.down_key)
        utils.keySingle(utils.k.left_key)
        for _ in range(extra_downs):
            utils.keySingle(utils.k.down_key)
        utils.keySingle(utils.k.enter_key)
    def testIconSize(self):
        # Check the launcher widget size against the size derived from the
        # dbus icon-size setting for the active display mode.
        launcher = self.ddedockobject.child("Launcher")
        dbus_iconsize = utils.getDdeDockIconSize()
        displaymode = utils.getDdeDockDisplayMode()
        calculate_iconsize_y = 0
        calculate_iconsize_x = 0
        if utils.dock.displaymode_fashion == displaymode:
            calculate_iconsize_y = int(dbus_iconsize * 1.5)
            calculate_iconsize_x = int(calculate_iconsize_y * 1.1)
        elif utils.dock.displaymode_efficient == displaymode:
            calculate_iconsize_y = int(dbus_iconsize * 1.2)
            calculate_iconsize_x = int(calculate_iconsize_y * 1.4)
        self.assertEquals((calculate_iconsize_x, calculate_iconsize_y),
                          launcher.size)
    def testChangeIconSizeToLarge(self):
        self._select_icon_size(1)
        dbus_iconsize = utils.getDdeDockIconSize()
        self.assertTrue(dbus_iconsize == utils.dock.iconsize_large)
    def testChangeIconSizeToMedium(self):
        self._select_icon_size(2)
        dbus_iconsize = utils.getDdeDockIconSize()
        self.assertTrue(dbus_iconsize == utils.dock.iconsize_medium)
    def testChangeIconSizeToSmall(self):
        self._select_icon_size(3)
        dbus_iconsize = utils.getDdeDockIconSize()
        self.assertTrue(dbus_iconsize == utils.dock.iconsize_small)
def suite():
    """Build the suite: a size check between every icon-size change."""
    ordered_cases = [
        'testIconSize',
        'testChangeIconSizeToLarge',
        'testIconSize',
        'testChangeIconSizeToMedium',
        'testIconSize',
        'testChangeIconSizeToSmall',
        'testIconSize',
        'testChangeIconSizeToMedium',
        'testIconSize',
    ]
    composed = unittest.TestSuite()
    for case_name in ordered_cases:
        composed.addTest(EfficientDockSize(case_name))
    return composed
if __name__ == "__main__":
    # Run with the custom result class so failures flip the global flag.
    unittest.TextTestRunner(resultclass=MyTestResult).run(suite())
|
[
"wangyingtao@deepin.com"
] |
wangyingtao@deepin.com
|
fa065210ccebf15da4cef79217b04ce380761e8e
|
d9eafd325ab775b7b32af2dd0b63afc7310be53d
|
/pfwra/home/migrations/0007_auto_20210326_0755.py
|
3d4f72d76270133753f0df224c1d54935fa4def2
|
[
"MIT"
] |
permissive
|
johnkellehernz/pfwra
|
54b0db7debaed629d6003e0826a15bde2fd4a197
|
5b8c718bb2f1aaa34e9a718e07baf270294f7ba6
|
refs/heads/main
| 2023-05-01T14:39:42.419993
| 2021-05-13T11:00:07
| 2021-05-13T11:00:07
| 353,514,688
| 0
| 0
|
MIT
| 2021-03-31T23:15:32
| 2021-03-31T23:15:31
| null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
# Generated by Django 3.0.11 on 2021-03-26 07:55
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (see header): redefines the
    # StreamField layouts for the home page's "featured" cards and
    # "quotations". Generated field definitions should not be hand-edited.
    dependencies = [
        ('home', '0006_auto_20210324_2004'),
    ]
    operations = [
        migrations.AlterField(
            model_name='homepage',
            name='featured',
            field=wagtail.core.fields.StreamField([('cards', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('header', wagtail.core.blocks.CharBlock(label='Header text')), ('text', wagtail.core.blocks.TextBlock(help_text='Write an introduction for the card', required=False)), ('link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link label', required=False)), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Choose a page to link to', label='Page', required=False)), ('external_url', wagtail.core.blocks.URLBlock(help_text='Or choose an external URL to link to', label='External URL', required=False))], help_text='Link URL and link text (button)', required=False))]))], blank=True, help_text='Featured cards'),
        ),
        migrations.AlterField(
            model_name='homepage',
            name='quotations',
            field=wagtail.core.fields.StreamField([('quotes', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Quote title', required=False)), ('text', wagtail.core.blocks.TextBlock(label='Body of quote')), ('author', wagtail.core.blocks.CharBlock(label='Quote title', required=False)), ('link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link label', required=False)), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Choose a page to link to', label='Page', required=False)), ('external_url', wagtail.core.blocks.URLBlock(help_text='Or choose an external URL to link to', label='External URL', required=False))], required=False))]))], blank=True, help_text='Featured quotes'),
        ),
    ]
|
[
"jordi.joan@gmail.com"
] |
jordi.joan@gmail.com
|
2e052f979575d022eae6b685e56b2e6187c3b127
|
e11e0d06e6a55c1e84c9d0cb885651cf2035e476
|
/ddco_code/lear.py
|
90d635b116057181ccc75c82b42031a380d46d6d
|
[
"MIT"
] |
permissive
|
DanielTakeshi/debridement-code
|
ae5a6413e58e9bae352f0adeae1d09185937dbed
|
a889dcc6e1c96ac0466afa9e4f7e76015dc3c958
|
refs/heads/master
| 2023-01-09T09:05:37.165092
| 2023-01-02T18:58:16
| 2023-01-02T18:58:16
| 96,563,293
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import pickle
import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Load every record appended to data.p (one pickle.load call per record).
# Fix: pickle streams are binary — the file must be opened in 'rb' mode;
# text mode ('r') fails on Python 3. `with` also guarantees the file is
# closed (the original never closed it).
data = []
with open('data.p', 'rb') as f:
    while True:
        try:
            data.append(pickle.load(f))
        except EOFError:
            break

# X: estimated (x, y) positions; Y: measured positions.
X = np.zeros((len(data), 2))
Y = np.zeros((len(data), 2))
for i, d in enumerate(data):
    Y[i, 0] = np.ravel(d['pos'][0])[0]
    Y[i, 1] = np.ravel(d['pos'][1])[0]
    X[i, 0] = d['estimate'][0]
    X[i, 1] = d['estimate'][1]
print(X, Y)

# Train one forest per output coordinate.
regx = RandomForestRegressor(n_estimators=3)
regx.fit(X, Y[:, 0])
regy = RandomForestRegressor()
regy.fit(X, Y[:, 1])
#Yp = reg.predict(X)
with open('model-sep.p', 'wb') as out:
    pickle.dump((regx, regy), out)

# Plot measured (red) vs predicted (blue x) positions.
import matplotlib.pyplot as plt
plt.scatter(Y[:, 0], Y[:, 1], c='r')
print(X, Y)
plt.scatter(regx.predict(X), regy.predict(X), c='b', marker='x')
plt.show()
|
[
"takeshidanny@gmail.com"
] |
takeshidanny@gmail.com
|
b1b625c333c9755c0f379779cf9d9b2613b21940
|
f22ca9aecda111a019502b462ce6772cb22d9425
|
/test/test_model_response_cart_script_list.py
|
529c99308a7ddcc364fd9e5a1f1cfcabd2bb5062
|
[] |
no_license
|
sivanv-unbxd/a2c-sdk-pim
|
cac05bc6335ddc3c4121d43e2dc476a6fec14965
|
51a07a0b7f90d74569ad14b47b174da7ac1fc374
|
refs/heads/main
| 2023-05-29T05:45:32.279821
| 2021-06-09T03:52:11
| 2021-06-09T03:52:11
| 375,218,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
# coding: utf-8
"""
Swagger API2Cart
API2Cart # noqa: E501
OpenAPI spec version: 1.1
Contact: contact@api2cart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.model_response_cart_script_list import ModelResponseCartScriptList # noqa: E501
from swagger_client.rest import ApiException
class TestModelResponseCartScriptList(unittest.TestCase):
    """ModelResponseCartScriptList unit test stubs"""
    # NOTE: swagger-codegen-generated stub; the actual construction test is
    # still to be filled in (see FIXME below).
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testModelResponseCartScriptList(self):
        """Test ModelResponseCartScriptList"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.model_response_cart_script_list.ModelResponseCartScriptList()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this stub module directly.
    unittest.main()
|
[
"sivanv@unbxd.com"
] |
sivanv@unbxd.com
|
802549a81d933b4c840beebfe9acc73fcbda6d31
|
6b9084d234c87d7597f97ec95808e13f599bf9a1
|
/Dataset/Base/Video/Filter/func.py
|
b127cbe1276665f6b16fba571f79183c2de87d26
|
[] |
no_license
|
LitingLin/ubiquitous-happiness
|
4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc
|
aae2d764e136ca4a36c054212b361dd7e8b22cba
|
refs/heads/main
| 2023-07-13T19:51:32.227633
| 2021-08-03T16:02:03
| 2021-08-03T16:02:03
| 316,664,903
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,014
|
py
|
from Dataset.Filter.DataCleaning.ObjectCategory import DataCleaning_ObjectCategory
from Dataset.Filter.Selector import Selector
from Dataset.Filter.SortBySequenceFrameSize import SortBySequenceFrameSize
from Dataset.Filter.DataCleaning.Integrity import DataCleaning_Integrity
from Dataset.Filter.DataCleaning.BoundingBox import DataCleaning_BoundingBox
from Dataset.Filter.DataCleaning.AnnotationStandard import DataCleaning_AnnotationStandard
from .tweak_tool import VideoDatasetTweakTool
__all__ = ['apply_filters_on_video_dataset_']
def apply_filters_on_video_dataset_(dataset: dict, filters: list):
    """Apply a list of filter objects to a video dataset dict, in place.

    Each applied filter's serialized form is appended to dataset['filters']
    so the applied pipeline is recorded with the data.
    NOTE(review): ``dataset`` is returned only on the empty-filters early
    exit; otherwise the function mutates in place and implicitly returns
    None — confirm callers rely on the mutation, not the return value.
    """
    if len(filters) == 0:
        return dataset
    if 'filters' not in dataset:
        dataset['filters'] = []
    filters_backup = dataset['filters']
    dataset_tweak_tool = VideoDatasetTweakTool(dataset)
    for filter_ in filters:
        # Dispatch on the concrete filter type; order of the sub-steps
        # inside each branch matters (e.g. fit-in-image before validity).
        if isinstance(filter_, Selector):
            # Selector yields the sequence indices to keep.
            dataset_tweak_tool.apply_index_filter(filter_(len(dataset['sequences'])))
        elif isinstance(filter_, DataCleaning_BoundingBox):
            if filter_.fit_in_image_size:
                dataset_tweak_tool.bounding_box_fit_in_image_size()
            if filter_.update_validity:
                dataset_tweak_tool.bounding_box_update_validity()
            if filter_.remove_invalid_objects:
                dataset_tweak_tool.bounding_box_remove_non_validity_objects()
            if filter_.remove_empty_objects:
                dataset_tweak_tool.bounding_box_remove_empty_annotation_objects()
        elif isinstance(filter_, DataCleaning_Integrity):
            if filter_.remove_zero_annotation_objects:
                dataset_tweak_tool.remove_zero_annotation_objects()
            if filter_.remove_zero_annotation_video_head_tail:
                dataset_tweak_tool.remove_empty_annotation_head_tail()
            if filter_.remove_invalid_image:
                dataset_tweak_tool.remove_invalid_image()
        elif isinstance(filter_, DataCleaning_ObjectCategory):
            if filter_.category_ids_to_remove is not None:
                dataset_tweak_tool.remove_category_ids(filter_.category_ids_to_remove)
            if filter_.make_category_id_sequential:
                dataset_tweak_tool.make_category_id_sequential()
        elif isinstance(filter_, SortBySequenceFrameSize):
            dataset_tweak_tool.sort_by_sequence_size(filter_.descending)
        elif isinstance(filter_, DataCleaning_AnnotationStandard):
            dataset_tweak_tool.annotation_standard_conversion(filter_.bounding_box_format,
                                                             filter_.pixel_coordinate_system,
                                                             filter_.bounding_box_coordinate_system,
                                                             filter_.pixel_definition)
        else:
            raise RuntimeError(f"{type(filter_)} not implemented for Video Dataset")
        # Record the applied filter for provenance.
        filters_backup.append(filter_.serialize())
    dataset['filters'] = filters_backup
|
[
"linliting06@live.com"
] |
linliting06@live.com
|
22deb9e6511ee52c9f835d0a9e15c263ceac0035
|
1a5ea2453c6365e6f06031e66a6ef8f1ed6be4ce
|
/main/views.py
|
9a81be0a45bad9a530661eeed1b72febbf39b514
|
[] |
no_license
|
gusdn3477/ourSeoul
|
4283f6a8e49033049ca4c8c6e0386fbfc403ac45
|
378c89118825e391b85eef734bc287aca7b0d05a
|
refs/heads/main
| 2023-04-04T20:11:09.730391
| 2021-04-18T15:24:42
| 2021-04-18T15:24:42
| 343,732,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
from django.shortcuts import render, redirect
from .models import Post
# Create your views here.
def index(request):
    # Landing page.
    return render(request, 'main/index.html')
def blog(request):
    """Render the blog index listing every post."""
    all_posts = Post.objects.all()
    context = {'postlist': all_posts}
    return render(request, 'main/blog.html', context)
def posting(request, pk):
    """Render the detail page for a single post."""
    selected = Post.objects.get(pk=pk)
    return render(request, 'main/posting.html', {'post': selected})
def new_post(request):
    """Create a blog post from the submitted form, then return to the list.

    GET renders the empty form; POST saves postname/contents and, when
    provided, the uploaded main photo (the photo field is optional).
    """
    if request.method == 'POST':
        form = Post()
        form.postname = request.POST['postname']
        form.contents = request.POST['contents']
        try:
            form.mainphoto = request.FILES['mainphoto']
        except KeyError:
            # No photo uploaded — leave the field unset. Narrowed from a
            # bare ``except: pass`` (Django's MultiValueDictKeyError
            # subclasses KeyError), so real errors are no longer swallowed.
            pass
        form.save()
        return redirect('/main/blog/')
    return render(request, 'main/new_post.html')
def remove_post(request, pk):
    # Confirm-and-delete view for one post: GET shows a confirmation page,
    # POST deletes and redirects.
    post = Post.objects.get(pk=pk)
    if request.method == 'POST':
        post.delete()
        # NOTE(review): redirects to '/blog/' while new_post uses
        # '/main/blog/' — confirm against the URL conf.
        return redirect('/blog/')
    return render(request, 'main/remove_post.html', {'Post' : post})
'''
def upload(request):
if request.method == "POST":
if 'file' in request.FILES:
file = request.FILES['file']
filename = file._name
fp = open('%s/%s' % ('media/image/', filename), 'wb')
for chunk in file.chunks():
fp.write(chunk)
fp.close()
return HttpResponse('File Uploaded')
return HttpResponse('Failed to Upload File')
def upload_pic(request):
if request.method == 'POST':
form = ImageUploadForm(request.POST, request.FILES)
if form.is_valid():
m = ExampleModel.objects.get(pk=course_id)
m.model_pic = form.cleaned_data['image']
m.save()
return HttpResponse('image upload success')
return HttpResponseForbidden('allowed only via POST')
'''
|
[
"gusdn3477@naver.com"
] |
gusdn3477@naver.com
|
363a4f1f4d7a00c347f29e9c2e247a5ba694dacf
|
e8912ed90e97730b465b1e65084c1dbcc741a73e
|
/기본/알고리즘 D3/연습문제3.py
|
ac3b9db49165d8f43f49703867b4736e2a845fa9
|
[] |
no_license
|
yhnb3/Algorithm_lecture
|
a0dcefc27ed17bec3cadae56d69e3cc64239cbfb
|
461367e907e2b8a6a0cdc629e6a9029d9b03fba1
|
refs/heads/master
| 2020-12-22T05:39:05.412680
| 2020-04-10T09:16:19
| 2020-04-10T09:16:19
| 236,685,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
dx_j = [1, 0, -1, 0]  # column deltas: right, down, left, up
dy_i = [0, 1, 0, -1]  # row deltas, same direction order

Array = [ [9,20,2,18,11], [19,1,25,3,21], [8,24,10,17,7], [15,4,16,5,6], [12,13,22,23,14] ]
IdxTbl = []  # (row, col) pairs in clockwise, outside-in spiral order
N = len(Array)
N2 = N*N

# Build the spiral index table, one ring (w0) at a time.
for w0 in range(N//2+1):
    i = j = w0
    if N-2*w0-1 == 0:
        # Odd N: the innermost ring is a single centre cell.
        IdxTbl.append((i, j))
        continue
    for d in range(4):
        for _ in range(N-2*w0-1):
            IdxTbl.append((i, j))
            i += dy_i[d]
            j += dx_j[d]
print(IdxTbl)

# Print values in spiral order (before sorting).
for i in range(N2) :
    print(Array[IdxTbl[i][0]][IdxTbl[i][1]], end=" ")
print()

# Selection-sort the values along the spiral order (key = cell value).
for i in range(N2-1) :
    # Renamed from ``min``: the original shadowed the builtin min().
    min_k = i
    for j in range(i+1, N2) :
        if Array[IdxTbl[min_k][0]][IdxTbl[min_k][1]] > Array[IdxTbl[j][0]][IdxTbl[j][1]] :
            min_k = j
    ri, ci = IdxTbl[i]
    rm, cm = IdxTbl[min_k]
    Array[rm][cm], Array[ri][ci] = Array[ri][ci], Array[rm][cm]

# Print values in spiral order (after sorting), then the final grid.
for i in range(N2) :
    print(Array[IdxTbl[i][0]][IdxTbl[i][1]], end=" ")
print()
for i in range(N) :
    print(Array[i])
|
[
"yhnb33@gmail.com"
] |
yhnb33@gmail.com
|
85924c934ef3229aa88c7b3b2028fdc8176a95b4
|
2500a2ab1f43c649fb0b4fe3b9e3420efa017efa
|
/Push/Sysex.py
|
d43c691dffbd6990a171e056ad2e9f5919ad2ce4
|
[] |
no_license
|
cappytan3/AbletonLive9_RemoteScripts
|
0ce3e2d728190ba2ff5d2422cd03ae8a5df9d46f
|
65d08fd4ccdadd8366eca6f3c0fa7932516147bf
|
refs/heads/master
| 2021-01-15T11:50:14.152579
| 2014-04-11T17:37:22
| 2014-04-11T17:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
#Embedded file name: /Users/versonator/Hudson/live/Projects/AppLive/Resources/MIDI Remote Scripts/Push/Sysex.py
START = (240, 71, 127, 21)  # Push sysex header bytes (F0 47 7F 15)
# Display line clear/write commands (247 = F7, sysex end byte).
CLEAR_LINE1 = START + (28, 0, 0, 247)
CLEAR_LINE2 = START + (29, 0, 0, 247)
CLEAR_LINE3 = START + (30, 0, 0, 247)
CLEAR_LINE4 = START + (31, 0, 0, 247)
WRITE_LINE1 = START + (24, 0, 69, 0)
WRITE_LINE2 = START + (25, 0, 69, 0)
WRITE_LINE3 = START + (26, 0, 69, 0)
WRITE_LINE4 = START + (27, 0, 69, 0)
SET_AFTERTOUCH_MODE = START + (92, 0, 1)
# Display contrast / brightness set-prefixes and query messages.
CONTRAST_PREFIX = START + (122, 0, 1)
CONTRAST_ENQUIRY = START + (122, 0, 0, 247)
BRIGHTNESS_PREFIX = START + (124, 0, 1)
BRIGHTNESS_ENQUIRY = START + (124, 0, 0, 247)
ALL_PADS_SENSITIVITY_PREFIX = START + (93, 0, 32)
PAD_SENSITIVITY_PREFIX = START + (90, 0, 33)
def to_sysex_int(number, unused_parameter_name):
    """Split *number* into four big-endian 4-bit nibbles for a sysex payload."""
    nibbles = []
    for shift in (12, 8, 4, 0):
        nibbles.append((number >> shift) & 15)
    return tuple(nibbles)
# Pad calibration message: each named quantity is packed as four nibbles.
CALIBRATION_SET = START + (87, 0, 20) + to_sysex_int(215, 'Preload Scale Factor') + to_sysex_int(1000, 'Recalibration Interval') + to_sysex_int(200, 'Stuck Pad Detection Threshold') + to_sysex_int(0, 'Stuck Pad NoteOff Threshold Adder') + to_sysex_int(200, 'Pad Ignore Time') + (247,)
MODE_CHANGE = START + (98, 0, 1)
USER_MODE = 1
LIVE_MODE = 0
WELCOME_MESSAGE = START + (1, 1, 247)
GOOD_BYE_MESSAGE = START + (1, 0, 247)
# NOTE(review): these three are reassigned near the end of the module — the
# later definitions win at import time.
IDENTITY_PREFIX = START + (6, 2)
IDENTITY_ENQUIRY = START + (6, 1, 247)
DONGLE_PREFIX = START + (80, 0)
def make_presentation_message(application):
    """Build the sysex handshake message carrying Live's version triple."""
    version_bytes = (
        application.get_major_version(),
        application.get_minor_version(),
        application.get_bugfix_version(),
    )
    return START + (96, 0, 4, 65) + version_bytes + (247,)
# NOTE(review): IDENTITY_ENQUIRY, IDENTITY_PREFIX and DONGLE_PREFIX were
# already assigned above; these later values override them — confirm the
# earlier definitions are intentionally dead.
IDENTITY_ENQUIRY = (240, 126, 0, 6, 1, 247)
IDENTITY_PREFIX = (240, 126, 0, 6, 2, 71, 21, 0, 25)
DONGLE_ENQUIRY_PREFIX = START + (80,)
DONGLE_PREFIX = START + (81,)
|
[
"julien@julienbayle.net"
] |
julien@julienbayle.net
|
5e6038c7c43a05a8327b743a7542d215c3b5ade8
|
67379c2ae929266f303edc783c8c62edb521174b
|
/rm/ATResourceManager.py
|
8182dc0f14ad1ddfd23c07e14d3537d3ca95cfe6
|
[] |
no_license
|
bbb11808/seata-python
|
d20be83093d6d084ad36d9292a8ee18ad3bfc8c6
|
c53b605be423c781d38e599e5bade8df8c81c2d9
|
refs/heads/master
| 2023-02-11T01:22:18.488881
| 2021-01-05T10:10:08
| 2021-01-05T10:10:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,417
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
from core.context.RootContext import RootContext
from core.model.BranchStatus import BranchStatus
from core.protocol.RegisterRMRequestResponse import RegisterRMRequest
from core.protocol.ResultCode import ResultCode
from core.protocol.transaction.BranchRegisterRequestResponse import BranchRegisterRequest
from core.protocol.transaction.BranchReportRequestResponse import BranchReportRequest
from core.protocol.transaction.GlobalLockQueryRequestResponse import GlobalLockQueryRequest
from exception.RmTransactionException import RmTransactionException
from exception.ShouldNeverHappenException import ShouldNeverHappenException
from exception.TransactionException import TransactionException
from exception.TransactionExceptionCode import TransactionExceptionCode
from rm.RMClient import RMClient
from rm.datasource.PooledDBProxy import PooledDBProxy
from rm.datasource.undo.UndoLogManagerFactory import UndoLogManagerFactory
manager = None


class ATResourceManager(object):
    """AT-mode resource manager.

    Registers datasource proxies with the transaction coordinator (TC) and
    performs the branch-level RPCs: global lock query, branch register,
    branch report, and branch rollback (replaying undo logs).
    """

    def __init__(self):
        # resource_id -> PooledDBProxy for every registered datasource.
        self.pool_db_proxy_cache = dict()

    @staticmethod
    def get():
        """Return the process-wide singleton, creating it on first use."""
        global manager
        if manager is None:
            manager = ATResourceManager()
        return manager

    def register_resource(self, pooled_db_proxy):
        """Cache *pooled_db_proxy* and announce this RM to the TC.

        Raises:
            TypeError: if the argument is not a PooledDBProxy.
        """
        if not isinstance(pooled_db_proxy, PooledDBProxy):
            raise TypeError("Register resource type error.")
        self.pool_db_proxy_cache[pooled_db_proxy.get_resource_id()] = pooled_db_proxy
        request = RegisterRMRequest()
        request.transaction_service_group = RMClient.get().transaction_service_group
        request.application_id = RMClient.get().application_id
        # NOTE(review): the resource id is not copied onto the request here —
        # confirm the TC does not need it at registration time.
        RMClient.get().send_sync_request(request)

    def lock_query(self, branch_type, resource_id, xid, lock_keys):
        """Ask the TC whether *lock_keys* can be locked under global *xid*.

        Returns the TC's ``lockable`` flag; wraps RPC failures in
        RmTransactionException.
        """
        try:
            request = GlobalLockQueryRequest()
            request.xid = xid
            request.lock_key = lock_keys
            request.resource_id = resource_id
            if RootContext.in_global_transaction() or RootContext.require_global_lock():
                response = RMClient.get().send_sync_request(request)
            else:
                raise RuntimeError("unknow situation!")
            if response.result_code == ResultCode.Failed:
                raise TransactionException(response.transaction_exception_code, "Response[{}]".format(response.msg))
            return response.lockable
        except TimeoutError as e:
            raise RmTransactionException(TransactionExceptionCode.IO, "RPC Timeout", e)
        except RuntimeError as e:
            raise RmTransactionException(TransactionExceptionCode.BranchReportFailed, "Runtime", e)

    def branch_register(self, branch_type, resource_id, client_id, xid, application_data, lock_keys):
        """Register a branch transaction for *xid* with the TC."""
        try:
            request = BranchRegisterRequest()
            request.xid = xid
            request.branch_type = branch_type
            request.resource_id = resource_id
            request.lock_key = lock_keys
            request.application_data = application_data
            response = RMClient.get().send_sync_request(request)
            if response.result_code == ResultCode.Failed:
                # Consistency fix: pass the exception code first and the
                # message second, as lock_query/branch_report do; previously
                # a single formatted string was passed as the code argument.
                raise RmTransactionException(response.transaction_exception_code,
                                             "response [{}]".format(response.msg))
        except TimeoutError as e:
            raise RmTransactionException(TransactionExceptionCode.IO, "RPC Timeout", e)
        except RuntimeError as e:
            raise RmTransactionException(TransactionExceptionCode.BranchReportFailed, "Runtime", e)

    def branch_report(self, branch_type, xid, branch_id, status, application_data):
        """Report the one-phase status of branch *branch_id* to the TC."""
        try:
            request = BranchReportRequest()
            request.xid = xid
            request.branch_id = branch_id
            request.status = status
            request.application_data = application_data
            response = RMClient.get().send_sync_request(request)
            if response.result_code == ResultCode.Failed:
                raise RmTransactionException(response.transaction_exception_code, "response [{}]".format(response.msg))
        except TimeoutError as e:
            raise RmTransactionException(TransactionExceptionCode.IO, "RPC Timeout", e)
        except RuntimeError as e:
            raise RmTransactionException(TransactionExceptionCode.BranchReportFailed, "Runtime", e)

    def branch_rollback(self, branch_type, xid, branch_id, resource_id, application_data):
        """Roll back a branch by replaying its undo log.

        Returns a BranchStatus indicating rollback success or whether the
        failure is retryable.
        """
        pool_db_proxy = self.pool_db_proxy_cache.get(resource_id)
        if pool_db_proxy is None:
            # A rollback for a resource we never registered should be impossible.
            raise ShouldNeverHappenException()
        try:
            UndoLogManagerFactory.get_undo_log_manager(pool_db_proxy.get_db_type()).undo(pool_db_proxy, xid, branch_id)
        except TransactionException as e:
            print("branchRollback failed. branch_type:[{}], xid:[{}], branch_id:[{}], resource_id:[{}], "
                  "application_data:[{}], reason:[{}]".format(branch_type, xid, branch_id, resource_id,
                                                              application_data, e.message))
            if e.code == TransactionExceptionCode.BranchRollbackFailed_Unretriable:
                return BranchStatus.PhaseTwo_RollbackFailed_Unretryable
            else:
                return BranchStatus.PhaseTwo_RollbackFailed_Retryable
        return BranchStatus.PhaseTwo_Rollbacked
|
[
"jsbxyyx@163.com"
] |
jsbxyyx@163.com
|
638adc9899a92436950cf5c686a1ff37d51413a6
|
565e2df93c18343d43c6dd216d5885155465f281
|
/test-runner/method_tests.py
|
bb50afcfcc250789af5aab358c044af0d034b5d2
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/iot-sdks-e2e-fx
|
ac394a188dee5660734c5afea70e12d666dbc92b
|
1d92dd6c4907760f4d04db251e2f53d5dd325b36
|
refs/heads/master
| 2023-09-01T08:25:38.190244
| 2023-06-13T00:04:21
| 2023-06-13T00:04:21
| 165,910,472
| 15
| 12
|
MIT
| 2023-07-21T19:03:42
| 2019-01-15T19:27:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,435
|
py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import pytest
import json
import asyncio
import limitations
from utilities import next_integer, next_random_string
from horton_logging import logger
async def run_method_call_test(source, destination):
    """
    Invoke a direct method call from *source* and answer it from *destination*,
    asserting that the status code and payload round-trip intact.

    NOTE(review): assumes source/destination are Horton client wrappers
    exposing call_module_method / call_device_method and
    wait_for_method_and_return_response — confirm against the fixtures.
    """
    # Unique method name / payload / status per run so a stale response from a
    # previous test cannot satisfy the assertions below.
    method_name = "test_method_{}".format(next_integer("test_method"))
    method_payload = {"payloadData": next_random_string("method_payload")}
    status_code = 1000 + next_integer("status_code")
    method_invoke_parameters = {
        "methodName": method_name,
        "payload": method_payload,
        "responseTimeoutInSeconds": 75,
        "connectTimeoutInSeconds": 60,
    }
    method_response_body = {"responseData": next_random_string("method_response")}
    if limitations.needs_manual_connect(destination):
        await destination.connect2()
    await destination.enable_methods()
    # start listening for method calls on the destination side
    receiver_future = asyncio.ensure_future(
        destination.wait_for_method_and_return_response(
            method_name, status_code, method_invoke_parameters, method_response_body
        )
    )
    # The first method registration for a given source takes longer to
    # propagate; subsequent registrations only need a short settle time.
    if getattr(source, "methods_registered", False):
        registration_sleep = 0.5
    else:
        source.methods_registered = True
        registration_sleep = 10
    logger(
        "sleeping for {} seconds to make sure all registration is complete".format(
            registration_sleep
        )
    )
    await asyncio.sleep(registration_sleep)
    # invoking the call from caller side
    if getattr(destination, "module_id", None):
        sender_future = source.call_module_method(
            destination.device_id, destination.module_id, method_invoke_parameters
        )
    else:
        sender_future = source.call_device_method(
            destination.device_id, method_invoke_parameters
        )
    # Run sender and receiver concurrently; either side failing fails the test.
    (response, _) = await asyncio.gather(sender_future, receiver_future)
    logger("method call complete. Response is:")
    logger(str(response))
    # wait for that response to arrive back at the source and verify that it's all good.
    assert response["status"] == status_code
    # edge bug: the response that edge returns is stringified. The same response that comes back from an iothub service call is not stringified
    if isinstance(response["payload"], str):
        response["payload"] = json.loads(response["payload"])
    assert response["payload"] == method_response_body
    await receiver_future
class BaseReceiveMethodCallTests(object):
    """Shared smoke test mixed into the method-receive test classes."""

    # Fix: a stale, duplicated @pytest.mark.it("Can receive a method call from
    # the IoTHub service") decorator was stacked on this connect/enable test —
    # that description belongs to test_method_call_invoked_from_service below.
    @pytest.mark.it("Can connect, enable methods, and disconnect")
    async def test_module_client_connect_enable_methods_disconnect(self, client):
        if limitations.needs_manual_connect(client):
            await client.connect2()
        await client.enable_methods()
class ReceiveMethodCallFromServiceTests(BaseReceiveMethodCallTests):
    # Direction: IoTHub service -> client under test.
    @pytest.mark.it("Can receive a method call from the IoTHub service")
    async def test_method_call_invoked_from_service(self, client, service):
        await run_method_call_test(source=service, destination=client)
class ReceiveMethodCallFromModuleTests(BaseReceiveMethodCallTests):
    # Direction: friend (another EdgeHub module) -> client under test.
    @pytest.mark.it("Can receive a method call from an EdgeHub module")
    async def test_method_call_invoked_from_friend(self, client, friend):
        await run_method_call_test(source=friend, destination=client)
class InvokeMethodCallOnModuleTests(object):
    # Direction: client under test -> friend module.
    @pytest.mark.it("Can invoke a method call on an EdgeHub module")
    async def test_method_call_invoked_on_friend(self, client, friend):
        # pythonv2/c clients cannot invoke methods when using shared-key auth.
        if limitations.uses_shared_key_auth(client):
            limitations.skip_test_for(client, ["pythonv2", "c"])
        await run_method_call_test(source=client, destination=friend)
class InvokeMethodCallOnLeafDeviceTests(object):
    # Direction: client under test -> leaf device behind EdgeHub.
    @pytest.mark.it("Can invoke a method call on an EdgeHub leaf device")
    async def test_method_call_invoked_on_leaf_device(self, client, leaf_device):
        # pythonv2/c clients cannot invoke methods when using shared-key auth.
        if limitations.uses_shared_key_auth(client):
            limitations.skip_test_for(client, ["pythonv2", "c"])
        await run_method_call_test(source=client, destination=leaf_device)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
7458582adedec7294e8f56451dc4f117eb73def2
|
603519e0d087967caac72cce854dc7f1dfaa5262
|
/bioinformatics stronghold/SSET.py
|
27596c48d68aa9aa5edd2b69acaab2bc90d4456f
|
[] |
no_license
|
Morpheus2112/Rosalind-exercise
|
e591570521a12905864cb7e7f72b66816da7ae3a
|
e1047a5f6725e07c8cbf17594bfe4969cbc5d708
|
refs/heads/master
| 2022-07-25T00:07:17.316099
| 2020-02-16T07:18:21
| 2020-02-16T07:18:21
| 240,848,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# -*- coding: utf-8 -*-
"""
see http://rosalind.info/problems/sset/
"""
def sset(n=None):
    """Return the number of subsets of an n-element set, modulo 1,000,000.

    If *n* is omitted it is read from rosalind_sset.txt (the original
    Rosalind behaviour). The subset count of an n-set is 2**n.
    """
    if n is None:
        with open("rosalind_sset.txt") as handle:
            n = int(handle.read())
    # Three-argument pow does modular exponentiation without ever
    # materialising the full 2**n integer.
    return pow(2, n, 10**6)


if __name__ == "__main__":
    # Fix: the original `print sset()` was Python 2 statement syntax (a
    # SyntaxError under Python 3) and ran on import; guard the entry point.
    print(sset())
|
[
"palandswd@gmail.com"
] |
palandswd@gmail.com
|
be56131d0af5ece6f138489628e0b374cdafc512
|
901f9fb4c3fe2e5ac716462795b365e9e68f8808
|
/eventsourcing/tests/persistence_tests/test_infrastructure_factory.py
|
353331bd93dea0bc8b6d7e30c774eb22828af802
|
[
"BSD-3-Clause"
] |
permissive
|
alexyarmoshko/eventsourcing
|
e64571fd85c8d9ece5114d950cd47d7649420890
|
59f79eeaf897d349a9fdd3436ce18fcce78a77a3
|
refs/heads/master
| 2023-03-06T04:27:15.346517
| 2023-02-19T04:10:22
| 2023-02-19T04:10:22
| 175,817,681
| 0
| 0
|
BSD-3-Clause
| 2019-03-15T12:38:06
| 2019-03-15T12:38:04
| null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
from unittest.case import TestCase
from eventsourcing.persistence import InfrastructureFactory
from eventsourcing.utils import Environment, get_topic
class TestInfrastructureFactoryErrors(TestCase):
    """Error paths of InfrastructureFactory.construct()."""

    def test_construct_raises_exception(self):
        # A persistence-module setting that is not a valid topic string.
        invalid_topic_env = Environment(
            env={InfrastructureFactory.PERSISTENCE_MODULE: "invalid topic"}
        )
        with self.assertRaises(EnvironmentError):
            InfrastructureFactory.construct(invalid_topic_env)

        # A resolvable topic that is not an infrastructure factory class.
        wrong_class_env = Environment(
            env={InfrastructureFactory.PERSISTENCE_MODULE: get_topic(object)}
        )
        with self.assertRaises(AssertionError):
            InfrastructureFactory.construct(wrong_class_env)
|
[
"john.bywater@appropriatesoftware.net"
] |
john.bywater@appropriatesoftware.net
|
dd108645cdb1bf8c3d67e2aa1b361f00d42b223f
|
dce8531d0e9665a09205f70a909ac1424f7e09eb
|
/preprocessor/ljspeech.py
|
f8511d9cee17454f9dd79d14376ddb543f554717
|
[
"MIT"
] |
permissive
|
keonlee9420/Comprehensive-Tacotron2
|
40a6e5fcecf55ee02a8523a7e2701b6124748bee
|
1eff7f08c41a2127bbe300b6d66ce5c966422b25
|
refs/heads/main
| 2023-08-07T16:10:15.133301
| 2022-02-20T14:30:07
| 2022-02-20T14:44:36
| 388,990,172
| 39
| 17
|
MIT
| 2023-07-31T13:08:05
| 2021-07-24T03:36:08
|
Python
|
UTF-8
|
Python
| false
| false
| 6,997
|
py
|
import os
import random
import json
import tgt
import librosa
import numpy as np
from tqdm import tqdm
import audio as Audio
from text import text_to_sequence
from utils.tools import save_mel_and_audio
random.seed(1234)
class Preprocessor:
    """Preprocess an LJSpeech-style corpus into numpy text/mel features.

    Reads metadata.csv, converts each transcript to an id sequence and each
    wav to a mel spectrogram, writes them under <out_dir>/text and
    <out_dir>/mel, and emits train/val metadata plus global mel min/max
    statistics (stats.json) and a speaker map (speakers.json).
    """

    def __init__(self, config):
        self.dataset = config["dataset"]
        self.in_dir = config["path"]["corpus_path"]
        self.out_dir = config["path"]["preprocessed_path"]
        self.val_size = config["preprocessing"]["val_size"]
        self.sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
        # Clips shorter than skip_len samples are dropped entirely.
        self.skip_len = config["preprocessing"]["audio"]["skip_len"]
        self.trim_top_db = config["preprocessing"]["audio"]["trim_top_db"]
        self.filter_length = config["preprocessing"]["stft"]["filter_length"]
        self.hop_length = config["preprocessing"]["stft"]["hop_length"]
        # Number of hop-sized silence chunks appended to each clip.
        self.silence_audio_size = config["preprocessing"]["audio"]["silence_audio_size"]
        self.pre_emphasis = config["preprocessing"]["audio"]["pre_emphasis"]
        self.max_wav_value = config["preprocessing"]["audio"]["max_wav_value"]
        # When true, dump mel/audio for the first utterance and exit (debug aid).
        self.sanity_check = config["preprocessing"]["sanity_check"]
        self.cleaners = config["preprocessing"]["text"]["text_cleaners"]
        self.STFT = Audio.stft.TacotronSTFT(
            config["preprocessing"]["stft"]["filter_length"],
            config["preprocessing"]["stft"]["hop_length"],
            config["preprocessing"]["stft"]["win_length"],
            config["preprocessing"]["mel"]["n_mel_channels"],
            config["preprocessing"]["audio"]["sampling_rate"],
            config["preprocessing"]["mel"]["mel_fmin"],
            config["preprocessing"]["mel"]["mel_fmax"],
        )
        self.val_prior = self.val_prior_names(os.path.join(self.out_dir, "val.txt"))

    def val_prior_names(self, val_prior_path):
        """Return basenames of a pre-existing validation split, or None.

        If <out_dir>/val.txt already exists, its basenames (first |-field of
        each line) define the validation set; otherwise a random split is
        drawn later in build_from_path.
        """
        val_prior_names = set()
        if os.path.isfile(val_prior_path):
            print("Load pre-defined validation set...")
            with open(val_prior_path, "r", encoding="utf-8") as f:
                for m in f.readlines():
                    val_prior_names.add(m.split("|")[0])
            return list(val_prior_names)
        else:
            return None

    def build_from_path(self):
        """Process the whole corpus; write features, metadata, and stats.

        Returns the list of metadata lines for the random-split case
        (empty when a prior validation split was used).
        """
        os.makedirs((os.path.join(self.out_dir, "text")), exist_ok=True)
        os.makedirs((os.path.join(self.out_dir, "mel")), exist_ok=True)

        print("Processing Data ...")
        out = list()
        train = list()
        val = list()
        n_frames = 0
        # Running global mel range, used for normalisation downstream.
        mel_min = float('inf')
        mel_max = -float('inf')

        # Single-speaker corpus: the dataset name maps to speaker id 0.
        speakers = {self.dataset: 0}
        with open(os.path.join(self.in_dir, "metadata.csv"), encoding="utf-8") as f:
            for line in tqdm(f.readlines()):
                parts = line.strip().split("|")
                basename = parts[0]
                # Field 2 is the normalised transcription in LJSpeech metadata.
                text = parts[2]
                wav_path = os.path.join(self.in_dir, "wavs", "{}.wav".format(basename))

                ret = self.process_utterance(text, wav_path, self.dataset, basename)
                if ret is None:
                    # Utterance skipped (e.g. audio shorter than skip_len).
                    continue
                else:
                    info, n, m_min, m_max = ret

                if self.val_prior is not None:
                    if basename not in self.val_prior:
                        train.append(info)
                    else:
                        val.append(info)
                else:
                    out.append(info)

                if mel_min > m_min:
                    mel_min = m_min
                if mel_max < m_max:
                    mel_max = m_max

                n_frames += n

        # Save files
        with open(os.path.join(self.out_dir, "speakers.json"), "w") as f:
            f.write(json.dumps(speakers))

        with open(os.path.join(self.out_dir, "stats.json"), "w") as f:
            stats = {
                "mel": [
                    float(mel_min),
                    float(mel_max),
                ],
            }
            f.write(json.dumps(stats))

        print(
            "Total time: {} hours".format(
                n_frames * self.hop_length / self.sampling_rate / 3600
            )
        )

        if self.val_prior is not None:
            assert len(out) == 0
            random.shuffle(train)
            train = [r for r in train if r is not None]
            val = [r for r in val if r is not None]
        else:
            assert len(train) == 0 and len(val) == 0
            random.shuffle(out)
            out = [r for r in out if r is not None]
            train = out[self.val_size :]
            val = out[: self.val_size]

        # Write metadata
        with open(os.path.join(self.out_dir, "train.txt"), "w", encoding="utf-8") as f:
            for m in train:
                f.write(m + "\n")
        with open(os.path.join(self.out_dir, "val.txt"), "w", encoding="utf-8") as f:
            for m in val:
                f.write(m + "\n")

        return out

    def load_audio(self, wav_path):
        """Load, normalise, trim, and pad a wav file.

        Returns (raw_wav, processed_wav), or None if the clip is shorter
        than skip_len samples and should be skipped.
        """
        wav_raw, _ = librosa.load(wav_path, self.sampling_rate)
        if len(wav_raw) < self.skip_len:
            return None
        # Peak-normalise just below 1.0, then trim leading/trailing silence.
        wav = wav_raw / np.abs(wav_raw).max() * 0.999
        wav = librosa.effects.trim(wav, top_db=self.trim_top_db, frame_length=self.filter_length, hop_length=self.hop_length)[0]
        if self.pre_emphasis:
            # First-order pre-emphasis filter, then re-normalise.
            wav = np.append(wav[0], wav[1:] - 0.97 * wav[:-1])
            wav = wav / np.abs(wav).max() * 0.999
        # Append trailing silence so the model sees a clean end-of-utterance.
        wav = np.append(wav, [0.] * self.hop_length * self.silence_audio_size)
        wav = wav.astype(np.float32)
        return wav_raw, wav

    def process_utterance(self, raw_text, wav_path, speaker, basename):
        """Featurise one utterance; save text ids and mel to disk.

        Returns (metadata_line, n_mel_frames, mel_min, mel_max), or None if
        the utterance was skipped.
        """
        # Preprocess text
        text = np.array(text_to_sequence(raw_text, self.cleaners))

        # Load and process wav files.
        # Fix: load_audio returns None for clips shorter than skip_len; the
        # original unpacked it unconditionally and raised TypeError instead
        # of skipping (build_from_path already handles a None return).
        loaded = self.load_audio(wav_path)
        if loaded is None:
            return None
        wav_raw, wav = loaded

        # Compute mel-scale spectrogram
        mel_spectrogram = Audio.tools.get_mel_from_wav(wav, self.STFT)

        # Sanity check: dump processed vs raw mel/audio for inspection.
        if self.sanity_check:
            save_mel_and_audio(mel_spectrogram, wav*self.max_wav_value,
                               self.sampling_rate, self.out_dir, basename, tag="processed"
                               )
            save_mel_and_audio(Audio.tools.get_mel_from_wav(wav_raw, self.STFT), wav_raw*self.max_wav_value,
                               self.sampling_rate, self.out_dir, basename, tag="raw"
                               )
            exit(0)  # quit for testing

        # Save files
        text_filename = "{}-text-{}.npy".format(speaker, basename)
        np.save(
            os.path.join(self.out_dir, "text", text_filename),
            text,
        )

        mel_filename = "{}-mel-{}.npy".format(speaker, basename)
        np.save(
            os.path.join(self.out_dir, "mel", mel_filename),
            mel_spectrogram.T,
        )

        return (
            "|".join([basename, speaker, raw_text]),
            mel_spectrogram.shape[1],
            np.min(mel_spectrogram),
            np.max(mel_spectrogram),
        )
|
[
"keonlee9420@gmail.com"
] |
keonlee9420@gmail.com
|
d750021583b3df8500064a56702ba10b22f9f8f1
|
de644b254b17a28f82e9212d80872a3d9eca2149
|
/lib/gii/core/CommonAsset/AssetListAsset.py
|
11600ec3b960789afc552d4db6bb2d2c116aac2f
|
[
"MIT"
] |
permissive
|
pixpil/gii
|
506bee02b11eb412016b583d807dcfcc485e189c
|
ba6d94ada86d82bacae06f165567a02585264440
|
refs/heads/master
| 2021-12-03T06:30:31.503481
| 2021-11-24T03:02:49
| 2021-11-24T03:02:49
| 431,331,021
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import os.path
from gii.core import AssetManager, AssetLibrary, app, JSONHelper
from gii.core import AssetManager
class AssetListAssetManager(AssetManager):
    """Asset manager that recognises and imports .asset_list files.

    NOTE(review): the metatype string 'asest_list' looks like a typo for
    'asset_list', but it is a runtime identifier other code may match on,
    so it is preserved byte-for-byte.
    """

    def getName(self):
        return 'asset_manager.asest_list'

    def getMetaType(self):
        return 'asest_list'

    def acceptAssetFile(self, filepath):
        # Accept only regular files carrying the .asset_list extension.
        if not os.path.isfile(filepath):
            return False
        _, ext = os.path.splitext(filepath)
        return ext == '.asset_list'

    def importAsset(self, node, reload=False):
        node.assetType = 'asset_list'
        node.setObjectFile('data', node.getFilePath())
        return True


AssetListAssetManager().register()
|
[
"tommo.zhou@gmail.com"
] |
tommo.zhou@gmail.com
|
d5f658bfdf1c021dd3a93bb551fd8042b89315a1
|
e2bd39106992b592de686e5bd79002edc05cc8bc
|
/1438-绝对差不超过限制的最长连续子数组/LongestSubarray.py
|
bb8bc7b83512f73a1ecd2d2492cd7e135b6b3f29
|
[] |
no_license
|
Mumulhy/LeetCode
|
9b8ad3af9f9a3b838bdd54727cf8f33401292d27
|
269419ba2a2840fcf100fa217c5275029ffa229e
|
refs/heads/master
| 2022-10-28T23:06:54.081073
| 2022-10-23T07:48:49
| 2022-10-23T07:48:49
| 212,135,892
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,689
|
py
|
# -*- coding: utf-8 -*-
# LeetCode 1438-绝对差不超过限制的最长连续子数组
"""
Created on Mon Feb 22 17:47 2021
@author: _Mumu
Environment: py37
"""
class Solution:
    def longestSubarray(self, nums: list, limit: int) -> int:
        """Length of the longest subarray whose max-min difference <= limit.

        Sliding window with two monotonic deques: one decreasing deque whose
        front is the window maximum, one increasing deque whose front is the
        window minimum. Each element enters and leaves each deque at most
        once, so the whole scan is O(n).
        """
        from collections import deque
        window_max = deque()  # decreasing; front == current window maximum
        window_min = deque()  # increasing; front == current window minimum
        left = 0
        best = 0
        for right, value in enumerate(nums):
            # Maintain monotonicity before admitting the new value.
            while window_max and window_max[-1] < value:
                window_max.pop()
            while window_min and window_min[-1] > value:
                window_min.pop()
            window_max.append(value)
            window_min.append(value)
            # Shrink from the left until the window satisfies the limit.
            while window_max[0] - window_min[0] > limit:
                if nums[left] == window_max[0]:
                    window_max.popleft()
                elif nums[left] == window_min[0]:
                    window_min.popleft()
                left += 1
            best = max(best, right - left + 1)
        return best
if __name__ == '__main__':
    solver = Solution()
    # Expected output: 3 (the run of three consecutive 2s).
    print(solver.longestSubarray([4, 2, 2, 2, 4, 4, 2, 2], 0))
|
[
"1043994188@qq.com"
] |
1043994188@qq.com
|
119e73b6e8614a9c1f97011dc6eecc48113f1c39
|
023763d9f86116381f5765c51fb8b403e8eef527
|
/BootCamp_easy/agc004_a.py
|
f9d155c213a8b4709afce8680a27d6e384024c8e
|
[] |
no_license
|
Hilary02/atcoder
|
d45589682159c0f838561fc7d0bd25f0828e578b
|
879c74f3acc7befce75abd10abf1ab43967fc3c7
|
refs/heads/master
| 2021-07-18T11:34:22.702502
| 2021-07-11T09:04:12
| 2021-07-11T09:04:12
| 144,648,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
# AGC004 A: if any edge is even the box splits into two equal halves (diff 0);
# otherwise the minimal volume difference is the product of the two smallest
# edges.
edges = [int(token) for token in input().split()]
if any(edge % 2 == 0 for edge in edges):
    print(0)
else:
    edges.sort()
    print(edges[0] * edges[1])
|
[
"c011605154@edu.teu.ac.jp"
] |
c011605154@edu.teu.ac.jp
|
aac72438be6b9f63676bc9abcc3191455c5a9e02
|
ba88b66e61f0fd1ec0719b61568f0c883d02e534
|
/entities/migrations/0002_auto_20200727_2336.py
|
98c50866f7b192f751c81265db69327dca9d464d
|
[] |
no_license
|
bnmng/spltcs
|
fbc9b5fb5342f5ee0a8bd080f957b4022509b3e9
|
5f19136d8a266b3d2094397cafe41b3ca1f45e78
|
refs/heads/master
| 2020-12-26T18:47:07.348996
| 2020-08-02T21:57:44
| 2020-08-02T21:57:44
| 237,602,374
| 0
| 0
| null | 2020-03-03T15:07:04
| 2020-02-01T11:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 983
|
py
|
# Generated by Django 3.0.5 on 2020-07-27 23:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE(review): this file is 0002_auto_... yet declares initial = True
    # while depending on entities.0001_initial — Django treats initial
    # migrations specially under --fake-initial; confirm the flag is
    # intentional.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('entities', '0001_initial'),
    ]
    operations = [
        # Link each Entity to at most one auth user; SET_NULL preserves the
        # entity row if its user is deleted.
        migrations.AddField(
            model_name='entity',
            name='user',
            field=models.ForeignKey(blank=True, help_text='The user associated with this entity', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='entity', to=settings.AUTH_USER_MODEL),
        ),
        # Attach each Email to an owning Entity, again surviving deletion.
        migrations.AddField(
            model_name='email',
            name='entity',
            field=models.ForeignKey(blank=True, help_text='The entity who has this email address', null=True, on_delete=django.db.models.deletion.SET_NULL, to='entities.Entity', verbose_name='Entity'),
        ),
    ]
|
[
"benjamin@bnmng.com"
] |
benjamin@bnmng.com
|
015ae6ca83ec48f43cc9b164e7a7046d5dfa4f90
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02696/s442403250.py
|
aa7dd421cd2e17f9ea28382dc57fc3251d1099f4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
import sys
def input():
    # Competitive-programming idiom: shadow the builtin input() with a faster
    # raw stdin readline, dropping the trailing newline.
    return sys.stdin.readline()[:-1]
def main():
    """Print the maximum of floor(A*x/B) - A*floor(x/B) over 0 <= x <= N."""
    A, B, N = map(int, input().split())
    # The objective is maximised at x = min(N, B - 1): there floor(x/B) == 0,
    # so the answer reduces to A*x // B.
    x = B - 1 if B - 1 <= N else N
    print(A * x // B)


if __name__ == "__main__":
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6e3ee2cf99d9871b230518dddfa45f5786599471
|
f3dddaa239bb428312a46307f1fe2321a1c89c68
|
/electron_project/devices/migrations/0005_devicesparepartrelation_diagram_code.py
|
148e96d961c9c4682df27cb6115e35d246f69d6e
|
[] |
no_license
|
TestAccount2077/mas-electronics-maintenance
|
e99d9e41c5ccbbc12670c269546dd7be6f48af10
|
a53399cb59f201ce4bd0bca8cb2eb0dbea396915
|
refs/heads/master
| 2020-03-31T09:40:42.900983
| 2019-01-15T09:46:08
| 2019-01-15T09:46:08
| 152,105,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-11-24 09:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('devices', '0004_maintenancedevice_synced'),
    ]
    operations = [
        # Add a free-text diagram code to the device/spare-part relation;
        # existing rows default to the empty string.
        migrations.AddField(
            model_name='devicesparepartrelation',
            name='diagram_code',
            field=models.CharField(default='', max_length=300),
        ),
    ]
|
[
"maselectronics594@gmail.com"
] |
maselectronics594@gmail.com
|
5625f8133d88c28ad6bdcfbcaf069494513639d2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_flunked.py
|
42c1e36c44049aaf1dfc6cdc5cff24f4a52ff91a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from xai.brain.wordbase.verbs._flunk import _FLUNK
# class header
class _FLUNKED(_FLUNK, ):
    """Word entry for "flunked", the past form of the verb "flunk"."""
    def __init__(self,):
        _FLUNK.__init__(self)
        self.name = "FLUNKED"
        self.specie = 'verbs'
        self.basic = "flunk"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
fd543333d1da171fadb7732b118a35887c5b68f1
|
41c605bf3a002a757cb2344cff526d7a7ae56ea9
|
/_plotly_utils/exceptions.py
|
11a19a5c7c6fe4348451cb4cde8a903141ea1d55
|
[
"MIT"
] |
permissive
|
Jonathan-MW/plotly.py
|
9674b90b5de11fd9089e6afefd04b57bc4587829
|
7528c00772f44dee24c0df7e15d70a4852f171a8
|
refs/heads/master
| 2020-05-30T06:04:13.621478
| 2019-05-31T10:34:15
| 2019-05-31T10:34:15
| 189,571,988
| 2
| 0
|
MIT
| 2019-05-31T09:59:53
| 2019-05-31T09:59:53
| null |
UTF-8
|
Python
| false
| false
| 3,239
|
py
|
class PlotlyError(Exception):
    """Base class of the plotly exception hierarchy."""
    pass
class PlotlyEmptyDataError(PlotlyError):
    """Raised when a figure is submitted with no data."""
    pass
class PlotlyGraphObjectError(PlotlyError):
    def __init__(self, message='', path=(), notes=()):
        """
        General graph object error for validation failures.

        :param (str|unicode) message: The error message.
        :param (iterable) path: A path pointing to the error.
        :param notes: Add additional notes, but keep default exception message.

        """
        self.message = message
        self.plain_message = message  # kept for backwards compatibility
        self.path = list(path)
        self.notes = notes
        super(PlotlyGraphObjectError, self).__init__(message)

    def __str__(self):
        """Render the message, the bracketed error path, and any notes."""
        keys = [repr(step) for step in self.path]
        rendered_path = '[' + ']['.join(keys) + ']'
        rendered_notes = '\n'.join(self.notes)
        return '{message}\n\nPath To Error: {path}\n\n{notes}'.format(
            message=self.message, path=rendered_path, notes=rendered_notes
        )
class PlotlyDictKeyError(PlotlyGraphObjectError):
    def __init__(self, obj, path, notes=()):
        """See PlotlyGraphObjectError.__init__ for param docs."""
        # The offending key is the last entry of the error path.
        message = "'{}' is not allowed in '{}'".format(path[-1], obj._name)
        notes = [obj.help(return_help=True)] + list(notes)
        super(PlotlyDictKeyError, self).__init__(
            message=message, path=path, notes=notes
        )
class PlotlyDictValueError(PlotlyGraphObjectError):
    def __init__(self, obj, path, notes=()):
        """See PlotlyGraphObjectError.__init__ for param docs."""
        # Attribute-specific help text is available for value errors.
        message = "'{}' has invalid value inside '{}'".format(path[-1], obj._name)
        notes = [obj.help(path[-1], return_help=True)] + list(notes)
        super(PlotlyDictValueError, self).__init__(
            message=message, path=path, notes=notes
        )
class PlotlyListEntryError(PlotlyGraphObjectError):
    def __init__(self, obj, path, notes=()):
        """See PlotlyGraphObjectError.__init__ for param docs."""
        # The offending list index is the last entry of the error path.
        message = "Invalid entry found in '{}' at index, '{}'".format(
            obj._name, path[-1]
        )
        notes = [obj.help(return_help=True)] + list(notes)
        super(PlotlyListEntryError, self).__init__(
            message=message, path=path, notes=notes
        )
class PlotlyDataTypeError(PlotlyGraphObjectError):
    def __init__(self, obj, path, notes=()):
        """See PlotlyGraphObjectError.__init__ for param docs."""
        message = "Invalid entry found in '{}' at index, '{}'".format(
            obj._name, path[-1]
        )
        # A data entry is invalid specifically because its 'type' is missing
        # or unrecognised; say so ahead of any caller-supplied notes.
        explanation = "It's invalid because it doesn't contain a valid 'type' value."
        notes = [explanation] + list(notes)
        super(PlotlyDataTypeError, self).__init__(
            message=message, path=path, notes=notes
        )
|
[
"noreply@github.com"
] |
Jonathan-MW.noreply@github.com
|
8917de073014ee1190491690304d2112fceb28ab
|
28de04457e8ebcd1b34494db07bde8a3f25d8cf1
|
/easy/middle_of_the_linked_list_876.py
|
2530238daf61cbeaa48f6bb08b1fd5ae8d58ff33
|
[] |
no_license
|
YangXinNewlife/LeetCode
|
1df4218eef6b81db81bf2f0548d0a18bc9a5d672
|
20d3d0aa325d79c716acfc75daef32f8d4f9f1ad
|
refs/heads/master
| 2023-08-16T23:18:29.776539
| 2023-08-15T15:53:30
| 2023-08-15T15:53:30
| 70,552,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Solutions:
题目很好理解,就是给一个单链表。
需要找到中间节点,奇数的话直接返回,偶数的话,返回后一个即可,那么怎么扫描一遍返回中间节点呢?
我们可以用两个快慢指针。快指针每次走两个,慢指针每次走一个。
"""
class MiddleOfTheLinkedList(object):
    def middleNode(self, head: ListNode) -> ListNode:
        """Return the middle node of a singly linked list.

        Fast/slow pointers starting from a dummy node: fast advances two
        nodes per step, slow one, so when fast runs off the end, slow sits
        at (or just before) the middle. For even-length lists the second of
        the two middle nodes is returned.

        NOTE(review): ListNode is assumed to be provided by the judge
        environment; it is not imported in this file.
        """
        temp = ListNode(0)  # dummy node placed in front of head
        temp.next = head
        slow_p, fast_p = temp, temp
        while fast_p and fast_p.next:
            slow_p = slow_p.next
            fast_p = fast_p.next.next
        # fast_p truthy  -> even length: slow_p.next is the second middle.
        # fast_p is None -> odd length: slow_p is the middle itself.
        return slow_p.next if fast_p else slow_p
|
[
"yangxin03@youxin.com"
] |
yangxin03@youxin.com
|
c8905d198c2817f8c72f763ae583f167c2b5413f
|
f38ce96def797a2095e153b1bb4badf83b59b61c
|
/alarm_emaild.py
|
7ac1f0fc8ad2ee654d9a5805e617737e4db1f2e6
|
[] |
no_license
|
jy02383505/bermuda3
|
7883d8e701a9369ad6dd935db96866dd24c079a5
|
284119226c963d638afe61d1593bc60b7ec85a49
|
refs/heads/master
| 2022-02-03T14:40:10.395805
| 2020-01-03T03:21:41
| 2020-01-03T03:21:41
| 231,504,181
| 1
| 0
| null | 2022-01-06T22:41:00
| 2020-01-03T03:21:19
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by 'vance' on '11/25/14'.
__doc__ = ''
__ver__ = '1.0'
__author__ = 'vance'
from util.failed_task_alarm import run
def main():
    # Delegate to the shared failed-task alarm job (util.failed_task_alarm.run).
    run()
if __name__ == "__main__":
    main()
    # NOTE(review): exit() after main() is redundant — the interpreter
    # terminates anyway once the script body finishes; confirm it is not
    # relied on to force a SystemExit.
    exit()
|
[
"forgivemee@qq.com"
] |
forgivemee@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.