blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5a8205ea8bf4a13ab2b4978ff9aeb97f09467458
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2017/nonfatal_code/congenital/custom/submit_denominator.py
|
07ac5a4861e1421a1304e8ff82527b617fbb3681
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
from __future__ import division
import subprocess
import numpy as np
import pandas as pd
import os
import shutil
import glob
from db_queries import get_location_metadata
# Submit one cluster job per most-detailed location to compute denominators,
# then a final job (held on all of them) that combines the results.
username = 'USERNAME'
root = "FILEPATH"
error_path = "FILEPATH"
output_path = "FILEPATH"

# Make sure the qsub log directories exist before any job is submitted.
if not os.path.exists(error_path):
    os.makedirs(error_path)
if not os.path.exists(output_path):
    os.makedirs(output_path)

out_dir = "FILEPATH"
share_dir = "FILEPATH"

# Most-detailed locations from location set 35 (GBD round 5).
loc_meta = get_location_metadata(location_set_id=35, gbd_round_id=5)
loc_meta = loc_meta.loc[loc_meta.most_detailed==1, ['location_id', 'ihme_loc_id']]

# Start from an empty scratch directory so stale results are never combined.
if not os.path.exists(share_dir):
    os.makedirs(share_dir)
else:
    shutil.rmtree(share_dir)
    os.makedirs(share_dir)

job_string = ""
for index, row in loc_meta.iterrows():
    if row.location_id > -1:
        job_name = 'denom_{}'.format(row.location_id)
        # Accumulate job names into the comma-separated -hold_jid list used by
        # the combine step below (the leading comma is tolerated by qsub).
        job_string = job_string + ',' + job_name
        call = ('qsub -hold_jid {hj} -l mem_free=4.0G -pe multi_slot 4'
                ' -cwd -P proj_custom_models'
                ' -o {o}'
                ' -e {e}'
                ' -N {jn}'
                ' cluster_shell.sh'
                ' calc_denominator.py'
                ' {arg1} {arg2} {arg3}'.format(hj='no_holds',
                    o=output_path, e=error_path, jn=job_name,
                    arg1=share_dir, arg2=row.location_id,
                    arg3=row.ihme_loc_id))
        subprocess.call(call, shell=True)

# Final job: waits for every denominator job, then combines their outputs.
hold = job_string
params = [share_dir, out_dir,
          '--loc_list',
          " ".join([str(x) for x in loc_meta.location_id.tolist()])]
call = ('qsub -hold_jid {hj} -l mem_free=10.0G -pe multi_slot 5'
        ' -cwd -P proj_custom_models'
        ' -o {o}'
        ' -e {e}'
        ' -N {jn}'
        ' cluster_shell.sh'
        ' combine_denominators.py'
        ' {arg1}'.format(hj=hold, o=output_path, e=error_path,
                         jn='combine_denominators', arg1=' '.join(params)))
subprocess.call(call, shell=True)
|
[
"nsidles@uw.edu"
] |
nsidles@uw.edu
|
0061ccf1ef26e200b24c73b71bbf35962c0f8378
|
77d4d5a1881297dce3003560e04a2e39a97d4465
|
/code_chef/BFTT.py
|
cc455f05d204c719300571a84d14da9bfe603165
|
[] |
no_license
|
gomsterX/competitive_programming
|
c34820032c24532d62325a379590a22fa812159a
|
72ac1fe61604e5a5e41f336bb40377fd7e4738d7
|
refs/heads/master
| 2023-07-19T21:28:16.205718
| 2021-09-02T14:18:44
| 2021-09-02T14:18:44
| 271,074,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# Problem ID: BFTT
# Problem Name: Balsa For The Three
# For each test case: print the smallest integer greater than n whose decimal
# representation contains the digit 3 at least three times.
for _ in range(int(input())):
    candidate = int(input()) + 1
    while str(candidate).count('3') < 3:
        candidate += 1
    print(candidate)
|
[
"mohamedmoussaa7@gmail.com"
] |
mohamedmoussaa7@gmail.com
|
1d1f0142466d34eff9b75716ab2c1484f3656a7e
|
50fb25631cdc03a868f09061e76f4dedf85f2d3f
|
/crawler_sys/site_crawler/crawler_sogou.py
|
a5329fd56d4cff00cd248b908d874e33674e74c5
|
[] |
no_license
|
silade/crawler
|
20a88c0eb6471f79a5d5daf947dcbff681d11e6e
|
fbfe3c4feca8be61186aec986b600b36f513f7f4
|
refs/heads/main
| 2023-03-10T10:06:21.097103
| 2021-02-19T16:00:45
| 2021-02-19T16:00:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,663
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 09:30:20 2018
@author: fangyucheng
"""
import time
import requests
from bs4 import BeautifulSoup
from crawler.crawler_sys.utils.trans_strtime_to_timestamp import trans_strtime_to_timestamp
# NOTE(review): hard-coded session cookie captured from a browser session;
# Sogou may throttle or reject requests once it expires — refresh it when
# scraping starts failing.
cookie = ('YYID=2FFBDAA6D4FBA37438F4067C8123E98B; IMEVER=8.5.0.1322;'
          'SUID=3D03FF723865860A59795A5F000BB71F;'
          'SUV=00C039A172FF033D5993ADBD770E7410; usid=lF0F7il0yWbXF5c9;'
          'IPLOC=CN1100; sct=11; SMYUV=1512954490386200;'
          'ad=19fxxkllll2zKxvnlllllVHr6$UllllltsDRlyllll9llllljgDll5@@@@@@@@@@;'
          'SNUID=D0DE5A671A1E68C31FB628911B8277A5; wuid=AAGPcSphIAAAAAqLE2OSTQgAGwY=;'
          'UM_distinctid=16449b02797449-0c5d9293f4a833-143f7040-1fa400-16449b02799881;'
          'CXID=794EC592A14CE76F5DF3F3A3BDDDD787;'
          'ld=Kyllllllll2bWX10QTIdJOHDsvSbWX1uK94Vhkllll9lllllVklll5@@@@@@@@@@;'
          'cd=1534754086&17502a3f56c02f72dfd43a17cbb19663;'
          'rd=Vyllllllll2bBEqoQLWCNCHfKv2bWX1uzX0atkllllwllllRVllll5@@@@@@@@@@;'
          'LSTMV=173%2C72; LCLKINT=1570')

# Browser-like request headers for news.sogou.com; the Cookie field carries
# the session cookie defined above.
headers = {'Host': 'news.sogou.com',
           'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
           'Accept-Encoding': 'gzip, deflate',
           'Cookie': cookie,
           'Connection': 'keep-alive',
           'Upgrade-Insecure-Requests': '1',
           'Cache-Control': 'max-age=0'}
def sogou_info_page(keyword):
    """Scrape the first 10 Sogou News result pages for *keyword*.

    Returns a list of dicts with keys: title, url, source, release_time
    (parsed timestamp), fetch_time (epoch ms), content (snippet or
    'missing'), similar_news (or 'missing'), and keyword.
    """
    result_lst = []
    for page_num in range(1, 11):
        search_url = 'http://news.sogou.com/news?&query='+keyword+'&page='+str(page_num)
        get_page = requests.get(search_url, headers=headers)
        page = get_page.text
        soup = BeautifulSoup(page, 'html.parser')
        news_lst = soup.find_all('div', {'class': 'vrwrap'})
        for line in news_lst:
            try:
                title = line.div.h3.a.text
                url = line.div.h3.a['href']
                source_and_release_time = line.find('p', {'class': 'news-from'}).text
                # Source name and date are joined by non-breaking spaces.
                source_and_release_time_lst = source_and_release_time.split('\xa0')
                source = source_and_release_time_lst[0]
                release_time_str = source_and_release_time_lst[-1]
                release_time = trans_strtime_to_timestamp(release_time_str)
                try:
                    content = line.find('span').text
                except AttributeError:
                    # find() returned None: this result has no snippet.
                    print('no content at %s' % title)
                    content = 'missing'
                fetch_time = int(time.time()*1000)
                try:
                    similar_news = line.find('a', {'id': 'news_similar'}).text
                except AttributeError:
                    print('no similar news at %s' % title)
                    similar_news = 'missing'
                news_info = {'title': title,
                             'url': url,
                             'source': source,
                             'release_time': release_time,
                             'fetch_time': fetch_time,
                             'content': content,
                             'similar_news': similar_news,
                             'keyword': keyword}
                result_lst.append(news_info)
                print('get data at page %s' % page_num)
            except Exception:
                # BUG FIX: this message was a bare expression before and was
                # never actually printed.
                print('the error occured at position %s' % news_lst.index(line))
    return result_lst
if __name__=='__main__':
    # Manual smoke test; '中超' is the Chinese Super League.
    keyword = '中超'
    test_sogou = sogou_info_page(keyword)
|
[
"593516104@qq.com"
] |
593516104@qq.com
|
b8b00509599fec72275e7b0df844db765b98d0f4
|
bb313586d9894a6b1a985d2f8b0a1f8e62907481
|
/videocap1.py
|
3c449b3f6fcfcb91ea2de0cc70bd2b53eb3c944c
|
[] |
no_license
|
HelloDivyani/OpenCV
|
c3538fc8e8985fcbb24bf951f16977b1d23e93a5
|
96fa8aa563393cfbb15913fd0df863c891d00717
|
refs/heads/master
| 2021-01-12T03:45:35.227275
| 2016-11-10T12:14:29
| 2016-11-10T12:14:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import numpy as np
import cv2

# Path of the video file to play back in grayscale; must be set before running
# (an empty path gives an unopened capture and read() returns no frames).
video_path = ""
cap = cv2.VideoCapture(video_path)

while(True):
    ret, frame = cap.read()
    if not ret:
        # BUG FIX: at end-of-stream (or on an unopened capture) frame is None
        # and cvtColor would raise; stop cleanly instead.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame',gray)
    # NOTE(review): waitKey(0) pauses indefinitely on every frame until a key
    # is pressed; use cv2.waitKey(1) if continuous playback is intended.
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
[
"rishabhmadan96@gmail.com"
] |
rishabhmadan96@gmail.com
|
75d5a74d1e78fae22fe705de6e87225c93e25cdc
|
c6f15aa103de030f7eea6c1aaf6e7ad0ec88dbc1
|
/add/features/10_binmap/viewer/app/gui/MainFrame.py
|
fbff879a00f19d0ac8a16c8b54567d556f59fbf3
|
[] |
no_license
|
sysdeep/dcat
|
6f3478348113b0d1206f82456f5bd80431282daf
|
f8c801173ace4447018c3034c56254ab1a6d4089
|
refs/heads/master
| 2023-05-03T16:04:28.027335
| 2023-04-17T15:04:04
| 2023-04-17T15:04:04
| 320,551,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QHBoxLayout, QVBoxLayout, QWidget, QGroupBox, QGridLayout
from app.shared import get_storage
from .VolumeInfo import VolumeInfo
from .explorer.Explorer import Explorer
class MainFrame(QWidget):
    """Main content widget: volume-information panel stacked above the explorer."""

    def __init__(self, parent=None):
        super(MainFrame, self).__init__(parent)

        # Child widgets.
        self.__volume_info = VolumeInfo()
        self.__explorer = Explorer()

        # Vertical layout: info panel on top, explorer below.
        box = QVBoxLayout()
        box.addWidget(self.__volume_info)
        box.addWidget(self.__explorer)
        self.setLayout(box)

    def start(self):
        """Populate the child widgets from the shared storage object."""
        storage = get_storage()
        self.__volume_info.set_info(storage.volume.volume_header)
        self.__explorer.show_root()
|
[
"sysdeep@yandex.ru"
] |
sysdeep@yandex.ru
|
369ccaa36c9e88ee6f2b28a35a7d5edc5337d3e5
|
eedde715576cbbc195c3f6049636b7115895b138
|
/pandas_doit/pandas_doit_graph/pandas_doit_graphes.py
|
8dbe02409cf10365758de802b50cd103399f2c54
|
[] |
no_license
|
azegun/workspace_python
|
e75de706c08edfe7c5c8d88e8b4679578d6aa68f
|
8edf1683a57e468a2e665be0092bc516a28571fd
|
refs/heads/master
| 2023-07-05T10:42:07.955389
| 2021-08-13T03:27:52
| 2021-08-13T03:27:52
| 388,668,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
import seaborn as sns
import matplotlib.pyplot as plt

# Demo of basic matplotlib plots on seaborn's built-in `tips` dataset.
tips = sns.load_dataset('tips')
print(tips)

# Histogram of the total bill
fig = plt.figure()
axes1 = fig.add_subplot(1, 1, 1)
axes1.hist(tips['total_bill'], bins=10)  # bins=10 splits the x-axis into 10 intervals
axes1.set_title('Histogram of Total Bill')
# BUG FIX: the axis labels were swapped — in a histogram the value
# (total bill) is on the x-axis and the count (frequency) on the y-axis.
axes1.set_xlabel('Total Bill')
axes1.set_ylabel('Frequency')

# Scatter plot of tip versus total bill
scatter_plot = plt.figure()
axes1 = scatter_plot.add_subplot(1, 1, 1)
axes1.scatter(tips['total_bill'], tips['tip'])
axes1.set_title('Scatterplot of Total Bill Vs Tip')
axes1.set_xlabel('Total Bill')
axes1.set_ylabel('Tip')

# Box plot of tip amounts split by sex
boxplot = plt.figure()
axes1 = boxplot.add_subplot(1, 1, 1)
axes1.boxplot([tips[tips['sex'] == 'Female']['tip'],
               tips[tips['sex'] == 'Male']['tip']],
              labels=['Female', 'Male'])
axes1.set_xlabel('Sex')
axes1.set_ylabel('Tip')
axes1.set_title('Boxplot of Tips by Sex')

plt.show()
|
[
"tkdrjs7@naver.com"
] |
tkdrjs7@naver.com
|
fa6054203e5f3135b3d56a0bc695a47469cac9a5
|
cb3bce599e657188c30366adb0af3007ff9b8f96
|
/src/note/test_pachongbaidu.py
|
47392a7ab5c0759490c9e2e271a9216663c6e7ea
|
[] |
no_license
|
skk4/python_study
|
534339e6c378d686c29af6d81429c472fca19d6d
|
4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987
|
refs/heads/master
| 2021-01-01T04:36:52.037184
| 2017-12-08T01:04:27
| 2017-12-08T01:04:27
| 97,207,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
# -*- coding:utf-8 -*-
# Python 2 script: interactively translate English input to Chinese via the
# (undocumented) Baidu Fanyi web endpoint. Loops forever reading from stdin.
import urllib
import urllib2
import json

while 1:
    content = raw_input(">:")
    # Mimic a real browser so the endpoint does not reject the request.
    headers = {
        'Referer': 'http://fanyi.baidu.com/?aldtype=16047/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
    }
    # Form fields expected by the v2transapi endpoint (English -> Chinese).
    data = {}
    data['from'] = 'en'
    data['to'] = 'zh'
    data['query'] = content
    data['transtype'] = 'translang'
    data['simple_means_flag'] = '3'
    url = 'http://fanyi.baidu.com/v2transapi'
    values = urllib.urlencode(data)
    rq = urllib2.Request(url, values, headers)
    fd = urllib2.urlopen(rq)
    #print fd.getcode()
    html = fd.read()
    #print html
    #print html
    # The JSON response nests the translated text under trans_result.data.
    dst = json.loads(html)
    print dst['trans_result']['data'][0]['dst']
|
[
"skk_4@163.com"
] |
skk_4@163.com
|
36e3c9586af6106c678d5bcac19e2ab7d0f50adc
|
44e8334e1b17fda7f60d9760f59868a9227e2ab0
|
/ML/ch10_9.py
|
a477bc9725a42a97d551bd6ada02ecd600ffca8d
|
[] |
no_license
|
MysteriousSonOfGod/python-3
|
47c2aa69a84ba78876c74bc6f2e7e6f3093df1e2
|
a303a5284c40f3cb96a8082a1f5ed80773b66336
|
refs/heads/master
| 2023-02-16T18:21:46.153388
| 2021-01-13T10:55:14
| 2021-01-13T10:55:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,853
|
py
|
import pandas as pd
import numpy as np
import mglearn
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
# Make the sibling `images` package importable when run as a script.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import images.image

# 9. make_moons: a two-dimensional dataset with two classes
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
print("X.shape: {}".format(X.shape))
print("y.shape: {}".format(y.shape))
print("X 타입: {}".format(type(X)))
print("y 타입: {}".format(type(y)))
print(X[:5], y[:5])

###############################################################################
# 1. Evaluating clustering against target values: metrics that compare a
#    clustering result with the true cluster labels
#    1. ARI (adjusted rand index)
#       ARI: 1 (optimal) down to ~0 (random assignment)
#    2. NMI (normalized mutual information)
#
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN

# Standardize features before clustering.
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)

fig, axes = plt.subplots(1, 4, figsize=(15, 3), subplot_kw={'xticks':(), 'yticks':()})

# List of the three clustering algorithms to compare
algos = [KMeans(n_clusters=2), AgglomerativeClustering(n_clusters=2), DBSCAN()]
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X))

# Randomly assigned clusters as a baseline
from sklearn.metrics.cluster import adjusted_rand_score
axes[0].scatter(X_scaled[:, 0], X_scaled[:, 1], c=random_clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
axes[0].set_title("random assign - ARI : {:.2f}".format(adjusted_rand_score(y, random_clusters)))
for ax, algo in zip(axes[1:], algos):
    clusters = algo.fit_predict(X_scaled)
    ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
    ax.set_title("{} - ARI: {:.2f}".format(algo.__class__.__name__, adjusted_rand_score(y, clusters)))
# plt.title('Comparison of clustering algorithms on complex-shaped clusters')
images.image.save_fig("10.9.moons_spiral_scatter_adjusted_rand_score")
plt.show()

# 2. Evaluating clustering without target values - silhouette coefficient
# When clustering is applied for real there are usually no labels to compare
# against. The silhouette coefficient is a label-free clustering metric,
# although in practice it does not always work well.
# The silhouette score measures how compact the clusters are; higher is
# better, with a maximum of 1.
# Compare k-means, agglomerative clustering and DBSCAN using the silhouette score
fig, axes = plt.subplots(1, 4, figsize=(15, 3), subplot_kw={'xticks':(), 'yticks':()})
# List of the three clustering algorithms (reused from above)
# algos = [KMeans(n_clusters=2), AgglomerativeClustering(n_clusters=2), DBSCAN()]
# random_state = np.random.RandomState(seed=0)
# random_clusters = random_state.randint(low=0, high=2, size=len(X))
# Randomly assigned clusters as a baseline
from sklearn.metrics.cluster import silhouette_score
axes[0].scatter(X_scaled[:, 0], X_scaled[:, 1], c=random_clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
axes[0].set_title("random assign : {:.2f}".format(silhouette_score(X_scaled, random_clusters)))
for ax, algo in zip(axes[1:], algos):
    clusters = algo.fit_predict(X_scaled)
    ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
    ax.set_title("{} : {:.2f}".format(algo.__class__.__name__, silhouette_score(X_scaled, clusters)))
# plt.title('Comparison of clustering algorithms on complex-shaped clusters')
images.image.save_fig("10.9.moons_spiral_scatter_silhouette_score")
plt.show()
|
[
"cbaeck1@gmail.com"
] |
cbaeck1@gmail.com
|
d2a3053fd64857bb28bf86e6ff1a7e69900c528b
|
1ad12a71c3d5d2b3810ce03e8bd138c4ffb66eb8
|
/xlsxwriter/test/comparison/test_chart_pie02.py
|
772e70f53f3fe101ff7ea093d7ef19814e4f5983
|
[
"BSD-2-Clause-Views"
] |
permissive
|
idreamsfy/XlsxWriter
|
b52929229b16e2ee1eaca0cda9980a5a0aad5769
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
refs/heads/master
| 2021-01-02T20:39:20.415882
| 2020-02-07T21:07:55
| 2020-02-07T21:07:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_pie02.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'pie'})

        # Two data columns: categories in A, values in B.
        categories, values = [2, 4, 6], [60, 30, 10]
        worksheet.write_column('A1', categories)
        worksheet.write_column('B1', values)

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$3',
            'values': '=Sheet1!$B$1:$B$3',
        })
        chart.set_legend({'font': {'bold': 1, 'italic': 1, 'baseline': -1}})

        worksheet.insert_chart('E9', chart)
        workbook.close()

        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
bca4a0189f323184e4d9d842f593edc9890ec469
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02658/s377864583.py
|
a4bd4508fb86c22e9ea006ed431ef20ad6050566
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
def main():
    """Read n and n integers from stdin; print their product, or 0 if any
    factor is zero, or -1 if the product exceeds 10**18 (AtCoder ABC169 B)."""
    n = int(input())  # length line; the list below carries the actual data
    a = list(map(int, input().split()))
    if 0 in a:
        # A zero factor makes the product 0 regardless of any overflow.
        print(0)
        return
    ans = 1
    for i in a:
        ans *= i
        if ans > (10 ** 18):
            # Bail out as soon as the cap is exceeded (removed the unused
            # `flag` variable and the redundant `else` from the original).
            print(-1)
            return
    print(ans)


main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a9f9d1d30ff078ad375c253215133f498c5895b9
|
9d29861e44389e02762e6eb0457c6a415a54e26c
|
/samples/images/export_task.py
|
64e408194a49b511eccab70c08ad4f8b855b7f79
|
[
"MIT"
] |
permissive
|
itsmemattchung/pyrax
|
e787d67f8a79036834575f951f8c9e81d64d8b8f
|
e8eff127a5c9b6e64a9a42593d5e889c3c03f81d
|
refs/heads/master
| 2021-01-18T10:14:31.752469
| 2015-05-16T16:44:35
| 2015-05-16T16:44:35
| 21,360,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,354
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pyrax
# Interactive Python 2 script: export a private cloud image into a Cloud
# Files container via pyrax, optionally polling the task until it finishes.
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
imgs = pyrax.images
cf = pyrax.cloudfiles

print("You will need to select an image to export, and a Container into which "
      "the exported image will be placed.")

# Let the user pick one of their private images by number.
images = imgs.list(visibility="private")
print()
print("Select an image to export:")
for pos, image in enumerate(images):
    print("[%s] %s" % (pos, image.name))
snum = raw_input("Enter the number of the image you want to share: ")
if not snum:
    # Empty input means "abort".
    exit()
try:
    num = int(snum)
except ValueError:
    print("'%s' is not a valid number." % snum)
    exit()
if not 0 <= num < len(images):
    print("'%s' is not a valid image number." % snum)
    exit()
image = images[num]

# Same selection flow for the destination Cloud Files container.
conts = cf.list()
print()
print("Select the target container to place the exported image:")
for pos, cont in enumerate(conts):
    print("[%s] %s" % (pos, cont.name))
snum = raw_input("Enter the number of the container: ")
if not snum:
    exit()
try:
    num = int(snum)
except ValueError:
    print("'%s' is not a valid number." % snum)
    exit()
if not 0 <= num < len(conts):
    print("'%s' is not a valid container number." % snum)
    exit()
cont = conts[num]

# Kick off the export and optionally block until it succeeds or fails.
task = imgs.export_task(image, cont)
print("Task ID=%s" % task.id)
print()
answer = raw_input("Do you want to track the task until completion? This may "
                   "take several minutes. [y/N]: ")
if answer and answer[0].lower() == "y":
    pyrax.utils.wait_until(task, "status", ["success", "failure"],
                           verbose=True, interval=30)
|
[
"ed@leafe.com"
] |
ed@leafe.com
|
86df25291145f18d5fc9e052389ac8390fba23ec
|
5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba
|
/algorithm-study/baekjun/1927.py
|
6029af1ee152bdfa3b91b592febe7a1fbf701b55
|
[] |
no_license
|
namujinju/study-note
|
4271b4248b3c4ac1b96ef1da484d86569a030762
|
790b21e5318a326e434dc836f5f678a608037a8c
|
refs/heads/master
| 2023-02-04T13:25:55.418896
| 2020-12-26T10:47:11
| 2020-12-26T10:47:11
| 275,279,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
import heapq as hq
import sys
# Baekjoon 1927 (min-heap): 0 pops/prints the minimum (0 if empty),
# any other value is pushed.
heap = []
operation_count = int(input())  # number of operations
for _ in range(operation_count):
    value = int(sys.stdin.readline())  # fast input: https://www.acmicpc.net/blog/view/56
    if value:
        hq.heappush(heap, value)
    elif heap:
        print(hq.heappop(heap))
    else:
        print(0)
|
[
"59328810+namujinju@users.noreply.github.com"
] |
59328810+namujinju@users.noreply.github.com
|
93f739b3253aa3891ea450f662d32d5466856a30
|
ca23b411c8a046e98f64b81f6cba9e47783d2584
|
/learning_parameter_allocation/omniglot/omniglot_eval.py
|
57302ece23bb7e33211ed04e68884facbf8e35a4
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pdybczak/google-research
|
1fb370a6aa4820a42a5d417a1915687a00613f9c
|
0714e9a5a3934d922c0b9dd017943a8e511eb5bc
|
refs/heads/master
| 2023-03-05T23:16:11.246574
| 2021-01-04T11:30:28
| 2021-01-04T11:30:28
| 326,629,357
| 1
| 0
|
Apache-2.0
| 2021-02-01T12:39:09
| 2021-01-04T09:17:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation job for the Omniglot experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from learning_parameter_allocation import data
from learning_parameter_allocation import models
from learning_parameter_allocation import utils
from learning_parameter_allocation.pathnet import components as pn_components
from learning_parameter_allocation.pathnet import pathnet_lib as pn
from learning_parameter_allocation.pathnet.utils import create_uniform_layer
import tensorflow.compat.v1 as tf
# Height, width and channel count of the grayscale Omniglot input images.
_OMNIGLOT_INPUT_SHAPE = [105, 105, 1]

# Delay in seconds to wait before rechecking if there are new checkpoints.
_CHECK_FOR_CHECKPOINTS_FREQUENCY = 15

# If there are no checkpoints for this number of seconds give up and finish.
_MAX_WAIT_FOR_NEW_CHECKPOINTS = 3 * 60 * 60

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'logdir', '/tmp/summary_dir/',
    'Path to the directory to save logs and summaries.')
flags.DEFINE_string(
    'method', 'gumbel_matrix',
    'Approach to use to determine which tasks gets which components, '
    'one of "shared_bottom", "no_sharing", "gumbel_matrix".')
def loss_fn(labels, logits):
  """Returns per-example sparse softmax cross-entropy between labels/logits."""
  return tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
def build_pathnet_eval_graph(
    task_names, batch_size, num_classes_for_tasks, router_fn):
  """Constructs the PathNet eval graph.

  Args:
    task_names: (list of strings) names of tasks.
    batch_size: (int) batch size to use.
    num_classes_for_tasks: (list of ints) number of classes for each task.
    router_fn: function that, given a single argument `num_components`, returns
      a router (see routers in `pathnet/pathnet_lib.py`) for a layer containing
      `num_components` components.

  Returns:
    A tuple of (`p_inputs`, `p_task_id`, `out_logits`). `p_inputs` and
    `p_task_id` are placeholders for input image and scalar task id,
    respectively. `out_logits` are the final network output (classification
    logits).
  """
  num_tasks = len(task_names)

  # PathNet body: Keras layers wrapped into routed PathNet layers.
  keras_layers = models.get_keras_layers_for_omniglot_experiment()
  pathnet_layers = models.build_model_from_keras_layers(
      _OMNIGLOT_INPUT_SHAPE, num_tasks, keras_layers, router_fn)

  # Task-specific linear heads.
  pathnet_layers.append(
      utils.create_layer_with_task_specific_linear_heads(num_classes_for_tasks))

  # Final layer: one loss head per task, no routing (router_fn returns None).
  # NOTE(review): SelectCombiner presumably picks the head for the active
  # task id — confirm against pathnet_lib.
  pathnet_layers.append(create_uniform_layer(
      num_components=num_tasks,
      component_fn=lambda: pn_components.ModelHeadComponent(loss_fn=loss_fn),
      combiner_fn=pn.SelectCombiner,
      router_fn=lambda: None))

  # Assemble the graph in inference mode (training=False).
  pathnet = pn.PathNet(
      pathnet_layers, tf.contrib.training.HParams(batch_size=batch_size))

  p_inputs, _, p_task_id, _, out_logits = utils.build_pathnet_graph(
      pathnet, _OMNIGLOT_INPUT_SHAPE, training=False)

  return p_inputs, p_task_id, out_logits
def main(_):
  """Evaluation loop: reload each new checkpoint and evaluate all splits."""
  num_alphabets = 20
  task_names = ['Omniglot-%d' % task_id for task_id in range(num_alphabets)]

  task_data, num_classes = data.get_data_for_multitask_omniglot_setup(
      num_alphabets)

  batch_size = 16
  for task_id in range(num_alphabets):
    task_data[task_id] = data.batch_all(task_data[task_id], batch_size)

  router_fn = utils.get_router_fn_by_name(num_alphabets, FLAGS.method)

  session = tf.Session(graph=tf.get_default_graph())

  # Summary writer must be set up before the graph is built so the
  # summary ops defined below attach to it.
  tf.train.get_or_create_global_step()
  summary_writer = tf.contrib.summary.create_file_writer(FLAGS.logdir)
  summary_writer.set_as_default()
  tf.contrib.summary.initialize(session=session)

  p_inputs, p_task_id, out_logits = build_pathnet_eval_graph(
      task_names, batch_size, num_classes, router_fn)

  evaluate_on = ['train', 'validation', 'test']

  # Per-split placeholders and summary ops for task accuracies.
  p_task_accuracies = {}
  accuracy_summary_op = {}

  for data_split in evaluate_on:
    (p_task_accuracies[data_split], accuracy_summary_op[data_split]) =\
        utils.create_accuracy_summary_ops(
            task_names, summary_name_prefix='eval_%s' % data_split)

  # This `Saver` is not used to save variables, only to restore them from
  # the checkpoints.
  saver = tf.train.Saver(tf.global_variables())

  previous_checkpoint_path = ''
  time_waited_for_checkpoints = 0

  # Poll the training logdir; stop after _MAX_WAIT_FOR_NEW_CHECKPOINTS
  # seconds without any new checkpoint appearing.
  while time_waited_for_checkpoints < _MAX_WAIT_FOR_NEW_CHECKPOINTS:
    latest_checkpoint_path = tf.train.latest_checkpoint(FLAGS.logdir)

    if latest_checkpoint_path in [None, previous_checkpoint_path]:
      print('Found no new checkpoints')
      time_waited_for_checkpoints += _CHECK_FOR_CHECKPOINTS_FREQUENCY
      time.sleep(_CHECK_FOR_CHECKPOINTS_FREQUENCY)
      continue
    else:
      time_waited_for_checkpoints = 0

    print('Reloading checkpoint: %s' % latest_checkpoint_path)
    previous_checkpoint_path = latest_checkpoint_path
    saver.restore(session, latest_checkpoint_path)

    for data_split in evaluate_on:
      # Fresh one-shot iterators per split so every evaluation sees the
      # full dataset from the start.
      eval_data = [
          dataset[data_split].make_one_shot_iterator().get_next()
          for dataset in task_data
      ]
      print('Evaluating on: %s' % data_split)
      task_accuracies = utils.run_pathnet_evaluation(
          session, p_inputs, p_task_id, out_logits, task_names, eval_data)
      utils.run_accuracy_summary_ops(
          session,
          p_task_accuracies[data_split],
          task_accuracies,
          accuracy_summary_op[data_split])


if __name__ == '__main__':
  app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
02d2173600e6ba222744b73a6ac1cf959bc7a32f
|
d96787f92bd86c8d8bcf01a4e7ec8f7feec24194
|
/kattis/kutevi/solution.py
|
9a687ed8e3f0ebe800bdb8b0ba3849c123defb8f
|
[] |
no_license
|
iandioch/solutions
|
133cbc3af58fadcde0b2e981fb0e7d05801070a7
|
8b3e458b3c01179ddf776bfbb897f263f22f3693
|
refs/heads/master
| 2023-04-09T03:39:16.952817
| 2023-03-15T20:00:53
| 2023-03-15T20:00:53
| 47,693,495
| 48
| 40
| null | 2019-10-22T14:52:59
| 2015-12-09T13:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 526
|
py
|
from collections import deque
# Kattis "kutevi": starting from the first given angle, any sum or absolute
# difference (mod 360) of a reachable angle with a given angle is reachable.
n, m = map(int, input().split())
reachable = [False] * 360
given = list(map(int, input().split()))

# Depth-first flood over the 360 possible angles.
stack = deque()
stack.append(given[0])
while len(stack):
    current = stack.pop()
    if reachable[current]:
        continue
    reachable[current] = True
    for angle in given:
        difference = abs(current - angle)
        if not reachable[difference]:
            stack.append(difference)
        total = (current + angle) % 360
        if not reachable[total]:
            stack.append(total)

# Answer the m queries.
for token in input().split():
    if reachable[int(token)]:
        print('YES')
    else:
        print('NO')
|
[
"iandioch11@gmail.com"
] |
iandioch11@gmail.com
|
2005e615b5cc1e94cd02c98c46a797303e28e8b4
|
3ed50263057c1695330009f9f5b122e412e1c02f
|
/bn/distribs/multivariate_distribution.py
|
353b2f69ef7384b8f6ede8151da65d51d4e14d4f
|
[
"MIT"
] |
permissive
|
ppijbb/PyOpenDial
|
5528aa584190dcf08b892ec92a5ce8c2b82eb845
|
c9bca653c18ccc082dc8b86b4a8feee9ed00a75b
|
refs/heads/master
| 2022-02-16T01:27:39.667661
| 2019-07-24T10:51:41
| 2019-07-24T10:51:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,027
|
py
|
import abc
from multipledispatch import dispatch
from datastructs.assignment import Assignment
class MultivariateDistribution:
    """
    Representation of a multivariate probability distribution P(X1,...Xn), where
    X1,...Xn are random variables.

    This is an abstract interface: every method raises NotImplementedError and
    must be overridden by concrete subclasses. The @dispatch decorators come
    from `multipledispatch` and select overloads by runtime argument types.
    """
    __metaclass__ = abc.ABCMeta

    @dispatch()
    @abc.abstractmethod
    def get_variables(self):
        """
        Returns the names of the random variables in the distribution

        :return: the set of variable names.
        """
        raise NotImplementedError()

    @dispatch()
    @abc.abstractmethod
    def get_values(self):
        """
        Returns the set of possible assignments for the random variables.

        :return: the set of possible assignment
        """
        raise NotImplementedError()

    @dispatch(Assignment)
    @abc.abstractmethod
    def get_prob(self, values):
        """
        Returns the probability of a particular assignment of values.

        :param values: the assignment of values to X1,...Xn.
        :return: the corresponding probability
        """
        raise NotImplementedError()

    @dispatch()
    @abc.abstractmethod
    def sample(self):
        """
        Returns a sample assignment for X1,...Xn.

        :return: the sampled assignment
        """
        raise NotImplementedError()

    @dispatch(str)
    @abc.abstractmethod
    def get_marginal(self, variable):
        """
        Returns the marginal probability distribution P(Xi) for a random variable Xi
        in X1,...Xn.

        :param variable: the random variable Xi
        :return: the marginal distribution P(Xi)
        """
        raise NotImplementedError()

    @dispatch(str, str)
    @abc.abstractmethod
    def modify_variable_id(self, old_variable_id, new_variable_id):
        """
        Modifies the variable identifier in the distribution

        :param old_variable_id: the old identifier
        :param new_variable_id: the new identifier
        """
        raise NotImplementedError()

    @dispatch()
    @abc.abstractmethod
    def to_discrete(self):
        """
        Returns a representation of the distribution as a multivariate table.

        :return: the multivariate table.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def __copy__(self):
        """
        Returns a copy of the distribution.

        :return: the copy
        """
        raise NotImplementedError()

    @dispatch(float)
    @abc.abstractmethod
    def prune_values(self, threshold):
        """
        Prunes all values assignment whose probability falls below the threshold.

        :param threshold: the threshold to apply
        :return: true if at least one value has been removed, false otherwise
        """
        raise NotImplementedError()

    @dispatch()
    @abc.abstractmethod
    def get_best(self):
        """
        Returns the value with maximum probability.

        :return: the value with maximum probability
        """
        raise NotImplementedError()
|
[
"jys5609@gmail.com"
] |
jys5609@gmail.com
|
bbcc0cc75155387e6ce8b9a138302984c6c21481
|
37f1563cdacf4b37b5b927b892538218aae79c77
|
/medium/trees/flatten.py
|
3a5d24227b7f8b528e32e799246ebdb3f20589e1
|
[] |
no_license
|
unsortedtosorted/elgoog
|
9dee49a20f981305910a8924d86e8f2a16fe14c2
|
5be9fab24c0c1fd9d5dc7a7bdaca105f1ca873ee
|
refs/heads/master
| 2020-04-15T00:51:12.114249
| 2019-05-19T04:37:24
| 2019-05-19T04:37:24
| 164,254,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
"""
114. Flatten Binary Tree to Linked List
Steps:
if leaf node, do nothing
if not leaf node:
flatten left subtree
flatten right subtree
connect right child to right most leaf of left child
make left child as right child
make left child None
RunTime : O(N^2)
Space : O(N)
"""
class Solution(object):
    """LeetCode 114: flatten a binary tree into a right-leaning list in place."""

    def flatten(self, root):
        """Rearrange *root* so every left child is None and the right chain
        follows the tree's preorder traversal.

        Iterative splice: at each node, hang the current right subtree off
        the rightmost leaf of the left subtree, promote the left subtree to
        the right, then walk down the right spine.
        """
        node = root
        while node:
            if node.left:
                # Rightmost leaf of the left subtree becomes the bridge
                # back to the old right subtree.
                tail = node.left
                while tail.right:
                    tail = tail.right
                tail.right = node.right
                node.right = node.left
                node.left = None
            node = node.right
|
[
"noreply@github.com"
] |
unsortedtosorted.noreply@github.com
|
0cad74dfab81fccf5ba4454fd2435cc789439934
|
6b181f5640e2c3df91d1a6d5c95cf1989012f0d5
|
/RPi-stub/spidev.py
|
c598627f4304ee6d4f63e4b17637f97d0978bb97
|
[
"MIT"
] |
permissive
|
GamesCreatorsClub/GCC-Rover
|
9b84dcd84cce60c321906223f8c24f99722d1bae
|
25a69f62a1bb01fc421924ec39f180f50d6a640b
|
refs/heads/master
| 2021-01-11T18:04:05.876976
| 2019-10-01T15:20:30
| 2019-10-01T15:20:30
| 79,477,472
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,320
|
py
|
import os
import socket
# SPI mode bit flags, mirroring the Linux spidev ioctl constants.
_SPI_CPHA = 0x01
_SPI_CPOL = 0x02
# _SPI_MODE_0 = 0
# _SPI_MODE_1 = SPI_CPHA
# _SPI_MODE_2 = SPI_CPOL
# _SPI_MODE_3 = SPI_CPOL | SPI_CPHA
# _SPI_MODES = [_SPI_MODE_0, _SPI_MODE_1, _SPI_MODE_2, _SPI_MODE_3]
_SPI_CS_HIGH = 0x04
_SPI_LSB_FIRST = 0x08
_SPI_3WIRE = 0x10
_SPI_LOOP = 0x20
_SPI_NO_CS = 0x40
_SPI_READY = 0x80


class SpiDev:
    """Network-backed stub of the ``spidev.SpiDev`` API.

    Instead of opening /dev/spidevX.Y it forwards commands over TCP to a
    bridge on the host named by the RASPBERRY_IP environment variable
    (port RASPBERRY_PORT, default 8789).

    Fixes over the previous revision:
      * the ``lsbfirst`` setter was decorated ``@cshigh.setter`` and so
        clobbered both properties;
      * the ``loop`` getter tested ``_SPI_3WIRE`` instead of ``_SPI_LOOP``;
      * the ``max_speed_hz`` getter returned itself (infinite recursion)
        and its setter was misnamed ``bits_per_word``, overwriting that
        property with a no-op;
      * the ``mode`` setter was misnamed ``loop``, overwriting the loop
        property.
    """

    _socket = None        # TCP connection to the bridge
    _bits_per_word = 0    # cached word size (0 = unset)
    _max_speed_hz = 0     # cached bus speed (0 = unset)
    _mode = 0             # full SPI mode word incl. CS/LSB/3WIRE/LOOP bits

    def __init__(self):
        """Connect to the remote SPI bridge."""
        port = 8789
        ip = os.environ["RASPBERRY_IP"]
        if "RASPBERRY_PORT" in os.environ:
            port = int(os.environ["RASPBERRY_PORT"])
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((ip, port))

    def __del__(self):
        # Best-effort close; interpreter shutdown may already have torn
        # the socket machinery down.
        if self._socket is not None:
            try:
                self._socket.close()
            except Exception:
                pass

    def _set_flag(self, flag, enabled):
        # Set or clear a single bit of the SPI mode word.
        if enabled:
            self._mode = self._mode | flag
        else:
            self._mode = self._mode & ~flag

    def open(self, bus, device):
        """Ask the bridge to open SPI bus ``bus``, chip-select ``device``."""
        b = bytearray()
        b.append(ord("o"))
        b.append(bus)
        b.append(device)
        self._socket.send(b)

    def xfer(self, data, speed_hz=0, delay_usec=0, bits_per_word=8):
        """Full-duplex transfer: send ``data``, return the bytes read back.

        Wire format: 'x', 16-bit little-endian payload length, payload.
        ``speed_hz``/``delay_usec``/``bits_per_word`` are accepted for API
        compatibility but are not transmitted to the bridge.
        """
        b = bytearray()
        b.append(ord("x"))
        b.append(len(data) & 255)
        b.append(len(data) >> 8 & 255)
        for d in data:
            b.append(d)
        self._socket.send(b)
        rec = self._socket.recv(len(data))
        resp = []
        for bb in rec:
            resp.append(bb)
        return resp

    def xfer2(self, data, speed_hz=0, delay_usec=0, bits_per_word=8):
        # Not implemented in this stub; kept for API compatibility.
        pass

    def close(self):
        """Reset cached settings and tell the bridge to close the device."""
        self._mode = 0
        self._bits_per_word = 0
        self._max_speed_hz = 0
        b = bytearray()
        b.append(ord("c"))
        self._socket.send(b)

    def readbytes(self, n):
        # Not implemented in this stub.
        pass

    def writebytes(self, data):
        # Not implemented in this stub.
        pass

    @property
    def cshigh(self):
        return self._mode & _SPI_CS_HIGH != 0

    @cshigh.setter
    def cshigh(self, cshigh):
        self._set_flag(_SPI_CS_HIGH, cshigh)

    @property
    def lsbfirst(self):
        return self._mode & _SPI_LSB_FIRST != 0

    @lsbfirst.setter
    def lsbfirst(self, lsbfirst):
        self._set_flag(_SPI_LSB_FIRST, lsbfirst)

    @property
    def threewire(self):
        return self._mode & _SPI_3WIRE != 0

    @threewire.setter
    def threewire(self, threewire):
        self._set_flag(_SPI_3WIRE, threewire)

    @property
    def loop(self):
        return self._mode & _SPI_LOOP != 0

    @loop.setter
    def loop(self, loop):
        self._set_flag(_SPI_LOOP, loop)

    @property
    def bits_per_word(self):
        return self._bits_per_word

    @bits_per_word.setter
    def bits_per_word(self, bits_per_word):
        if bits_per_word < 8 or bits_per_word > 16:
            raise ValueError("invalid bits_per_word (8 to 16)")
        self._bits_per_word = bits_per_word

    @property
    def max_speed_hz(self):
        return self._max_speed_hz

    @max_speed_hz.setter
    def max_speed_hz(self, max_speed_hz):
        self._max_speed_hz = max_speed_hz

    @property
    def mode(self):
        # Only the clock phase/polarity bits form the SPI "mode" 0-3.
        return self._mode & (_SPI_CPHA | _SPI_CPOL)

    @mode.setter
    def mode(self, mode):
        self._mode = (self._mode & ~(_SPI_CPHA | _SPI_CPOL)) | mode
if __name__ == "__main__":
    # Self-test: run a local server speaking the same tiny protocol as the
    # Raspberry-side bridge, then drive SpiDev against it over loopback.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("0.0.0.0", 8789))
    s.listen(1)
    def startListen():
        import threading
        def session(con):
            # Serve one client: dispatch on single-byte command codes.
            while True:
                # print("Waiting to command")
                cmd = ord(con.recv(1))
                if cmd == ord("c"):
                    print("Close")
                elif cmd == ord("o"):
                    bus = ord(con.recv(1))
                    device = ord(con.recv(1))
                    print("Opening " + str(bus) + "." + str(device))
                elif cmd == ord("x"):
                    # Transfer: 2-byte length then payload, echoed back.
                    l = ord(con.recv(1))
                    h = ord(con.recv(1))
                    # NOTE(review): '+' binds tighter than '<<', so this is
                    # (l + h) << 8, not l + (h << 8) — does not match the
                    # little-endian encoding SpiDev.xfer sends; confirm.
                    size = l + h << 8
                    print("Receiving " + str(size) +" bytes")
                    data = con.recv(size)
                    print("Received " + str(data))
                    con.send(data)
                else:
                    print("Unknown command " + str(cmd))
        def listen():
            # Accept clients forever; one daemon thread per connection.
            while True:
                con, addr = s.accept()
                t = threading.Thread(target=session, args=[con])
                t.daemon = True
                t.start()
        thread = threading.Thread(target=listen)
        thread.daemon = True
        thread.start()
    try:
        startListen()
        os.environ["RASPBERRY_IP"] = "127.0.0.1"
        spi = SpiDev()
        print("opening spi")
        spi.open(1, 2)
        print("sending data")
        spi.xfer(b"Hello")
        print("closing")
        spi.close()
    finally:
        s.close()
        # NOTE(review): detach() after close() is redundant — the fd is
        # already released by close(); confirm this can simply be dropped.
        s.detach()
|
[
"natdan@users.noreply.github.com"
] |
natdan@users.noreply.github.com
|
a2595b2efb0dc2cdd2387f296ae8a5b72d28c811
|
974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_03_01/aio/operations/_operations.py
|
9652fce008b36636921de47c082edd72b0107162
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
gaoyp830/azure-sdk-for-python
|
4816f04c554dcffb7510a6b7044b0c86a2dd32e1
|
1c66defa502b754abcc9e5afa444ca03c609342f
|
refs/heads/master
| 2022-10-20T21:33:44.281041
| 2022-09-29T17:03:13
| 2022-09-29T17:03:13
| 250,355,505
| 0
| 0
|
MIT
| 2020-03-26T19:42:13
| 2020-03-26T19:42:12
| null |
UTF-8
|
Python
| false
| false
| 5,460
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2021_03_01.aio.ContainerServiceClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Autogenerated plumbing: the client injects (client, config,
        # serializer, deserializer) either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.OperationValue"]:
        """Gets a list of compute operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_03_01.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationListResult]

        # Map selected HTTP status codes to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages replay the
            # service-supplied next_link with the client's api-version re-applied.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; the first tuple element (continuation
            # token) is always None here — paging is driven by get_next.
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"}  # type: ignore
|
[
"noreply@github.com"
] |
gaoyp830.noreply@github.com
|
b5acde1079fe57a98f775f9003f4e8c68c326e4e
|
41bea39563c74621924d79723f8ba84889958365
|
/nkamg_pcap/server/antimal/misc/trails/feeds/bambenekconsultingdga.py
|
698420381e69cf7f7c349a1e65d60c536efd4101
|
[
"MIT"
] |
permissive
|
NKQiuKF/pcap_update
|
abee0c13cb583fddb89eb9e86a487279bdc18f1d
|
679e3f116367394a5f58eb4f95b5318e80fee331
|
refs/heads/master
| 2022-10-21T17:49:30.706480
| 2019-09-02T09:22:06
| 2019-09-02T09:22:06
| 205,816,421
| 1
| 0
| null | 2022-10-06T18:33:32
| 2019-09-02T08:55:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 647
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://osint.bambenekconsulting.com/feeds/dga-feed.txt"
__check__ = "Domain used by"
__reference__ = "bambenekconsulting.com"
def fetch():
    """Download the Bambenek DGA feed and map each domain to its trail info.

    Returns a dict {domain: ("<malware> dga (malware)", reference)}; empty
    if the sanity-check marker is missing from the downloaded content.
    """
    results = {}
    feed = retrieve_content(__url__)
    if __check__ not in feed:
        return results
    pattern = re.compile(r"(?m)^([^,\s]+),Domain used by ([^ ]+)")
    for domain, malware in pattern.findall(feed):
        results[domain] = ("%s dga (malware)" % malware.lower(), __reference__)
    return results
|
[
"453341288@qq.com"
] |
453341288@qq.com
|
6fb7d3bd1ca93b6147bb8eb30c92161cb11b930b
|
d867398bd54ef772a624a72e283a6b2bb546f693
|
/signbank/docker_wsgi.py
|
40135228152c02660b293ec2c07228e7b2188eec
|
[
"BSD-3-Clause"
] |
permissive
|
stevecassidy/signbank-modular
|
b5d6aa3fd05b0aeeed9c7bb4688be6d0e5cb7b16
|
f00e30814c8a08fe00eb28df231791f6fc18ce7f
|
refs/heads/master
| 2023-01-13T09:32:39.503182
| 2022-01-12T04:18:59
| 2022-01-12T04:18:59
| 79,797,386
| 1
| 1
|
BSD-3-Clause
| 2022-12-26T20:58:06
| 2017-01-23T11:13:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
"""
WSGI config for signbank project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Determine if there are live settings (not commited to source control) and load that if it exists instead of the default settings
code_path = os.path.dirname(os.path.realpath(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "signbank.settings.docker")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"steve.cassidy@mq.edu.au"
] |
steve.cassidy@mq.edu.au
|
0751f1fdaa3b2d55769f3fc59c2d654fde132400
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/331/usersdata/302/94330/submittedfiles/funcoes1.py
|
46414bcab930f2ecf75035fde4caf630153d831d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
# -*- coding: utf-8 -*-
'''
def crescente (lista):
    if lista == sorted(lista):
        return True
    else:
        return False
def decrescente (lista):
    if lista == sorted(lista, reverse = True):
        return True
    else:
        return False
def consectivos (lista,n):
    for i in range(0,n,1):
        if i < n:
            if lista[i-1] =! lista[i]
                return False
            continue
        else:
            return True
'''
#escreva o código da função crescente aqui
#escreva as demais funções
#escreva o programa principal
n = int(input('Digite o número de elementos das listas: '))
a = []
b = []
c = []
for i in range(0, n, 1):
    a.append(int(input('Digite a%d: ' % (i + 1))))
# Fixed: the original loop started at i = 0 (comparing a[-1] — the LAST
# element — with a[0]) and broke out after the very first iteration, so
# only one wrapped-around pair was ever inspected. Now every adjacent
# pair is checked; 'S' is printed if any two consecutive values match.
tem_consecutivos = False
for i in range(1, n):
    if a[i - 1] == a[i]:
        tem_consecutivos = True
        break
if tem_consecutivos:
    print('S')
else:
    print('N')
'''
print(a)
for i in range (0,n,1):
    b.append(int(input('Digite b%d: '%(i+1))))
print(b)
for i in range (0,n,1):
    c.append(int(input('Digite c%d: '%(i+1))))
print(c)
'''
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
493abda9b939ba922dde6ee476d341dceaf83a2f
|
89a90707983bdd1ae253f7c59cd4b7543c9eda7e
|
/effective_python/item_52/recursive_import_bad/dialog.py
|
8138738a13171344c2d6677a8de506fafcda19cb
|
[] |
no_license
|
timothyshull/python_reference_code
|
692a7c29608cadfd46a6cc409a000023e95b9458
|
f3e2205dd070fd3210316f5f470d371950945028
|
refs/heads/master
| 2021-01-22T20:44:07.018811
| 2017-03-17T19:17:22
| 2017-03-17T19:17:22
| 85,346,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
#!/usr/bin/env python3
# Copyright 2014 Brett Slatkin, Pearson Education Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Preamble to mimick book environment
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Example 2
import app
class Dialog(object):
    """A dialog window that remembers the directory it saves into."""

    def __init__(self, save_dir):
        # Directory this dialog writes its files to.
        self.save_dir = save_dir
# Module-level instance constructed at import time; NOTE(review): this line
# dereferences app.prefs immediately, which is what makes the app <-> dialog
# import ordering fragile in this "recursive import" example — confirm.
save_dialog = Dialog(app.prefs.get('save_dir'))
def show():
    """Print a message indicating the dialog is being shown."""
    message = 'Showing the dialog!'
    print(message)
|
[
"timothyshull@gmail.com"
] |
timothyshull@gmail.com
|
b1bfe0cbfab8073e75cb7be278c8efa2c751956e
|
169edd2e971f78b261c78eb6e927efce7499237a
|
/2017_01_21_297/serializeAndDeserializeBT.py
|
dbaa31e137ab7e81e2b1f075f533908f61ec44c2
|
[] |
no_license
|
wk633/crack_leetcode
|
ce75fc92160a5e618f9cd84f5a6ab4871021f17b
|
e8559773069447f9e986712c45f6a5a53eaeb123
|
refs/heads/master
| 2021-01-13T07:38:50.957328
| 2017-05-12T01:51:21
| 2017-05-12T01:51:21
| 78,284,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    """Serialize/deserialize a binary tree (LeetCode 297) using a
    space-separated preorder traversal with '#' marking absent children.

    NOTE: ``TreeNode`` is expected to be provided by the judging harness
    (see the commented definition above); ``deserialize`` keeps node values
    as strings, matching the original behavior.
    """

    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        vals = []

        def preorder(node):
            if node:
                vals.append(str(node.val))
                preorder(node.left)
                preorder(node.right)
            else:
                vals.append("#")  # sentinel for a missing child

        preorder(root)
        return " ".join(vals)

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode
        """
        def dePreorder():
            val = next(vals)
            if val == "#":
                return None
            root = TreeNode(val)
            root.left = dePreorder()
            root.right = dePreorder()
            return root

        # Fixed: removed the stray Python-2 debug statement ``print data``,
        # which polluted stdout and was a syntax error under Python 3.
        vals = iter(data.split(" "))
        return dePreorder()
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
[
"wk633@outlook.com"
] |
wk633@outlook.com
|
a26332d09362043d07118787666ef90c13046967
|
b63142e8540cb30bb0c663332e29a4112721073e
|
/1112_set_mismatch.py
|
b9a4afe54d4b78dccd9dfe2c00c7e86f2356ed95
|
[] |
no_license
|
HaydenInEdinburgh/LintCode
|
025bb2f0d75686097061de324c0fd292536dbb14
|
dbeae2bf631e57667d1415164d452d5ca2df7447
|
refs/heads/master
| 2023-08-18T19:52:54.561623
| 2021-10-06T21:46:50
| 2021-10-06T21:46:50
| 370,733,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
class Solution:
    """
    @param nums: an array
    @return: the number occurs twice and the number that is missing
    """
    def findErrorNums(self, nums):
        """Return [duplicate, missing] for an array meant to contain 1..n
        exactly once (LintCode 1112 / LeetCode 645).

        Counts occurrences, then scans 1..n for the value seen twice and
        the value never seen. O(n) time, O(n) extra space.
        Returns None for an empty input, as before.
        """
        if not nums:
            return
        cnt = {}
        for num in nums:
            cnt[num] = cnt.get(num, 0) + 1
        # Fixed: removed the stray debug ``print(cnt)`` that polluted stdout
        # on every call.
        lost, dup = None, None
        for i in range(1, len(nums) + 1):
            if i not in cnt:
                lost = i
                continue
            if cnt[i] > 1:
                dup = i
        return [dup, lost]
if __name__ == '__main__':
    # Quick manual smoke test: [1, 1] should print [1, 2]
    # (duplicate 1, missing 2).
    s = Solution()
    nums = [1, 1]
    print(s.findErrorNums(nums))
|
[
"bony960323@gmail.com"
] |
bony960323@gmail.com
|
5969f9a3f58bcb9581bb2c8659c6b3ec621e71f5
|
995514f414eee6bbe9083ec39ecd3027cf9fd7d8
|
/3.2/14_mandelbrotTime.py
|
70eae2dba5c0725cca43753388faa48c6d600cd7
|
[] |
no_license
|
j16949/Programming-in-Python-princeton
|
e02376ebb714264a1329aacad30347e4ae79f909
|
392391b98815cc1ae2b49e1057a10bc5b37e801f
|
refs/heads/master
| 2023-08-08T11:52:04.362780
| 2023-08-01T13:02:41
| 2023-08-01T13:02:41
| 313,943,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
#-----------------------------------------------------------------------
# mandelbrot.py
#-----------------------------------------------------------------------
import sys
import stddraw
from color import Color
from picture import Picture
import complex as com
from stopwatch import Stopwatch
#-----------------------------------------------------------------------
# Compute the Mandelbrot iteration sequence starting at z0, and
# return the number of iterations for which the magnitude stays less
# than 2, up to the limit.
def mandel(z0, limit):
    """Return how many Mandelbrot iterations z -> z*z + z0 (starting from
    z0 itself) keep |z| <= 2.0, capped at *limit*."""
    current = z0
    steps = 0
    while steps < limit:
        if abs(current) > 2.0:
            return steps
        current = current * current + z0
        steps += 1
    return limit
#-----------------------------------------------------------------------
# Accept float command-line arguments xc, yc, and size that specify
# the center and size of a square region of interest. Make a digital
# image showing the result of sampling the Mandelbrot set in that
# region at a 512*512 grid of equally spaced pixels. Color each pixel
# with a grayscale value that is determined by counting the number of
# iterations before the Mandelbrot sequence for the corresponding
# complex number grows past 2.0, up to 255.
MAX = 255
#n = int(sys.argv[1])
#xc = float(sys.argv[2])
#yc = float(sys.argv[3])
#size = float(sys.argv[4])
# Hard-coded view: 512x512 grid centred on (-0.5, 0) with width/height 2.
n = 512
xc = -.5
yc = 0
size = 2
# First pass: Python's builtin complex type, timed with a Stopwatch.
w1 = Stopwatch()
pic = Picture(n, n)
for col in range(n):
    for row in range(n):
        x0 = xc - (size / 2) + (size * col / n)
        y0 = yc - (size / 2) + (size * row / n)
        z0 = complex(x0, y0)
        gray = MAX - mandel(z0, MAX)
        color = Color(gray, gray, gray)
        pic.set(col, n-1-row, color)
print(w1.elapsedTime())
# Second pass: same image via the course's pure-Python Complex class,
# to compare timings (sample numbers are quoted in the comments below).
w2 = Stopwatch()
pic = Picture(n, n)
for col in range(n):
    for row in range(n):
        x0 = xc - (size / 2) + (size * col / n)
        y0 = yc - (size / 2) + (size * row / n)
        z0 = com.Complex(x0, y0)
        gray = MAX - mandel(z0, MAX)
        color = Color(gray, gray, gray)
        pic.set(col, n-1-row, color)
print(w2.elapsedTime())
#stddraw.setCanvasSize(n, n)
#stddraw.picture(pic)
#stddraw.show()
#-----------------------------------------------------------------------
#bai@ubuntu:~/pythonProject/princeton/3.2$ python3 14_mandelbrotTime.py
#pygame 1.9.6
#Hello from the pygame community. https://www.pygame.org/contribute.html
#5.372214317321777
#37.89339089393616
|
[
"kkwert@sina.com"
] |
kkwert@sina.com
|
8f90f0f74e299c60c30453d312fc3d1aba364719
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03013/s183344003.py
|
a8fdb4a198d1b4d4846bb4b99b7d9b53647648b2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# Appears to be the classic broken-staircase counting problem: count ways
# to climb N steps taking 1 or 2 at a time while never landing on any of
# the M broken steps listed in X, modulo 1e9+7.
N, M = map(int, input().split())
X = [int(input()) for _ in range(M)]
MOD = 10 ** 9 + 7
# dp[i] = number of valid ways to reach step i; -1 marks "not yet computed".
dp = [-1] * (N + 1)
dp[0] = 1
for i in range(M):
    dp[X[i]] = 0  # broken steps contribute no paths
for i in range(N):
    if dp[i + 1] < 0:
        if i == 0:
            # Step 1 is only reachable by a single step from the ground.
            dp[i + 1] = dp[i]
        else:
            # Fibonacci-style recurrence over the two reachable predecessors.
            dp[i + 1] = (dp[i] + dp[i - 1]) % MOD
print(dp[-1])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
355aefac356edd3ca3cfe23b9b410776566b9d49
|
ff66dfb302dfdc5a519787cea8ad0ccfc2264334
|
/python/ex6_support_vector_machines/ex6_spam.py
|
2a5930371e0659dc42529d963af53411ce0ba45b
|
[
"MIT"
] |
permissive
|
ashu-vyas-github/AndrewNg_MachineLearning_Coursera
|
1c2d50e6a44e8e673203bf06a3f0165cac0a240e
|
1be5124b07df61f7295dd1c5151b86b061bf50fc
|
refs/heads/main
| 2023-07-11T14:30:52.057125
| 2021-08-17T06:04:30
| 2021-08-17T06:04:30
| 388,360,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,523
|
py
|
# Machine Learning Online Class
# Exercise 6 | Spam Classification with SVMs
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# gaussianKernel.m
# dataset3Params.m
# processEmail.m
# emailFeatures.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
import re # import regular expressions to process emails
import numpy
from scipy.io import loadmat
import svm_funcs
# ==================== Part 1: Email Preprocessing ====================
print("\nPreprocessing sample email (emailSample1.txt)\n")

# Extract Features
with open('./emailSample1.txt') as fid:
    file_contents = fid.read()

# Map each word of the email to its index in the course vocabulary.
word_indices = svm_funcs.process_email(file_contents, False)

# Print Stats
print('-------------')
print('Word Indices:')
print('-------------')
print(word_indices)

# ==================== Part 2: Feature Extraction ====================
print("\nExtracting features from sample email (emailSample1.txt)\n")

# Extract Features
features = svm_funcs.email_features(word_indices)

# Print Stats
print("Length of feature vector: %d" % len(features))
print("Number of non-zero entries: %d" % sum(features > 0))

# =========== Part 3: Train Linear SVM for Spam Classification ========
# Load the Spam Email dataset
# You will have X, y in your environment
data = loadmat("./spamTrain.mat")
x_train = data['X'].astype(float)
y_train = data['y']
y_train = y_train.reshape(-1)
num_examples, num_features = x_train.shape
print("Spam example Ex.6. training #examples:", num_examples, "#features:", num_features)

print("\nTraining Linear SVM (Spam Classification)")
print("This may take 1 to 2 minutes...\n")

reg_C = 0.1
model = svm_funcs.svm_train(svm_funcs.linear_kernel, x_train, y_train, reg_C, tol=1e-3, max_passes=20)

train_pred = svm_funcs.svm_predict(model, x_train) # Compute the training accuracy
train_acc = numpy.mean(train_pred == y_train)
print("Training Accuracy: %.2f" % (train_acc*100))

# =================== Part 4: Test Spam Classification ================
# Load the test dataset
# You will have Xtest, ytest in your environment
data = loadmat("./spamTest.mat")
x_test = data['Xtest'].astype(float)
y_test = data['ytest']
y_test = y_test.reshape(-1)

print("\nEvaluating the trained Linear SVM on a test set...")
test_pred = svm_funcs.svm_predict(model, x_test)
test_acc = numpy.mean(test_pred == y_test)
print("\nTest Accuracy: %.2f" % (test_acc*100))

# ================= Part 5: Top Predictors of Spam ====================
# Sort the weights and obtain the vocabulary list
# NOTE some words have the same weights, so their order might be different than in the text above
# Largest positive weights correspond to the strongest spam indicators.
idx = numpy.argsort(model['w'])
top_idx = idx[-15:][::-1]
vocab_list = svm_funcs.get_vocab_list()

print("\nTop predictors of spam:")
print("%-15s %-15s" % ('word', 'weight'))
print("----" + " "*12 + "------")
for word, w in zip(numpy.array(vocab_list)[top_idx], model['w'][top_idx]):
    print("%-15s %0.2f" % (word, w))

# # =================== Part 6: Try Your Own Emails =====================
filename = './emailSample1.txt'
with open(filename) as fid:
    file_contents = fid.read()

word_indices = svm_funcs.process_email(file_contents, verbose=False)
x = svm_funcs.email_features(word_indices)
p = svm_funcs.svm_predict(model, x)
print("\nProcessed %s\nSpam Classification: %s" % (filename, 'spam' if p else 'not spam'))
|
[
"ashutoshavyas@gmail.com"
] |
ashutoshavyas@gmail.com
|
84406411b9aca252144887dbe784d8a128e5029b
|
115ef7a9ffc88148b7439bd25ef3c97720be87e6
|
/Backtester_v2.0/2.0/correlation.py
|
421ae0d95b0fdcf95cf4f52eb0b2f350f8de07db
|
[
"MIT"
] |
permissive
|
octicalpha/billions
|
387bc0db600dd97915be0cece710237ff626b86c
|
5465c527d614ae64789906197c1effe7ba94d373
|
refs/heads/master
| 2020-04-01T21:35:50.582694
| 2018-10-14T05:36:50
| 2018-10-14T05:36:50
| 153,664,919
| 0
| 3
| null | 2018-10-18T17:53:35
| 2018-10-18T17:53:34
| null |
UTF-8
|
Python
| false
| false
| 3,929
|
py
|
import pandas as pd
import numpy as np
import os
import re
from matplotlib import pyplot as plt
import argparse
class Corr(object):
    """Correlates a pivot strategy's PnL curve and mean positions against
    every other strategy whose CSV files live in the given directories."""

    def __init__(
        self, pnl_dir_path, pnl_filepath, pos_dir_path, pos_filepath, range_
    ):
        # NOTE(review): the pnl range is fixed at 0, and ``df[-0:]`` in
        # get_sequences selects the WHOLE frame — confirm that only the
        # position data is meant to be windowed by ``range_``.
        self.meta_info = {
            "pnl": {
                "dir_path": pnl_dir_path,
                "filepath": pnl_filepath,
                "range": 0
            },
            "pos": {
                "dir_path": pos_dir_path,
                "filepath": pos_filepath,
                "range": range_
            }
        }
        self.pnl_df, self.pnl_se = self.get_sequences("pnl")
        self.pos_df, self.pos_se = self.get_sequences("pos")
        self.result_dict = {
            "PNL": self.get_corr_tuples(self.pnl_df, self.pnl_se),
            "POS": self.get_corr_tuples(self.pos_df, self.pos_se)
        }

    def get_sequences(self, scope):
        # Load one series per "<name>_<scope>.csv" file in the scope's
        # directory, plus the designated pivot file's series.
        def get_filepaths():
            p = re.compile(".*_"+scope+".csv")
            all_names = os.listdir(dir_path)
            filenames = sum([p.findall(s) for s in all_names], [])
            return [os.path.join(dir_path, s) for s in filenames]
        def get_se(filepath):
            # Strategy name is recovered from the filename; PnL uses the
            # cumulative curve, positions use the per-column mean.
            p = re.compile(".*/(.*)_"+scope+".csv")
            df = pd.read_csv(filepath, index_col=0)[-range_:]
            se = df["CumPnL"] if scope == "pnl" else df.mean()
            se.name = p.findall(filepath)[0]
            return se
        meta = self.meta_info
        dir_path = meta[scope]["dir_path"]
        pivot_filepath = meta[scope]["filepath"]
        range_ = meta[scope]["range"]
        filepaths = get_filepaths()
        pivot_se = get_se(pivot_filepath)
        ses = [get_se(filepath) for filepath in filepaths]
        # Drop the pivot column so it is not correlated against itself.
        df = pd.concat(ses, axis=1).drop([pivot_se.name], axis=1)
        return df, pivot_se

    def get_corr_tuples(self, sequence_df, pivot_se):
        def calc_corr(name):
            # Align the two series on their shared index (dropna) before
            # computing the Pearson correlation coefficient.
            ses = [pivot_se, sequence_df[name]]
            df = pd.concat(ses, axis=1).dropna()
            corr_mat = np.corrcoef(df[pivot_se.name], df[name])
            return corr_mat[1, 0]
        corr_dict = {name: calc_corr(name) for name in sequence_df.columns}
        # Sorted ascending by correlation with the pivot.
        names = sorted(sequence_df.columns, key=lambda name: corr_dict[name])
        corr_tuples = [(name, corr_dict[name]) for name in names]
        return corr_tuples

    def display(self, scope):
        # Show a histogram of all correlations, then print the five most
        # and five least correlated strategies for the given scope.
        def draw_hist():
            plt.hist(map(lambda x: x[1], corr_tuples))
            plt.show()
        def _make_it_readable(t):
            # Pad names to a common width: "<name> | <corr>".
            name = t[0]+" "*(max_name_length-len(t[0]))
            corr =str(t[1])
            return name + " | " + corr
        def print_report():
            print(scope+" Max 5")
            for t in corr_tuples[:-6:-1]:
                print(_make_it_readable(t))
            print(scope+" Min 5")
            for t in corr_tuples[:5]:
                print(_make_it_readable(t))
        corr_tuples = self.result_dict[scope]
        max_name_length = max(map(lambda x: len(x[0]), corr_tuples))
        draw_hist()
        print_report()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pnl_dir', type=str)
parser.add_argument('--pnlfile', type=str)
parser.add_argument('--pos_dir', type=str)
parser.add_argument('--posfile', type=str)
parser.add_argument('--range', type=int)
args = parser.parse_args()
INTERVAL = args.interval
PNL_DIR_PATH = args.pnl_dir
PNL_FILEPATH = args.pnlfile
POS_DIR_PATH = args.pos_dir
POS_FILEPATH = args.posfile
RANGE = args.range
corr = Corr(PNL_DIR_PATH, PNL_FILEPATH, POS_DIR_PATH, POS_FILEPATH, RANGE)
corr.display("PNL")
corr.display("POS")
|
[
"ubuntu@ip-172-31-26-154.ap-northeast-2.compute.internal"
] |
ubuntu@ip-172-31-26-154.ap-northeast-2.compute.internal
|
062d988ee4c6b9c74fee068ea1501c5cbc67ff6f
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/2501-3000/2518.Number of Great Partitions.py
|
cb48ab1fc9ca60491f8fa5cb5332230e804326f0
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062
| 2023-08-27T07:59:16
| 2023-08-27T07:59:16
| 57,526,914
| 69
| 9
| null | 2023-08-20T06:34:41
| 2016-05-01T05:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 814
|
py
|
from functools import cache

MOD = 10 ** 9 + 7


class Solution:
    """LeetCode 2518 — Number of Great Partitions."""

    def countPartitions(self, nums: list[int], k: int) -> int:
        """Count ordered splits of ``nums`` into two groups, each summing
        to at least ``k``, modulo 1e9+7.

        dp(i, need1, need2) = number of assignments of nums[:i+1] that
        still owe ``need1``/``need2`` toward the two groups' targets.

        Fixed: the ``List`` annotation referenced un-imported
        ``typing.List`` (NameError outside the LeetCode harness) — now the
        builtin ``list``; also guards against an empty input, which used
        to crash on ``prefix_sum[-1]``.
        """
        if not nums:
            return 0
        prefix_sum = [0] * len(nums)
        for i in range(len(nums)):
            # prefix_sum[-1] is still 0 on the first pass, so this also
            # covers i == 0 without a special case.
            prefix_sum[i] = nums[i] + prefix_sum[i - 1]
        if prefix_sum[-1] < k * 2:
            return 0

        @cache
        def dp(i, need1, need2):
            if i < 0 and max(need1, need2) > 0:
                return 0
            if max(need1, need2) > prefix_sum[i]:
                return 0  # not enough remaining mass to meet either target
            if need1 == need2 == 0:
                # Both targets met: the remaining i+1 items go anywhere.
                return pow(2, (i + 1), MOD)
            if need1 > need2:
                # Canonical (sorted) ordering keeps the memo table small.
                return dp(i, need2, need1)
            return (dp(i - 1, max(0, need1 - nums[i]), need2)
                    + dp(i - 1, need1, max(0, need2 - nums[i]))) % MOD

        return dp(len(nums) - 1, k, k)
|
[
"skw_kevin@126.com"
] |
skw_kevin@126.com
|
9c05be7ad041f9de5e1d1bce2dd31350714c4619
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/386/usersdata/353/92377/submittedfiles/ep1.py
|
10718f855550587ca2a4c168c199e31760a04b62
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
# -*- coding: utf-8 -*-
'''
/**********************************************************/
/* Equipe: Igor Emanuel Lucas Farias, Victória Cruz Gouveia */
/* N ́umero de matriculas: 407553, 407582 */
/* Exercicio-Programa 1 -- Ra ́ızes de Equa ̧c~oes Quadr ́aticas */
/* ECI0007 ou EM0006 (EC/EM) -- 2017 -- Professor:Rafael */
/* Interpretador: Python vers~ao 3 */
/**********************************************************
'''
#COMECE SEU CODIGO NA LINHA ABAIXO.
def raiz2(x,epsilon):
rn=x
while True:
rm=(1/2)*(rn + (x/rn))
if abs (rm-rn)<epsilon:
return(rm)
rn=rm
def baskara(a,b,c):
delta=(b**2) - 4*a*c
if delta>=0:
x1=((-b)+(raiz2(delta,epsilon)))/(2*a)
x2=((-b)-(raiz2(delta,epsilon)))/(2*a)
if delta>0:
return('reais simples', '%2.0f'%x1, '%2.0f'%x2)
elif delta==0:
return('real dupla', '%2.0f'%x1, '%2.0f'%x2)
else:
delta=delta*(-1)
x3=((raiz2(delta,epsilon))/(2*a))
x1=((-b)/(2*a))
x2=((-b)/(2*a))
return('complexas', complex('%4.0f'%x1,'%4.0f'%x3), complex('%4.0f'%x2,'%4.0f'%x3))
epsilon=float(input('Digite o epsilon de controle: '))
nequacoes=int(input('Digite o número de equações: '))
for equação in range(0,nequacoes,1):
a=float(input('Digite o a da equação: '))
b=float(input('Digite o b da equação: '))
c=float(input('Digite o c da equação: '))
if a!=0:
print('%.2f'%a), print('%.2f'%b), print('%.2f'%c), print(baskara(a,b,c))
else:
print('***ERRO: equação não é do segundo grau! ***')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
8843b68fa4debf3686b1f6037b8afaea4ee71840
|
b739fefa06d46a60fe053f7fe0fe2c62a52242b2
|
/pages/admin/edit_news_page.py
|
1417cc238ade39fddacc51eaa0b28d1870ddfc0f
|
[] |
no_license
|
icorso/gkr-web-tests
|
c59d3b0f7e371e887c6699cd09b6a87a71dd762e
|
fdf25ad700d75230f1af74b646a6a8a18f3d0b18
|
refs/heads/master
| 2021-01-13T05:15:37.913759
| 2017-02-08T07:44:04
| 2017-02-08T07:44:04
| 81,296,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
# coding=utf-8
from pages import page, Input, Checkbox, BasePage, BaseElement, By
from pages.gkr_page import GkrPage
@page(u"Форма редактирования новости", By.XPATH, "//form[..//child::*[contains(text(),'Редактирование новости')]]")
class EditNewsPage(GkrPage):
TITLE = BaseElement("Поле 'Заголовок новости'", By.ID, "title")
DESC = BaseElement("Поле 'Текст новости'", By.ID, "text")
PUBLISH_DATE = Input("Поле 'Дата создания'", By.XPATH, ".//*[child::*[contains(text(),'Дата')]]")
IS_PUBLISHED = Checkbox("Чекбокс 'Опубликовать'", By.XPATH, ".//input[@type='checkbox']")
SUBMIT = BaseElement("Кнопка 'Создать'", By.XPATH, ".//button[@type='submit']")
ERROR = BaseElement("Сообщение об ошибке", By.XPATH, ".//span[contains(@style,'ff0000')]")
|
[
"icorso@yandex.ru"
] |
icorso@yandex.ru
|
dad102cb3faea7b8d1d9c97f542f4ce326fe7d2f
|
3c44ddbe867d953a5f27c8c073e1ea5e995b5873
|
/experiments/experiment_1/debugger.py
|
7a9f1bcd3dee99eca66c48d12ca62b8f83a43330
|
[] |
no_license
|
cair/deep-warehouse
|
37f6a3510638b36c276abb62b6b770d0ba6186af
|
93cb7329c28733083b48ab6afd3de91676852175
|
refs/heads/master
| 2022-03-10T16:45:59.553325
| 2022-02-20T17:28:19
| 2022-02-20T17:28:19
| 167,932,576
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
import sys
from deep_logistics.scheduler import OnDemandScheduler
from deep_logistics.spawn_strategy import LocationSpawnStrategy
from experiments.experiment_3.state_representations import State0
sys.path.append("/home/per/GIT/deep-logistics")
sys.path.append("/home/per/IdeaProjects/deep_logistics")
sys.path.append("/home/per/GIT/code/deep_logistics")
sys.path.append("/root")
from deep_logistics.environment import Environment
from deep_logistics.agent import InputAgent
if __name__ == "__main__":
env = Environment(
height=5,
width=5,
depth=3,
ups=None,
ticks_per_second=1,
taxi_n=1,
taxi_agent=InputAgent,
taxi_respawn=False,
taxi_control="constant",
scheduler=OnDemandScheduler,
delivery_locations=None,
spawn_strategy=LocationSpawnStrategy,
graphics_render=True,
graphics_tile_height=64,
graphics_tile_width=64
)
env.deploy_agents()
env.task_assignment()
state = State0(env)
agent = env.agents[0]
def on_event():
env.update()
y = state.generate(agent)
print(" - ".join([str(x) for x in y]))
agent.add_event_callback(on_event)
while True:
agent.automate()
env.render()
|
[
"per@sysx.no"
] |
per@sysx.no
|
a71cc98489fc1998a280603110b534f157129380
|
1a1b7f607c5e0783fd1c98c8bcff6460e933f09a
|
/core/charge/charge_types.py
|
6f4f6bbf937df689e1cbbdc057d1a0b96b383e91
|
[] |
no_license
|
smrmohammadi/freeIBS
|
14fb736fcadfaea24f0acdafeafd2425de893a2d
|
7f612a559141622d5042614a62a2580a72a9479b
|
refs/heads/master
| 2021-01-17T21:05:19.200916
| 2014-03-17T03:07:15
| 2014-03-17T03:07:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
from core.charge.internet_charge import InternetCharge
from core.charge.voip_charge import VoipCharge
from core.charge.internet_charge_rule import InternetChargeRule
from core.charge.voip_charge_rule import VoipChargeRule
from core.lib.time_lib import *
from core.ibs_exceptions import *
def getChargeClassForType(_type):
if _type=="Internet":
return InternetCharge
elif _type=="VoIP":
return VoipCharge
else:
raise IBSException(errorText("CHARGES","INVALID_CHARGE_TYPE")%_type)
def getRulesTable(_type):
"""
return table that rules of _type charge_obj is available there
rule tables are diffrent based on charge type
"""
if _type=="Internet":
return "internet_charge_rules"
elif _type=="VoIP":
return "voip_charge_rules"
else:
raise IBSException(errorText("CHARGES","INVALID_CHARGE_TYPE")%_type)
def getChargeRuleObjForType(_type,rule_info,charge_obj,day_of_weeks,ports):
if _type=="Internet":
return InternetChargeRule(rule_info["charge_rule_id"],charge_obj,rule_info["cpm"],rule_info["cpk"],day_of_weeks,\
rule_info["start_time"],rule_info["end_time"],rule_info["bandwidth_limit_kbytes"],\
rule_info["bw_transmit_leaf_id"],rule_info["bw_receive_leaf_id"],rule_info["assumed_kps"],\
rule_info["ras_id"],ports)
elif _type=="VoIP":
return VoipChargeRule(rule_info["charge_rule_id"],charge_obj,\
day_of_weeks,rule_info["start_time"],rule_info["end_time"], \
rule_info["tariff_id"],rule_info["ras_id"],ports)
else:
raise IBSException(errorText("CHARGES","INVALID_CHARGE_TYPE")%_type)
|
[
"farshad_kh"
] |
farshad_kh
|
ec5e8b11caa32c3c05e9e790c8640c5854a59efe
|
308953409e1a3b828ac49b7301c1e751cbf762cf
|
/suite_ERC113C/tst_Offline_Value_Verification_After_Export/test.py
|
0b61d6d28c8a932c2629a09e7db845a7ea357bd3
|
[] |
no_license
|
asthagaur1/danfoss-automation
|
4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e
|
213a99d3375889cd0e0c801421a50e9fe6085879
|
refs/heads/main
| 2023-03-31T23:26:56.956107
| 2021-04-01T08:52:37
| 2021-04-01T08:52:37
| 353,627,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
def main():
excel = r"C:\gitworkspace\TestAutomation-AKCC5XX\Test_Automation\SourceCode\Test_Suites\suite_ERC113C\shared\testdata\Offline_Export_Verifying_Values.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
source(findFile("scripts", "object_id.py"))
keyAction(excel)
|
[
"asthagaur@danfoss.com"
] |
asthagaur@danfoss.com
|
693bf5679a15c573e4ebe87b0a134e654d96be1a
|
bdda458001808a029b171c09286f022a1384d180
|
/crm/api/urls.py
|
4432114d1a628d5ae2367f08e965ea609d506cbb
|
[] |
no_license
|
bianchimro/crm-django
|
4189f5c0c31f03d23a2b644a14403d63b8efdf0a
|
d8e4d18174cb050fd7a22d53fe8bb152e6e43120
|
refs/heads/master
| 2021-04-27T15:15:28.219887
| 2018-02-22T16:51:00
| 2018-02-22T16:51:00
| 122,466,604
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django.urls import path
from .views import ExampleView, AziendaList, AziendaViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'aziende', AziendaViewSet)
urlpatterns = [
path('example/', ExampleView.as_view(), name="example"),
path('aziende_list/', AziendaList.as_view(), name="aziende_list"),
]
urlpatterns += router.urls
|
[
"bianchimro@gmail.com"
] |
bianchimro@gmail.com
|
8a86a57c6ba570a80e5a56773f4aacac0bdfff77
|
cc64b1b5deb4530a5bd3eaabd98ebd4daa2deea1
|
/Aulas/Exercícios-Mundo2/Aula014/Ex064.py
|
c503ec40e5b592f3dae6498fbbfd25ca252956e6
|
[
"MIT"
] |
permissive
|
Sofista23/Aula1_Python
|
239b9920353138ff99d99dd0af66a4788f1cbb22
|
129132d977058ac6f23cc95c7bb8b55d8a1bb429
|
refs/heads/main
| 2023-09-01T23:55:20.529528
| 2021-10-13T23:19:33
| 2021-10-13T23:19:33
| 416,924,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
n=0
s=0
q=0
while n != 999:
n=int(input("Digite um número:"))
if n != 999:
s += n
q += 1
print("A soma de todos os números é {0}.".format(s))
print("A quantidade de números digitados foi de {0}.".format(q))
|
[
"81760467+Sofista23@users.noreply.github.com"
] |
81760467+Sofista23@users.noreply.github.com
|
ff25ffd31d796bc554c0a32e2113e547c0222a62
|
cbd9b8f2dbd692d74eba6e92465e5f1dc1a807b3
|
/ukpopulation/myedata.py
|
33de0fab035316ed548c79e4507c2972d4735391
|
[
"MIT"
] |
permissive
|
geoadom/ukpopulation
|
5a99f02c06c2b76464df2508a8f01f0b9ab0a803
|
bfbd55097a4e9f458e2da6673a83576e37f5079b
|
refs/heads/master
| 2020-03-21T07:59:37.195042
| 2018-06-21T15:02:31
| 2018-06-21T15:02:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,297
|
py
|
"""
MYEData - wrapper around Mid-Year Estimate data by LAD, SYoA and gender
"""
import pandas as pd
import ukcensusapi.Nomisweb as Api
import ukpopulation.utils as utils
class MYEData:
"""
Functionality for downloading and collating UK mid-year estimate (MYE) data
Nomisweb stores the data for the entire UK, from 1991-2016 inclusive
"""
# update as and when necessary (this is unlike (S)NPP where we query the data for the year range)
# the data is stored differently at nomisweb (year is part of the query)
MIN_YEAR = 1991
MAX_YEAR = 2016
def __init__(self, cache_dir=None):
if cache_dir is None:
cache_dir = utils.default_cache_dir()
self.cache_dir = cache_dir
self.data_api = Api.Nomisweb(self.cache_dir)
# store as a dictionary keyed by year (lazy retrieval)
self.data = {}
def min_year(self):
"""
Returns the first year in the data
"""
return MYEData.MIN_YEAR
def max_year(self):
"""
Returns the final year in the data
"""
return MYEData.MAX_YEAR
# TODO functionality for easy aggregration to E/W/EW/S/GB/NI/UK
def filter(self, years, geogs, ages=range(0,91), genders=[1,2]):
"""
Get MYE detailed data for a given year
"""
# ensure array inputs
if isinstance(years, int):
years = [years]
if isinstance(geogs, str):
geogs = [geogs]
if isinstance(ages, int):
ages = [ages]
if isinstance(genders, int):
genders = [genders]
result = pd.DataFrame()
for year in years:
# ensure the data is loaded
self.__fetch_data(year)
## ensure we return a copy!
part = self.data[year][(self.data[year].GEOGRAPHY_CODE.isin(geogs)) &
(self.data[year].C_AGE.isin(ages)) &
(self.data[year].GENDER.isin(genders))].copy()
part["PROJECTED_YEAR_NAME"] = year
result = result.append(part)
return result.reset_index(drop=True)
def aggregate(self, years, geog_codes, categories, ages=range(0,91), genders=[1,2]):
data = self.filter(years, geog_codes, ages, genders)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
def __fetch_data(self, year):
"""
Gets Mid-year population estimate data for a given year
Data is by single year of age by gender by local authority
"""
# if data already loaded return
if year in self.data:
return
table_internal = "NM_2002_1" # 2016-based MYE
query_params = {
"gender": "1,2",
"c_age": "101...191",
"MEASURES": "20100",
"select": "geography_code,gender,c_age,obs_value",
"geography": "1879048193...1879048573,1879048583,1879048574...1879048582"
}
if year < MYEData.MIN_YEAR or year > MYEData.MAX_YEAR:
raise ValueError("{} is outside the available years for MYE data ({}-{})".format(year, MIN_YEAR, MAX_YEAR))
query_params["date"] = "latest"
if year < MYEData.MAX_YEAR:
query_params["date"] += "MINUS" + str(2016-year)
self.data[year] = self.data_api.get_data(table_internal, query_params)
# renumber age so that 0 means [0,1)
self.data[year].C_AGE -= 101
return self.data[year]
|
[
"a.p.smith@leeds.ac.uk"
] |
a.p.smith@leeds.ac.uk
|
ddc2a74972b55f12dc91815381d50be81c7ebf36
|
ba0a2b0d2d1534443ea34320675aadfa378457b6
|
/Array/Q1267_Count Servers that Communicate.py
|
bd2da1840b785f2af80b49ee05d2babff826b118
|
[] |
no_license
|
Luolingwei/LeetCode
|
73abd58af116f3ec59fd6c76f662beb2a413586c
|
79d4824879d0faed117eee9d99615cd478432a14
|
refs/heads/master
| 2021-08-08T17:45:19.215454
| 2021-06-17T17:03:15
| 2021-06-17T17:03:15
| 152,186,910
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# 思路: 计算每行每列的server个数,如果一个server与其他server连接,则该行或该列server个数大于1
class Solution:
# O(mn)
def countServers(self, grid):
res=0
m,n=len(grid),len(grid[0])
row,col=list(map(sum,grid)),list(map(sum,zip(*grid)))
for i in range(m):
for j in range(n):
if grid[i][j] and (row[i]>1 or col[j]>1):
res+=1
return res
a=Solution()
print(a.countServers([[1,0],[0,1]]))
|
[
"564258080@qq.com"
] |
564258080@qq.com
|
c7e969446f55baab789853c7141aee407cfb5de5
|
b2403817f9221ee3550130572a808194ef4f3fda
|
/Excersise/DiscoverMonk.py
|
d83e2599dd6f66c342a4d3eafec2a9d392354bbd
|
[] |
no_license
|
xaviergoby/Python-Data-Structure
|
e962444ef5b1313c3facbf1fcc315af182b73a26
|
eaaf31ea98d63e812a75c1d6ecb8722b9c0cf142
|
refs/heads/master
| 2020-04-13T00:24:40.896592
| 2018-11-27T11:51:36
| 2018-11-27T11:51:36
| 162,844,732
| 1
| 0
| null | 2018-12-22T21:46:29
| 2018-12-22T21:46:29
| null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
__author__ = 'Sanjay'
def monk(n, args = []):
someArray = range(0,50,10)
for i in args:
if i in someArray:
print ("YES")
else:
print ("NO")
if __name__ == '__main__':
someList = range(0,100,10)
monk(len(someList), someList)
|
[
"sanjay.siddha3@gmail.com"
] |
sanjay.siddha3@gmail.com
|
8453de52904329d3aaf8fd34272d1308db93de74
|
7d283f0762d2f85b04148e5db5acebc44dbba606
|
/get_post_info_dl.py
|
89d24f3e55214c8c843896a14624df05bc0e6664
|
[] |
no_license
|
Brandon-Valley/reddit_comp
|
50dcd3571be1116bebb607c54e105096078c161f
|
ec618dc12b007a670fb4cc879554c4cf41796b62
|
refs/heads/master
| 2022-01-10T19:18:18.042008
| 2019-06-02T18:30:11
| 2019-06-02T18:30:11
| 188,881,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,408
|
py
|
import subprocess
import json
import file_system_utils
# optional arguments:
# -h, --help show this help message and exit
# --directory DIRECTORY, -d DIRECTORY
# Specifies the directory where posts will be downloaded
# to
# --NoDownload Just gets the posts and stores them in a file for
# downloading later
# --verbose, -v Verbose Mode
# --quit, -q Auto quit afer the process finishes
# --link link, -l link Get posts from link
# --saved Triggers saved mode
# --submitted Gets posts of --user
# --upvoted Gets upvoted posts of --user
# --log LOG FILE Takes a log file which created by itself (json files),
# reads posts and tries downloading them again.
# --subreddit SUBREDDIT [SUBREDDIT ...]
# Triggers subreddit mode and takes subreddit's name
# without r/. use "frontpage" for frontpage
# --multireddit MULTIREDDIT
# Triggers multireddit mode and takes multireddit's name
# without m/
# --user redditor reddit username if needed. use "me" for current user
# --search query Searches for given query in given subreddits
# --sort SORT TYPE Either hot, top, new, controversial, rising or
# relevance default: hot
# --limit Limit default: unlimited
# --time TIME_LIMIT Either hour, day, week, month, year or all. default:
# all
EXE_PATH = "C:/Users/Brandon/Documents/Personal_Projects/reddit_comp/bulk_downloader_for_reddit-1.6.5-windows/bulk-downloader-for-reddit.exe "
LOG_FILES_SAVE_PATH = 'bulk_download_log_files'
DEFAULT_SORT_TYPE = 'hot'
def build_arg_str(num_posts, subreddit_l, sort_type = DEFAULT_SORT_TYPE):
# build_subreddit_l_str
subreddit_l_str = subreddit_l[0]
for subreddit in subreddit_l[1:]:
subreddit_l_str += '+' + subreddit
args = [' --directory ' + LOG_FILES_SAVE_PATH,
' --subreddit ' + subreddit_l_str,
' --limit ' + str(num_posts),
' --sort ' + sort_type,
' --NoDownload'
]
#build arg_str
arg_str = ''
for arg in args:
arg_str += arg
return arg_str
def build_post_info_dl_from_json():
#get path to most recent json logfile
newest_log_file_dir = file_system_utils.get_newest_file_path(LOG_FILES_SAVE_PATH + '/LOG_FILES')
json_file_path = newest_log_file_dir + '/POSTS.json'
post_info_dl = []
# read in json file
with open(json_file_path) as json_file:
data = json.load(json_file)
# fill post_info_dl
post_num = 1
while(str(post_num) in data):
post_info_dl.append(data[str(post_num)][0])
post_num += 1
return post_info_dl
def get_post_info_dl(num_posts, subreddit_list, quick_test = False):
if quick_test == False:
exe_arg_str = build_arg_str(num_posts, subreddit_list)
cmd = EXE_PATH + exe_arg_str
subprocess.call(cmd, shell=True)
post_info_dl = build_post_info_dl_from_json()
return post_info_dl
# print( get_post_info_dl(4, ['videomemes', 'pics']))
|
[
"bavvh8@mst.edu"
] |
bavvh8@mst.edu
|
4d432a747f4aa1931de0a032b8c48e1b17d77e38
|
b7add0d1b1effc50b27d3316fa5889a5227e5b19
|
/Micropython/PYCARD/tests/archieved/hall_encoder_test_2.py
|
30c908f586454c75ba6d4044967b71489fa1d5d2
|
[] |
no_license
|
Woz4tetra/Atlas
|
efb83a7c7b2698bf8b36b023f7aa573cc38284f6
|
c7380868a9efef9d1594ed7aa87187f03a7e4612
|
refs/heads/master
| 2020-04-04T06:25:50.657631
| 2017-04-05T01:53:15
| 2017-04-05T01:53:15
| 50,269,756
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
import pyb
from objects import HallEncoder
pin = "X11"
mode = ""
while mode != "e" and mode != "r":
mode = input("Raw or encoder counts (r or e)?\n> ").lower()
if mode == "e":
encoder = HallEncoder(0, pin, 80, 100)
while True:
if encoder.recved_data():
print(encoder.enc_dist, encoder.hall_value)
pyb.delay(100)
elif mode == "r":
pin_ref = pyb.ADC(pyb.Pin(pin, pyb.Pin.ANALOG))
while True:
print(pin_ref.read())
pyb.delay(40)
|
[
"woz4tetra@gmail.com"
] |
woz4tetra@gmail.com
|
81db93b7457b194f44a05dbeb19bfbb07cbf8c1a
|
6dedf9401746e934e87698f58eedb4631ea5f81b
|
/scrapy_demo/tesseract/demo.py
|
5fe50242cefbd8841af0687901b25feedf89c539
|
[] |
no_license
|
2415970940/scrapy
|
0d03f9fe0cf121f637b7c5b03d328453c0ae35a7
|
5a1f4da0d92b3d80c1b95ed9fff0ab4bd02660bd
|
refs/heads/master
| 2020-03-23T13:13:50.282118
| 2018-08-02T09:13:24
| 2018-08-02T09:13:24
| 141,607,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import pytesseract
from PIL import Image
pytesseract.pytesseract.tesseract_cmd = r"G:\progamapp\Tesseract-OCR\tesseract.exe"
tessdata_dir_config = '--tessdata-dir "G:\\progamapp\\Tesseract-OCR\\tessdata"'
image = Image.open("test.png")
text = pytesseract.image_to_string(image,config=tessdata_dir_config)
print(text)
|
[
"2415970940@qq.com"
] |
2415970940@qq.com
|
7c7fc0f5567572ab0c07b65e752550907fbbcd9e
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_135144.13+124255.8/sdB_sdssj_135144.13+124255.8_coadd.py
|
4c2a173777392d114cb21ec9fb51411e53878edf
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[207.933875,12.7155], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj_135144.13+124255.8/sdB_sdssj_135144.13+124255.8_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj_135144.13+124255.8/sdB_sdssj_135144.13+124255.8_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
ed785e2b5af05ae400c0d35d937f9c33162497ac
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC59.py
|
2688aa43ef87eefbaedb8591966863e235cc4be4
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=9
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC59.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
b80afaa7949dc6770d059a0287bbc3a17570e136
|
ac09dbe531660a8f5ea6d0ab9b9496db856fe88e
|
/rules/download_data.smk
|
bd1e90cfe76baacaac35853c849070aadbdbe100
|
[
"MIT"
] |
permissive
|
EthanHolleman/GLOE-reps
|
fc203c0676d474e1477318c18251eb0ff1ac0fc3
|
196e98eb8cf5fb591ae02b5e999d3562c46b81c2
|
refs/heads/main
| 2023-04-09T18:14:23.009901
| 2021-04-15T01:53:33
| 2021-04-15T01:53:33
| 354,414,057
| 0
| 0
|
MIT
| 2021-04-04T17:57:18
| 2021-04-03T23:18:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,745
|
smk
|
import pandas as pd
GLOE_SAMPLES = pd.read_csv(
'samples/GLOE_samples.csv', sep=','
).set_index('Sample Name', drop=False)
# Download GLOE-seq data and process into fastq
rule expand_gloe_samples:
input:
expand('rawdata/GLOE-seq/{sample_name}.sra', sample_name=GLOE_SAMPLES['Sample Name'])
rule download_all_gloe_samples:
conda:
'../envs/sra-toolkit.yml'
params:
sra_accession = lambda wildcards: GLOE_SAMPLES.loc[wildcards.sample_name]['Run'],
output:
temp('rawdata/GLOE-seq/{sample_name}.sra')
shell:'''
prefetch {params.sra_accession} --output-file {output}
'''
rule dump_gloe_fastq:
input:
'rawdata/GLOE-seq/{sample}.sra'
output:
'rawdata/GLOE-seq/{sample}.fastq.gz'
shell:'''
fastq-dump -Z {input} | gzip > {output}
'''
# Download primers
rule download_primer_file:
output:
'rawdata/primers/TruSeq3-SE.fa'
shell:'''
curl https://raw.githubusercontent.com/timflutre/trimmomatic/master/adapters/TruSeq3-SE.fa \
-o {output}
'''
rule download_hg19_chr_sizes:
output:
'rawdata/hg19/hg19.chrom.sizes'
shell:'''
curl -L http://hgdownload.cse.ucsc.edu/goldenpath/hg19/bigZips/hg19.chrom.sizes -o {output}
'''
# Download footloop data
rule download_footloop_all:
output:
'rawdata/footloop/footloop_all.bed'
shell:'''
curl -L "https://genome.ucsc.edu/cgi-bin/hgTables?hgsid=1079385889_dXqdbBP5Hsal2siu4fVmefmsWOgX&boolshad.hgta_printCustomTrackHeaders=0&hgta_ctName=tb_ct_footLoopPeakALL_41&hgta_ctDesc=table+browser+query+on+ct_footLoopPeakALL_41&hgta_ctVis=pack&hgta_ctUrl=&fbQual=whole&fbUpBases=200&fbDownBases=200&hgta_doGetBed=get+BED" -o {output}
'''
|
[
"etholleman@ucdavis.edu"
] |
etholleman@ucdavis.edu
|
126b2a8c33e68656967eae19d2378d22a7ea2628
|
242918b007e06cbebbf5b276580a0ed89d0020fa
|
/thrift/compiler/test/fixtures/inheritance/gen-py3/module/services.pyi
|
f696015594a83586fbfc4424fc7df5a066a60980
|
[
"Apache-2.0"
] |
permissive
|
wby-test/fbthrift
|
2a16ce45d94d3961936300f5b81098cf345360e6
|
8c50c4ac75ec16ebcd2485ca886b49d3ea55515e
|
refs/heads/master
| 2022-08-03T08:30:40.931712
| 2017-11-28T06:28:36
| 2017-11-28T06:34:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 907
|
pyi
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import typing as _typing
from thrift.py3.server import RequestContext, ServiceInterface
import module.types as _module_types
class MyRootInterface(
ServiceInterface
):
@_typing.overload
async def do_root(
self,
ctx: RequestContext
) -> None: ...
async def do_root(
self
) -> None: ...
class MyNodeInterface(
_module_services.MyRootInterface
):
@_typing.overload
async def do_mid(
self,
ctx: RequestContext
) -> None: ...
async def do_mid(
self
) -> None: ...
class MyLeafInterface(
_module_services.MyNodeInterface
):
@_typing.overload
async def do_leaf(
self,
ctx: RequestContext
) -> None: ...
async def do_leaf(
self
) -> None: ...
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
09f0bceefb59bb73736f9993fabbf1ed9b32640e
|
9766c2e479e99cca5bf7cc834c949fc4d5286275
|
/SRC/engine/element.spy
|
c45a0915e20516a294ba71e27364b627bf1d54c2
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
UstbCmsPjy/OOF2
|
4c141e8da3c7e3c5bc9129c2cb27ed301455a155
|
f8539080529d257a02b8f5cc44040637387ed9a1
|
refs/heads/master
| 2023-05-05T09:58:22.597997
| 2020-05-28T23:05:30
| 2020-05-28T23:05:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
spy
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
def _Element__position(self, coords):
return map(self.from_master, coords)
ElementPtr.position = _Element__position
from ooflib.SWIG.engine.masterelement import MasterElementPtr
from ooflib.SWIG.common.coord import CoordPtr
from ooflib.SWIG.engine.mastercoord import MasterCoordPtr
from ooflib.SWIG.engine.edge import BoundaryEdgePtr
|
[
"lnz5@rosie.nist.gov"
] |
lnz5@rosie.nist.gov
|
8d6306d191fe7def474de55a4085373419f1808d
|
b9dcea5142af620b651fdfac05ffcac021ef6f83
|
/heroku_deploy/settings.py
|
0c8efb0f8c32a6b2ddba5192ab7b8e4fb168aedd
|
[] |
no_license
|
Jordan-Ak/heroku_deployment
|
aee7c1f721a51831329fef244a48fc7d65c3d0fc
|
ada1568a1c120903e400ba190f9a8fac3fb86d77
|
refs/heads/master
| 2023-03-29T12:16:30.680308
| 2021-02-23T08:16:58
| 2021-02-23T08:16:58
| 339,159,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,474
|
py
|
"""
Django settings for heroku_deploy project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kg%s)6nnp0+b%k=i7e3xgjawp16z3=9@x(_m#_(_s=40$g5m*1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['safe-chamber-01830.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#My apps
'deploys',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'heroku_deploy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'heroku_deploy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
#Database configuration
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
|
[
"JosiahDavid98@gmail.com"
] |
JosiahDavid98@gmail.com
|
31061a65eab30817d6bac7d2d11b6ae2e84bf634
|
e6f73cc3398050b23df28e3f11a10afbb46ee38b
|
/idea/chain_iter/great.py
|
9ef7b72eb2ef2aebef5e53d23cfc96e52856e86a
|
[] |
no_license
|
monarin/divelite
|
3db262bf07a0de870d0bfe650ebdf21225b88c1b
|
0d297bda7368c5295336565431fbfa18a5686f15
|
refs/heads/master
| 2023-06-29T23:42:34.541874
| 2023-06-08T17:59:59
| 2023-06-08T17:59:59
| 120,695,376
| 0
| 1
| null | 2018-09-06T00:03:30
| 2018-02-08T01:41:19
|
C++
|
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
class Step:
def __init__(self,chunks):
self.chunks = chunks
def events(self):
for i, chunk in enumerate(chunks):
print(f'events() chunk {i}')
for dg in chunk:
if dg==102: return
if dg<100: yield dg
chunks = [iter([101,1,2,3,102,101,4,5,102,101,6]),iter([7,8,102,101,9,10])]
class Run:
def __init__(self):
pass
def events(self):
for chunk in chunks:
for dg in chunk:
if dg<100: yield dg
def steps(self):
for chunk in chunks:
for dg in chunk:
if dg==101: yield Step(chunks)
myrun = Run()
#for evt in myrun.events():
# print(evt)
for istep,step in enumerate(myrun.steps()):
print('step:',istep)
for evt in step.events():
print(evt)
|
[
"monarin@gmail.com"
] |
monarin@gmail.com
|
f2b92c95db0379a8834ace8efc29165dfbec2f75
|
6569f43b525305a8899b920b8e58aab413feb519
|
/CommitteApp/migrations/0001_initial.py
|
b35fc854aca31273b4892e95c7c63a3797207735
|
[] |
no_license
|
sontus-tripura-python/tsfbd
|
daa6b19f2dae8eaf8fd9c5a5c412d7cc9606a381
|
5f851c2616e912d0af1addaaeb8e64167eed9501
|
refs/heads/main
| 2023-04-25T08:36:59.288577
| 2021-05-07T05:13:28
| 2021-05-07T05:13:28
| 242,639,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,016
|
py
|
# Generated by Django 3.1.5 on 2021-03-26 13:47
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BranchCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
],
),
migrations.CreateModel(
name='BranchName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('branchname', models.CharField(max_length=200)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='branchname')),
('branch_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='branch_categories', to='CommitteApp.branchcategory')),
],
options={
'verbose_name_plural': 'Branch Name',
},
),
migrations.CreateModel(
name='CentralYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('yearname', models.CharField(max_length=30)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='yearname')),
],
options={
'verbose_name_plural': 'central year',
},
),
migrations.CreateModel(
name='Coordinator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, default='default.jpg', upload_to='branchmember')),
('name', models.CharField(blank=True, max_length=50)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('position', models.CharField(blank=True, max_length=200)),
('blood_group', models.CharField(blank=True, max_length=20)),
('phone', models.CharField(blank=True, max_length=11)),
('about_description', models.TextField()),
('facebook', models.URLField(blank=True)),
('twitter', models.URLField(blank=True)),
('instagram', models.URLField(blank=True)),
('linkdin', models.URLField(blank=True)),
],
options={
'verbose_name_plural': 'Co-ordinator',
},
),
migrations.CreateModel(
name='CentralMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, default='default.jpg', upload_to='central')),
('name', models.CharField(blank=True, max_length=50)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('position', models.CharField(blank=True, max_length=50)),
('blood_group', models.CharField(blank=True, max_length=20)),
('phone', models.CharField(blank=True, max_length=11)),
('village', models.CharField(blank=True, max_length=200)),
('thana', models.CharField(blank=True, max_length=200)),
('district', models.CharField(blank=True, max_length=200)),
('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], default='Male', max_length=20)),
('current_enroll', models.CharField(choices=[('University', 'University'), ('College', 'College'), ('School', 'School'), ('Job', 'Job'), ('Other', 'Other')], max_length=200, null=True)),
('facebook', models.URLField(blank=True)),
('twitter', models.URLField(blank=True)),
('instagram', models.URLField(blank=True)),
('linkdin', models.URLField(blank=True)),
('session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.centralyear')),
],
options={
'verbose_name_plural': 'Central Member',
},
),
migrations.CreateModel(
name='BranchYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('branchyear', models.CharField(max_length=200)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='branchyear')),
('branches', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.branchname')),
],
options={
'verbose_name_plural': 'Branch year',
},
),
migrations.CreateModel(
name='BranchMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, default='default.jpg', upload_to='branchmember')),
('University', models.CharField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=50)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('position', models.CharField(blank=True, max_length=50)),
('blood_group', models.CharField(blank=True, max_length=20)),
('phone', models.CharField(blank=True, max_length=11)),
('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], default='Male', max_length=20)),
('current_enroll', models.CharField(choices=[('University', 'University'), ('College', 'College'), ('School', 'School'), ('Job', 'Job'), ('Other', 'Other')], max_length=200, null=True)),
('facebook', models.URLField(blank=True)),
('twitter', models.URLField(blank=True)),
('instagram', models.URLField(blank=True)),
('linkdin', models.URLField(blank=True)),
('memberbranch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.branchyear')),
('namebranch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.branchname')),
],
options={
'verbose_name_plural': 'Branch member',
'ordering': ('id',),
},
),
]
|
[
"sontustrp.com@gmail.com"
] |
sontustrp.com@gmail.com
|
cb039221da592e976304557e61902704eecbcbac
|
ab0315bcded75c10c591076b22ed8ff664ee76af
|
/fig4/8mods_round4_0919/config_scf_8mods_data_freeze_190917_sub3_1_2.py
|
df15d79c7bc45e5b1e3aad780dae8f8d1bab9a7e
|
[] |
no_license
|
mukamel-lab/BICCN-Mouse-MOp
|
389f62492986a2ffe4278ed16f59fc17dc75b767
|
8058ab8ae827c6e019fff719903b0ba5b400931d
|
refs/heads/master
| 2021-07-06T11:14:25.401628
| 2020-09-30T04:54:27
| 2020-09-30T04:54:27
| 189,758,115
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
#!/usr/bin/env python3
"""An example configuration file
"""
import sys
sys.path.insert(0, '/cndd/fangming/CEMBA/snmcseq_dev')
import os
import snmcseq_utils
# # Configs
name = 'mop_8mods_0915_k30_sub3-1-2'
outdir = '/cndd/fangming/CEMBA/data/MOp_all/results'
output_pcX_all = outdir + '/pcX_all_{}.npy'.format(name)
output_cells_all = outdir + '/cells_all_{}.npy'.format(name)
output_imputed_data_format = outdir + '/imputed_data_{}_{{}}.npy'.format(name)
output_clst_and_umap = outdir + '/intg_summary_{}.tsv'.format(name)
output_figures = outdir + '/figures/{}_{{}}.{{}}'.format(name)
output_cluster_centroids = outdir + '/centroids_{}.pkl'.format(name)
DATA_DIR = '/cndd/fangming/CEMBA/data/MOp_all/data_freeze_neurons_subtypes_8mods_round4/sub3-1-2'
# fixed dataset configs
sys.path.insert(0, DATA_DIR)
from __init__datasets import *
meta_f = os.path.join(DATA_DIR, '{0}_metadata.tsv')
hvftrs_f = os.path.join(DATA_DIR, '{0}_hvfeatures.{1}')
hvftrs_gene = os.path.join(DATA_DIR, '{0}_hvfeatures.gene')
hvftrs_cell = os.path.join(DATA_DIR, '{0}_hvfeatures.cell')
# mods_selected = [
# 'snmcseq_gene',
# 'snatac_gene',
# 'smarter_cells',
# 'smarter_nuclei',
# '10x_cells_v2',
# '10x_cells_v3',
# '10x_nuclei_v3',
# '10x_nuclei_v3_macosko',
# ]
mods_selected = snmcseq_utils.import_single_textcol(os.path.join(DATA_DIR, 'datasets.txt'))
print(mods_selected)
features_selected = ['10x_cells_v2']
# check features
for features_modality in features_selected:
assert (features_modality in mods_selected)
# within modality
ps = {'mc': 0.9,
'atac': 0.1,
'rna': 0.7,
}
drop_npcs = {
'mc': 0,
'atac': 0,
'rna': 0,
}
# across modality
cross_mod_distance_measure = 'correlation' # cca
knn = 20
relaxation = 3
n_cca = 30
# PCA
npc = 50
# clustering
k = 30
resolutions = [0.1, 0.2, 0.5, 1,]
# umap
umap_neighbors = 30
min_dist = 0.5
|
[
"fmxie1993@gmail.com"
] |
fmxie1993@gmail.com
|
8bfe423384a181fbcaaca4b82f6299f2a9d8cac4
|
b6203a8829e4387031762d7a3d9c2125f82a465e
|
/helloDjango/mainapp/migrations/0011_auto_20210716_1550.py
|
387e9863e32ec579fb9003544d74473618a96248
|
[] |
no_license
|
Jack-liyuanjie/Django01
|
db5f88560d65311987d70325c35f1783ded5ace9
|
7068dab5fe85b21d7a0f052572a68a2fe814fc21
|
refs/heads/master
| 2023-06-27T22:10:44.852483
| 2021-07-28T07:02:13
| 2021-07-28T07:02:13
| 390,248,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# Generated by Django 2.0.1 on 2021-07-16 07:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0010_auto_20210716_1531'),
]
operations = [
migrations.CreateModel(
name='FruitCartEntity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cnt', models.IntegerField(default=1, verbose_name='数量')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.CartEntity', verbose_name='购物车')),
],
options={
'verbose_name': '购物车详情表',
'verbose_name_plural': '购物车详情表',
'db_table': 't_fruit_cart',
},
),
migrations.AlterModelTable(
name='fruitentity',
table='t_fruit',
),
migrations.AddField(
model_name='fruitcartentity',
name='fruit',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.FruitEntity', verbose_name='水果名'),
),
]
|
[
"2311485953@qq.com"
] |
2311485953@qq.com
|
16fbdc4c4b8b382fdc8963e09498fcd6e61b7633
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/demo_20201106171218.py
|
e40917f42694981b872f3e9e7b9a58b87321d17f
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383
| 2021-07-23T03:31:54
| 2021-07-23T03:31:54
| 322,807,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
# a = 1
# if a==0:
# print("a=0")
# else:
# print("a!0")
# """
# x>1 (3x-5)
# -1<=x<=1 (x+2)
# x < -1 (5x+3)
# """
# x = int(input("输入您的数字:"))
# if x > 1:
# print(3*x-5)
# else:
# if x >= -1:
# print(x + 2)
# else:
# print(5*x+3)
# 猜数字游戏
# import random
# computet_num = random.randint(1,100)
# while True:
# people_num = int(input("请输入您的数字:"))
# if people_num < computet_num:
# print("大一点")
# elif people_num > computet_num:
# print("小一点")
# else:
# print("猜对了")
# break
# def fun1(a,b,c):
# print("这是参数a:",a)
# print("这是参数b:",b)
# print("这是参数c:",c)
# fun1(1,23,4)
# def fun1(a):
# # return "ac"
# print("a")
# fun1("c")
# def fun1(a,b,c,d):
# print(a,b,c,d)
# fun1(10,13,d=13,c=90)
# fun1 = lambda x: x+10
# print(fun1(5))
# def fun1(x):
# return x+10
# print(fun1(5))
# fun1 = lambda x,y: x+y
# print(fun1(10,12))
list = ["ha"]
b = {"hah"}
c = "a"
print(type(list))
print(type(b))
print(type())
|
[
"zhangyingxbba@gmail.com"
] |
zhangyingxbba@gmail.com
|
e46760ca6a1ddba11d0537d234a557acc6dd3425
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_216/ch25_2020_09_09_21_54_03_750638.py
|
ad2b06073c8d92f2380ee1a03f82efb712a43682
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
import math
v = float(input("Velocidade"))
a = float(input("Angulo"))
d = ((v**2) * math.sin(2*a))/9.8
if d <= 98:
print("Muito perto")
else:
if d >= 102:
print("Muito longe")
else:
print("Acertou!")
|
[
"you@example.com"
] |
you@example.com
|
b18bc25f8220824e4dc95d1c7070d671cc8e4d5f
|
e2f9d506dcc3fee7dbbbce370c7e2c3f48275828
|
/poc/merge-multiple-json-file/test.py
|
13f57b73202835f5b6dd25006a623ec8932c627f
|
[
"MIT"
] |
permissive
|
MacHu-GWU/s3splitmerge-project
|
d33829f1ff6aed9cc77c9b4bec30601ce4570f60
|
873892158f4a2d0ee20f291e5d3b2a80f0bae1ba
|
refs/heads/main
| 2023-08-30T09:07:32.312453
| 2021-11-07T16:08:24
| 2021-11-07T16:08:24
| 394,803,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
# -*- coding: utf-8 -*-
import io
import time
import boto3
from boto3.s3.transfer import TransferConfig
from icecream import ic
import awswrangler as wr
from datetime import datetime
import pandas as pd
from pathlib_mate import Path
boto_ses = boto3.session.Session()
s3_client = boto_ses.client("s3")
class Config:
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
key_prefix = "s3splitmerge/poc/merge-multiple-json-file"
n_file = 3
n_records_per_file = 150000
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
key_prefix = "s3splitmerge/poc/merge-multiple-json-file"
def create_test_data():
n_file = 3
n_records_per_file = 150000
columns = ["id", "value"]
value = "alice@example.com"
for nth_file in range(1, 1+n_file):
start_id = (nth_file - 1) * n_records_per_file + 1
end_id = start_id + n_records_per_file
df = pd.DataFrame(columns=columns)
df["id"] = range(start_id, end_id)
df["value"] = value
wr.s3.to_json(
df=df,
path=f"s3://{bucket}/{key_prefix}/{nth_file}.json",
orient="records",
lines=True,
)
def merge_files():
KB = 1024
config = TransferConfig(multipart_threshold=1)
target_key = f"{key_prefix}/data.json"
response = s3_client.create_multipart_upload(
Bucket=bucket,
Key=target_key,
)
upload_id = response["UploadId"]
n_file = 3
s3_key_lst = [
f"{key_prefix}/{nth_file}.json"
for nth_file in range(1, 1+n_file)
]
parts = list()
for part_number, s3_key in enumerate(s3_key_lst):
part_number += 1
response = s3_client.upload_part_copy(
Bucket=bucket,
Key=target_key,
CopySource={"Bucket": bucket, "Key": s3_key},
PartNumber=part_number,
UploadId=upload_id,
)
etag = response["CopyPartResult"]["ETag"]
parts.append({"ETag": etag, "PartNumber": part_number})
s3_client.complete_multipart_upload(
Bucket=bucket,
Key=target_key,
MultipartUpload={"Parts": parts},
UploadId=upload_id
)
if __name__ == "__main__":
create_test_data()
merge_files()
pass
|
[
"MacHu-GWU@users.noreply.github.com"
] |
MacHu-GWU@users.noreply.github.com
|
932b2be3018c7e85db9ac36d0ef3868c1c8bc902
|
89521af529f155a2352003caddd4b5edd58a57a6
|
/sale_invoice_plan/models/sale.py
|
5010af3a87f864d3e5aff52079aa8eef1222c4d7
|
[] |
no_license
|
ecosoft-odoo/eco-addons
|
bd132d326c4af150f16dda7935af23d200e1e3df
|
cb0ebea2cb9a26945093e2a4036a0854b6fc89b2
|
refs/heads/11.0
| 2021-07-15T03:22:37.875705
| 2019-02-23T08:32:59
| 2019-02-23T08:32:59
| 168,274,323
| 0
| 5
| null | 2020-07-17T09:15:20
| 2019-01-30T03:41:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,745
|
py
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from dateutil.relativedelta import relativedelta
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.addons import decimal_precision as dp
from odoo.tools.float_utils import float_round as round
class SaleOder(models.Model):
_inherit = 'sale.order'
invoice_plan_ids = fields.One2many(
comodel_name='sale.invoice.plan',
inverse_name='sale_id',
string='Inovice Plan',
copy=False,
)
use_invoice_plan = fields.Boolean(
string='Use Invoice Plan',
default=False,
copy=False,
)
@api.multi
def create_invoice_plan(self, num_installment, installment_date,
interval, interval_type, advance):
self.ensure_one()
self.invoice_plan_ids.unlink()
invoice_plans = []
if num_installment <= 1:
raise UserError(_('Number Installment must greater than 1'))
Decimal = self.env['decimal.precision']
prec = Decimal.precision_get('Product Unit of Measure')
percent = round(1.0 / num_installment * 100, prec)
percent_last = 100 - (percent * (num_installment-1))
# Advance
if advance:
vals = {'installment': 0, 'plan_date': installment_date,
'type': 'advance', 'percent': 0.0}
invoice_plans.append((0, 0, vals))
installment_date = self._next_date(installment_date,
interval, interval_type)
# Normal
for i in range(num_installment):
this_installment = i+1
if num_installment == this_installment:
percent = percent_last
vals = {'installment': this_installment,
'plan_date': installment_date,
'type': 'installment',
'percent': percent}
invoice_plans.append((0, 0, vals))
installment_date = self._next_date(installment_date,
interval, interval_type)
self.write({'invoice_plan_ids': invoice_plans})
return True
@api.multi
def remove_invoice_plan(self):
self.ensure_one()
self.invoice_plan_ids.unlink()
return True
@api.model
def _next_date(self, installment_date, interval, interval_type):
installment_date = fields.Date.from_string(installment_date)
if interval_type == 'month':
next_date = installment_date + relativedelta(months=+interval)
elif interval_type == 'year':
next_date = installment_date + relativedelta(years=+interval)
else:
next_date = installment_date + relativedelta(days=+interval)
next_date = fields.Date.to_string(next_date)
return next_date
@api.multi
def action_invoice_create(self, grouped=False, final=False):
inv_ids = super().action_invoice_create(grouped=grouped, final=final)
invoice_plan_id = self._context.get('invoice_plan_id')
if invoice_plan_id:
plan = self.env['sale.invoice.plan'].browse(invoice_plan_id)
invoices = self.env['account.invoice'].browse(inv_ids)
invoices.ensure_one() # Expect 1 invoice for 1 invoice plan
plan._compute_new_invoice_quantity(invoices[0])
plan.invoice_ids += invoices
return inv_ids
class SaleInvoicePlan(models.Model):
_name = 'sale.invoice.plan'
_order = 'installment'
sale_id = fields.Many2one(
comodel_name='sale.order',
string='Sales Order',
index=True,
readonly=True,
ondelete='cascade',
)
installment = fields.Integer(
string='Installment',
)
plan_date = fields.Date(
string='Plan Date',
required=True,
)
type = fields.Selection(
[('advance', 'Advance'),
('installment', 'Installment'), ],
string='Type',
required=True,
default='installment',
)
last = fields.Boolean(
string='Last Installment',
compute='_compute_last',
help="Last installment will create invoice use remaining amount",
)
percent = fields.Float(
string='Percent',
digits=dp.get_precision('Product Unit of Measure'),
help="This percent will be used to calculate new quantity"
)
invoice_ids = fields.Many2many(
'account.invoice',
relation="sale_invoice_plan_invoice_rel",
column1='plan_id', column2='invoice_id',
string='Invoices',
readonly=True,
)
to_invoice = fields.Boolean(
string='Next Invoice',
compute='_compute_to_invoice',
help="If this line is ready to create new invoice",
)
invoiced = fields.Boolean(
string='Invoice Created',
compute='_compute_invoiced',
help="If this line already invoiced",
)
_sql_constraint = [('unique_instalment',
'UNIQUE (sale_id, installment)',
'Installment must be unique on invoice plan')]
@api.multi
def _compute_to_invoice(self):
""" If any invoice is in draft/open/paid do not allow to create inv
Only if previous to_invoice is False, it is eligible to_invoice
"""
for rec in self.sorted('installment'):
rec.to_invoice = False
if rec.sale_id.state != 'sale': # Not confirmed, no to_invoice
continue
if not rec.invoiced:
rec.to_invoice = True
break
@api.multi
def _compute_invoiced(self):
for rec in self:
invoiced = rec.invoice_ids.filtered(
lambda l: l.state in ('draft', 'open', 'paid'))
rec.invoiced = invoiced and True or False
@api.multi
def _compute_last(self):
for rec in self:
last = max(rec.sale_id.invoice_plan_ids.mapped('installment'))
rec.last = rec.installment == last
@api.multi
def _compute_new_invoice_quantity(self, invoice):
self.ensure_one()
if self.last: # For last install, let the system do the calc.
return
percent = self.percent
for line in invoice.invoice_line_ids:
assert len(line.sale_line_ids) >= 0, \
'No matched order line for invoice line'
order_line = line.sale_line_ids[0]
if order_line.is_downpayment:
line.quantity = -percent/100 # Always based on 1 unit
else:
line.quantity = order_line.product_uom_qty * (percent/100)
invoice.compute_taxes()
|
[
"kittiu@gmail.com"
] |
kittiu@gmail.com
|
3759d19c7821225d7aff6f1f4cd1a6780d8444bb
|
d7390fea6c7f712ee32be6d3478835d965d795e0
|
/py26_08day/task_08day.py
|
db5bc4485d6ca0abe1775d07e118dc8743cc7d00
|
[] |
no_license
|
luwenchun/Automated_Test
|
2f424655d80127e3ed98657869021a775beca868
|
79b9937cfc0841b0a80d4fd45d8ff467654b5b55
|
refs/heads/master
| 2021-02-10T15:23:08.446463
| 2020-03-26T10:39:38
| 2020-03-26T10:39:38
| 244,393,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,430
|
py
|
"""
============================
Author:柠檬班-木森
Time:2019/10/7
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
# 第一题
def mul_table():
for i in range(1, 10):
for j in range(1, i + 1):
print('{} * {} = {:<4}'.format(i,j,i*j),end="")
print()
mul_table()
# for i in range(1, 10):
# print()
# for j in range(1, i + 1):
# print('{}*{}={} '.format(i,j,i*j), end="")
# print()
# 第二题
def count_num():
count = 0
for a in range(1, 5):
for b in range(1, 5):
for c in range(1, 5):
if a != b and c != b and a != c:
print(a, b, c)
number = int('{}{}{}'.format(a,b,c))
print(number)
count += 1
print('一共有{}个'.format(count))
count_num()
# 第三题
def compute_number():
print('欢迎使用计算器')
a = int(input('请输入数字1:'))
b = int(input('请输入数字2:'))
print('功能提示:【1】加 【2】减【3】乘 【4】除')
num = input('请选择:')
if num == '1':
return a + b
elif num == '2':
return a - b
elif num == '3':
return a * b
elif num == '4':
return a / b
else:
print('没有此选项!')
res = compute_number()
print(res)
# 第四题
users = [{"name": "py01", "pwd": "123"},
{"name": "py02", "pwd": "123"},
{"name": "py03", "pwd": "123"},
{"name": "py04", "pwd": "123"}]
def register():
# 注册功能
username = input('请输入新账号:') # 输入账号
password1 = input('请输入密码:') # 输入密码
password2 = input('请再次确认密码:') # 再次确认密码
for user in users: # 遍历出所有账号,判断账号是否存在
if username == user['name']:
print('该账户已存在') # 账号存在,
break
else:
# 判断两次密码是否一致
if password1 != password2:
print('注册失败,两次输入的密码不一致')
else:
# 账号不存在 密码一样,则添加到账户列表中
users.append({'name': username, 'pwd': password2})
print('注册成功!')
register()
|
[
"luwenchun@users.noreply.github.com"
] |
luwenchun@users.noreply.github.com
|
8c810e7e7f2efde783f7790fd2f14511ddd35ac6
|
8ded89b0aff486337e17ddd710eca15b8450a015
|
/first.py
|
25f35fa4facb43418c0376e78efcdf8fc5547efa
|
[] |
no_license
|
svetlyak40wt/moscow-python-confpp-2021
|
2f99881efce9e41f0b281bd9f16d0611025ac684
|
d0b7ce93ac24d0c681697eb17703e975d15fdb27
|
refs/heads/master
| 2023-08-04T07:53:23.776076
| 2021-09-20T09:28:19
| 2021-09-20T09:28:19
| 406,925,502
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
def load_ipython_extension(ipython):
print('Loading "first" extension')
def unload_ipython_extension(ipython):
print('Unloading "first" extension')
|
[
"svetlyak.40wt@gmail.com"
] |
svetlyak.40wt@gmail.com
|
b3603edcdd2487b22caaac500e8b836d86c87e51
|
d01670aa5bddb47dc414bf01921155610e2a5070
|
/leetcode/091_decodeways.py
|
66c865541c4fc56aa6acd0de248dfcfb71736389
|
[] |
no_license
|
hwillmott/csfundamentals
|
14c7e4253b581cef7046ca035bda038c24a52613
|
832f6a8c0deb0569d3fe0dc03e4564c2d850f067
|
refs/heads/master
| 2020-08-01T12:27:01.914391
| 2020-03-26T16:47:35
| 2020-03-26T16:47:35
| 73,576,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0: return 0
if len(s) == 1: return 0 if s[0] == "0" else 1
dp = [0]*(len(s)+1)
dp[0] = 1
dp[1] = 0 if s[1] == "0" else 1
s = "0" + s
for i in range(2,len(s)):
if s[i] == 0: continue
dp[i] = dp[i-1] + dp[i-2] if int(s[i-2:i+1]) <= 26 else dp[i-1]
return dp[len(s)-1]
|
[
"harriet.willmott@gmail.com"
] |
harriet.willmott@gmail.com
|
1b01b4d4b97db401bb32399f6d99e33daa724450
|
ba90cb8a089d38de2c6d63bf65e9bf556731d5c6
|
/Projeto1/aplicacaoR.py
|
026fceaa757598cd22376291ab7299d7ce43933c
|
[] |
no_license
|
luizasilveira/Camada_fisica
|
edc96288250b155b4d68156cf2a9ec638a3cb8c8
|
98c9f6c04d293abaa450c080403d3f25db9c4ef4
|
refs/heads/master
| 2020-07-05T21:00:33.782301
| 2019-10-31T18:50:12
| 2019-10-31T18:50:12
| 202,774,216
| 0
| 0
| null | 2019-09-05T17:33:14
| 2019-08-16T17:46:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
#!/usr/bin/env python3
# -- coding: utf-8 --
#####################################################
# Camada Física da Computação
#Carareto
#17/02/2018
# Aplicação
####################################################
print("comecou")
from enlace import *
import time
# Serial Com Port
# para saber a sua porta, execute no terminal :
# python -m serial.tools.list_ports
#serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
serialName = "/dev/cu.usbmodem146201" # Mac (variacao de)
#serialName = "COM5" # Windows(variacao de)
print("abriu com")
def main():
# Inicializa enlace ... variavel com possui todos os metodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # repare que o metodo construtor recebe um string (nome)
# Ativa comunicacao
com.enable()
# Log
print("-------------------------")
print("Comunicação inicializada")
print(" porta : {}".format(com.fisica.name))
print("-------------------------")
# Faz a recepção dos dados
print ("Recebendo dados .... ")
bufferReceived = bytearray()
while True:
rxBuffer, nRx = com.getData(1)
bufferReceived += rxBuffer
if (b"end" in bufferReceived):
break
imgSize = bufferReceived[:-3]
rxBuffer, nRx = com.getData(int(imgSize))
txLen = len(rxBuffer)
with open("teste.jpg", "wb") as img:
img.write(rxBuffer)
print ("Recebidos {} bytes ".format(txLen))
com.sendData(imgSize)
print ("Transmitido {} bytes ".format(len(imgSize)))
while(com.tx.getIsBussy()):
pass
# Encerra comunicação
print("-------------------------")
print("Comunicação encerrada")
print("-------------------------")
com.disable()
#so roda o main quando for executado do terminal ... se for chamado dentro de outro modulo nao roda
if __name__ == "__main__":
main()
|
[
"you@example.com"
] |
you@example.com
|
60fba09736c5b2b4834cc41097c27d5db61e411f
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2486/60793/268658.py
|
cb6f125c3651fd7219133d58cda5cf81b8bf149a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
ls = []
for test in range(0, int(input())):
ls.append(input())
if ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
else:
print(ls)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
ab109fe310c0a92a3c5a6c5d7cc674974201c387
|
84a19fe0b89bb19caa1641aeadc9623c1a181767
|
/arc/078/d.py
|
0ff00fea6d789210dda2f0e596992697ebc2dad2
|
[
"MIT"
] |
permissive
|
wotsushi/competitive-programming
|
75abae653cff744189c53ad7e6dbd2ca1a62e3a8
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
refs/heads/master
| 2021-06-10T06:42:40.846666
| 2021-05-31T10:32:51
| 2021-05-31T10:32:51
| 175,002,279
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
from heapq import heappush, heappop
# 入力
N = int(input())
a, b = (
zip(*(map(int, input().split()) for _ in range(N - 1))) if N - 1 else
((), ())
)
# 頂点1, N から各蝶点への距離を求める
G = [{} for _ in range(N + 1)]
for x, y in zip(a, b):
G[x][y] = 1
G[y][x] = 1
INF = 10**10
def dijkstra(G, s):
dp = [INF for _ in range(len(G))]
q = []
heappush(q, (0, s))
while q:
c, i = heappop(q)
if dp[i] == INF:
dp[i] = c
for j, w in G[i].items():
heappush(q, (c + w, j))
return dp
dp1 = dijkstra(G, 1)
dpN = dijkstra(G, N)
# 頂点Nより頂点1のほうが近い頂点、または、頂点1と頂点Nとの距離が等しい頂点は
# 頂点1から頂点Nの間のパスに含まれる頂点のうち、Fennecが塗れる頂点である。
ans = (
'Fennec' if sum(dp1[i] <= dpN[i] for i in range(1, N + 1)) > N // 2 else
'Snuke'
)
# 出力
print(ans)
|
[
"wotsushi@gmail.com"
] |
wotsushi@gmail.com
|
bd9820da489ae49e89dde62f758b29cba5318c2b
|
7997a9581ac4badc53793e32bc85878b8e16094e
|
/breaklines2dxf.py
|
8f168312c1ed8ca374fbae7ab17271d31999fc5e
|
[] |
no_license
|
jasonfleming/pputils
|
78adb832c5860a37e1473a43a5b9a54ad4d5ce55
|
2b34e47e4c3331d5780019d248e3f37c71164665
|
refs/heads/master
| 2022-09-29T13:40:38.657005
| 2022-08-10T19:40:06
| 2022-08-10T19:40:06
| 79,500,881
| 1
| 0
| null | 2017-01-19T22:17:09
| 2017-01-19T22:17:09
| null |
UTF-8
|
Python
| false
| false
| 2,917
|
py
|
#!/usr/bin/env python3
#
#+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!
#                                                                       #
#                         breaklines2dxf.py                             #
#                                                                       #
#+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!
#
# Author: Pat Prodanovic, Ph.D., P.Eng.
#
# Date: Sept 12, 2015
#
# Modified: Feb 20, 2016
# Made it work for python 2 and 3
#
# Purpose: Takes a pputils 3d breakline and exports it to dxf format.
# To create the 3d breakline from xyz and lines.csv, run mkbreakline.py
#
# Uses: Python 2 or 3, Numpy
#
# Example:
#
# python breaklines2dxf.py -l lines3d.csv -o lines3d.dxf
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os,sys                              # system parameters
import numpy as np                         # numpy
from dxfwrite import DXFEngine as dxf      # for dxf export
from progressbar import ProgressBar, Bar, Percentage, ETA
curdir = os.getcwd()
#
# I/O
# Command line is expected to be exactly: -l <lines3d.csv> -o <out.dxf>
if len(sys.argv) == 5 :
    dummy2 = sys.argv[1]        # the '-l' flag (not used further)
    lines_file = sys.argv[2]    # input pputils 3d breakline csv
    dummy3 = sys.argv[3]        # the '-o' flag (not used further)
    output_file = sys.argv[4]   # output dxf file name
else:
    print('Wrong number of Arguments, stopping now...')
    print('Usage:')
    print('python breaklines2dxf.py -l lines3d.csv -o lines3d.dxf')
    sys.exit()

# to create the output file
drawing = dxf.drawing(output_file)
#fout = open(output_file,"w")

# use numpy to read the file
# each column in the file is a row in data read by np.loadtxt method
# columns: shapeid, x, y, z
lines_data = np.loadtxt(lines_file, delimiter=',',skiprows=0,unpack=True)
shapeid_lns = lines_data[0,:]
x_lns = lines_data[1,:]
y_lns = lines_data[2,:]
z_lns = lines_data[3,:]

# round lines nodes to three decimals
x_lns = np.around(x_lns,decimals=3)
y_lns = np.around(y_lns,decimals=3)
z_lns = np.around(z_lns,decimals=3)

# finds out how many unique breaklines there are
n_unique_lns = np.unique(shapeid_lns)

# number of nodes in the lines file
n_lns = len(x_lns)

# progress bar for long files
w = [Percentage(), Bar(), ETA()]
pbar = ProgressBar(widgets=w, maxval=n_lns).start()

# write the breaklines: consecutive rows with the same shapeid become
# segments of one polyline; a shapeid change starts a new polyline
poly = dxf.polyline()
for i in range(0,n_lns):
    pbar.update(i+1)
    if (i>0):
        cur_lns_shapeid = shapeid_lns[i]
        prev_lns_shapeid = shapeid_lns[i-1]
        if (cur_lns_shapeid - prev_lns_shapeid < 0.001):
            # create tupples for vertexes to add
            v0 = (x_lns[i-1], y_lns[i-1], z_lns[i-1])
            v1 = (x_lns[i], y_lns[i], z_lns[i])
            poly.add_vertices( [v0, v1] )
            # this is needed, as the else below is never executed
            # for the last line in the lines file!
            if (i == n_lns-1):
                drawing.add(poly)
        else:
            drawing.add(poly)
            poly = dxf.polyline()
############################################################################
drawing.save()
pbar.finish()
|
[
"pprodano@gmail.com"
] |
pprodano@gmail.com
|
2283913c20c0a5855e4b54e307d09da3b38a5e03
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2019/IsInteger.spec
|
a3eeb6b9762f1cd246c58bd8d2323452533ffde5
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712
| 2022-02-27T04:19:33
| 2022-02-27T11:06:14
| 384,045,526
| 6
| 4
|
NOASSERTION
| 2022-02-27T11:05:26
| 2021-07-08T07:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 231
|
spec
|
1. If Type(_argument_) is not Number, return *false*.
1. If _argument_ is *NaN*, *+∞*, or *-∞*, return *false*.
1. If floor(abs(_argument_)) ≠ abs(_argument_), return *false*.
1. Return *true*.
|
[
"h2oche22@gmail.com"
] |
h2oche22@gmail.com
|
8a92c87c32c27752fd7b041c88469c7df129a667
|
70b339d0b2638a7914d0d56c5edf8a2637c9f4b0
|
/maxSideLength.py
|
cddba5c45c0fad94452c6e6bf97e8ce6d0442861
|
[] |
no_license
|
pflun/advancedAlgorithms
|
9991da7514024e18ba08de8688966b9220e12571
|
5520dbcd26999b98e1229bf03c2f62dd690a2ddc
|
refs/heads/master
| 2023-02-19T12:05:26.902535
| 2023-02-14T06:08:54
| 2023-02-14T06:08:54
| 189,055,701
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
class Solution(object):
    def maxSideLength(self, mat, threshold):
        """Return the side length of the largest square sub-matrix of
        ``mat`` whose element sum is <= ``threshold`` (0 if none exists).

        Uses a 2-D prefix-sum table so every square sum is O(1).
        Assumes non-negative entries (the early ``break`` relies on sums
        growing with the side length, as in the original).
        """
        rows, cols = len(mat), len(mat[0])
        res = 0
        # prefix[i][j] = sum of mat[0..i-1][0..j-1]; row/col 0 are zeros.
        prefix = [[0] * (cols + 1) for _ in range(rows + 1)]
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                prefix[i][j] = (int(mat[i - 1][j - 1]) + prefix[i - 1][j]
                                + prefix[i][j - 1] - prefix[i - 1][j - 1])
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                # Bug fix: the original iterated range(1, min(...)), which
                # excludes the largest feasible side (a 1x1 matrix always
                # returned 0).  The bound must be inclusive: min(...) + 1.
                # Also removed a leftover Python 2 debug print.
                max_k = min(rows - i + 1, cols - j + 1)
                for k in range(1, max_k + 1):
                    # square sum = large square - two rectangles + small square
                    total = (prefix[i + k - 1][j + k - 1]
                             - prefix[i + k - 1][j - 1]
                             - prefix[i - 1][j + k - 1]
                             + prefix[i - 1][j - 1])
                    if total > threshold:
                        break
                    res = max(res, k)
        return res
# Ad-hoc smoke tests (note: Python 2 `print` statement syntax).
test = Solution()
print test.maxSideLength([[1,1,3,2,4,3,2],[1,1,3,2,4,3,2],[1,1,3,2,4,3,2]], 4)
print test.maxSideLength([[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2]], 1)
print test.maxSideLength([[1,1,1,1],[1,0,0,0],[1,0,0,0],[1,0,0,0]], 6)
print test.maxSideLength([[18,70],[61,1],[25,85],[14,40],[11,96],[97,96],[63,45]], 40184)
|
[
"tango7down@icloud.com"
] |
tango7down@icloud.com
|
3805526ca3074737c6cb1a415f59d1e0594a8a86
|
45ca434bdb9e48fdbb2cda0e7fdd9a76474117b0
|
/aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetOptimizeConfigRequest.py
|
19d77da4c23b5c17673b622254d3d165edf91520
|
[
"Apache-2.0"
] |
permissive
|
wanyanzhenjiang/aliyun-openapi-python-sdk
|
e41e9937ad3f851e5a58f6bea95663e88f7fee13
|
4a5bf1b35f2395d047ead4444ea46721976bdd24
|
refs/heads/master
| 2020-12-30T10:37:55.789911
| 2017-07-27T06:55:15
| 2017-07-27T06:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetOptimizeConfigRequest(RpcRequest):
    """Request wrapper for the Aliyun CDN ``SetOptimizeConfig`` RPC (API version 2014-11-11)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetOptimizeConfig')

    def get_OwnerId(self):
        # Returns None when the parameter has not been set.
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_SecurityToken(self):
        params = self.get_query_params()
        return params.get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_DomainName(self):
        params = self.get_query_params()
        return params.get('DomainName')

    def set_DomainName(self, DomainName):
        self.add_query_param('DomainName', DomainName)

    def get_Enable(self):
        params = self.get_query_params()
        return params.get('Enable')

    def set_Enable(self, Enable):
        self.add_query_param('Enable', Enable)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
089f41da2f0384caa4f7439752ccbadea77a3cf2
|
e15294825647bb904a32703bc4e7b9008d094710
|
/services/recognize-text/textrecognizer/service.py
|
eabc4901eb16c2c6ad0cbbf6fa22b332c3352ea4
|
[
"MIT"
] |
permissive
|
seekersapp2013/aleph
|
44494ea7c8c02df4f74a817ae2cf901a9a13f099
|
2c4767da3c75d38d8ea39769559a06a1f29390a8
|
refs/heads/master
| 2020-06-14T01:35:31.328619
| 2019-07-01T15:01:59
| 2019-07-01T15:01:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
import grpc
import time
import logging
from threading import RLock
from concurrent import futures
from servicelayer.rpc.ocr_pb2 import Image
from servicelayer.rpc.common_pb2 import Text
from servicelayer.rpc.ocr_pb2_grpc import RecognizeTextServicer
from servicelayer.rpc.ocr_pb2_grpc import add_RecognizeTextServicer_to_server
from textrecognizer.recognize import OCR, PSM
log = logging.getLogger('service')
class OCRServicer(RecognizeTextServicer):
    """gRPC servicer that extracts text from images via the OCR engine."""

    # Map the protobuf segmentation hint to engine page-segmentation modes
    # (PSM names suggest Tesseract under the hood -- confirm in OCR class).
    MODES = {
        Image.PAGE: PSM.AUTO_OSD,
        Image.WORD: PSM.SINGLE_WORD,
        Image.CHARACTER: PSM.SINGLE_CHAR
    }

    def __init__(self):
        # Lock kept although the serialization it guarded is commented out below.
        self.lock = RLock()
        self.ocr = OCR()

    def Recognize(self, image, context):
        """Handle one Recognize RPC: OCR ``image.data`` and return a Text message."""
        # acquired = self.lock.acquire(blocking=False)
        # if acquired is False:
        #     context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        #     context.set_details('OCR engine is busy.')
        #     return Text()
        try:
            # Unknown modes fall back to automatic page segmentation.
            mode = self.MODES.get(image.mode, PSM.AUTO_OSD)
            text = self.ocr.extract_text(image.data,
                                         mode=mode,
                                         languages=image.languages)
            return Text(text=text)
        except Exception as exc:
            log.exception("Failed OCR.")
            # Reset the engine so one bad document doesn't poison later calls.
            self.ocr.clear_engine()
            # abort() raises, terminating the RPC with INTERNAL.
            context.abort(grpc.StatusCode.INTERNAL, str(exc))
        # finally:
        #     self.lock.release()
def serve(port):
    """Start the gRPC OCR server on *port* and block until interrupted."""
    # Allow request payloads up to 20 MiB (scanned images can be large).
    opts = [('grpc.max_receive_message_length', 20 * 1024 * 1024)]
    pool = futures.ThreadPoolExecutor(max_workers=4)
    server = grpc.server(pool, options=opts)
    add_RecognizeTextServicer_to_server(OCRServicer(), server)
    server.add_insecure_port(port)
    server.start()
    log.info("Server started: %s", port)
    # grpc serves on daemon threads; keep the main thread alive.
    try:
        while True:
            time.sleep(84600)
    except KeyboardInterrupt:
        server.stop(60)
if __name__ == '__main__':
    # Configure root logging and quiet PIL's very chatty debug output.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('PIL').setLevel(logging.INFO)
    serve('[::]:50000')
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
abb229e9e35ef546af8d18066a68b81643982096
|
48832d27da16256ee62c364add45f21b968ee669
|
/res_bw/scripts/common/lib/plat-sunos5/sunaudiodev.py
|
0a3efa9bcb6cdc1e4b813c180119e8e06ce79f19
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144
| 2016-08-04T18:08:34
| 2016-08-04T18:08:34
| 64,955,694
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 776
|
py
|
# 2016.08.04 20:01:31 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-sunos5/SUNAUDIODEV.py
# Constants for the legacy SunOS audio device interface (module removed in
# Python 3; importing it emits a py3k deprecation warning).
from warnings import warnpy3k
warnpy3k('the SUNAUDIODEV module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
# Sample encodings
ENCODING_NONE = 0
ENCODING_ULAW = 1
ENCODING_ALAW = 2
ENCODING_LINEAR = 3
# Gain and stereo-balance ranges
MIN_GAIN = 0
MAX_GAIN = 255
LEFT_BALANCE = 0
MID_BALANCE = 32
RIGHT_BALANCE = 64
BALANCE_SHIFT = 3
# Generic port identifiers
PORT_A = 1
PORT_B = 2
PORT_C = 3
PORT_D = 4
# Output ports (values look like bit flags -- confirm against device docs)
SPEAKER = 1
HEADPHONE = 2
LINE_OUT = 4
# Input ports
MICROPHONE = 1
LINE_IN = 2
CD = 4
INTERNAL_CD_IN = CD
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-sunos5\sunaudiodev.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 20:01:31 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
e5f5f2ed9f2ca264930ccfbd5c09c43a13883401
|
ee974d693ca4c4156121f8cb385328b52eaac07c
|
/env/lib/python3.6/site-packages/h5py/_hl/datatype.py
|
d692af241f56461c8de827c823faa1554b264d1b
|
[] |
no_license
|
ngonhi/Attendance_Check_System_with_Face_Recognition
|
f4531cc4dee565d0e45c02217f73f3eda412b414
|
92ff88cbc0c740ad48e149033efd38137c9be88d
|
refs/heads/main
| 2023-03-12T07:03:25.302649
| 2021-02-26T15:37:33
| 2021-02-26T15:37:33
| 341,493,686
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:ecdc9625758fdf18b86e1432c4f0eaf840a3df0001c5c84f83bc420c7ac598f4
size 1652
|
[
"Nqk180998!"
] |
Nqk180998!
|
da395f369c351d53a3417bc633bb6b772db67d10
|
f9273a89fa79c74efb931787dd5bf6d899730adb
|
/Python_file/python_westos_teacher_doc/day22/03_实现最简单的web开发.py
|
fbc24a7213ca41949ae3194548593f2a7405637a
|
[] |
no_license
|
lianlian-YE/Mypycharm
|
53148a58c6cbc07604abff801fd22b2a3212c6e7
|
004612a52300a6a99fbeedefa14ece0eeaf96556
|
refs/heads/master
| 2020-04-13T22:43:01.650018
| 2019-01-14T15:15:53
| 2019-01-14T15:15:53
| 163,486,739
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
"""
目标:
Flask应用的基本构成?
"""
# 1. 导入Flask类;
from flask import Flask, render_template
# 2. 实例化Flaks类。 生成一个实例;
# __name__结果是__main__或者模块名/包名, 根据这个参数确定项目的位置,(确定该项目的静态文件或者模板的位置);
app = Flask(__name__)
# 3. 通过路由绑定处理的视图函数;
# URL: (eg:http://127.0.0.1:5000/ )
# 装饰器@app.route()告诉Flask哪个url才能触发装饰器装饰的函数, 这个又专业的称为路由;
# 定义的函数hello, return后面的返回值是想要显示在浏览器上的内容;
@app.route('/')
def hello():
return "<h1 style='color:red'>hello python!</h1><br/><a href='/westos/'>西部开源技术中心</a>"
@app.route('/westos/')
def westos():
# 如何在flask程序中返回一个html页面;flask默认查找页面内容的位置为templates目录;
return render_template('westos.html')
if __name__ == "__main__":
# 4. 运行flask应用,
# 默认端口是5000, 如果想要修改端口,传递参数port=xxx;
# 默认情况下该web程序只能在本机浏览器访问, 如果想要其他主机访问, 指定host="0.0.0.0"
app.run(host='0.0.0.0', port=9000)
|
[
"1771764895@qq.com"
] |
1771764895@qq.com
|
bb7edc05ec8515b7ec7187ce1fed24a80cdd19ee
|
7c7c3a34b266e664cf63f710ae5aff5587672c91
|
/TutorialSeries/Pandas/Outlier Detection.py
|
2efd45237f8e7a3ffec7defd1e29d09ed123a69a
|
[] |
no_license
|
Schnei1811/PythonScripts
|
845594a886a1fecc81cf5d7c550abec325f006a3
|
89eb331357b7cea86f5b2d0b33089215b73f9481
|
refs/heads/main
| 2023-03-31T23:36:48.731570
| 2021-04-11T21:55:46
| 2021-04-11T21:55:46
| 356,950,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style

style.use('fivethirtyeight')

# Bridge height samples; 6212.42 is the obvious outlier to be removed.
bridge_height = {'meters':[10.26, 10.31, 10.27, 10.22, 10.23, 6212.42, 10.28, 10.25, 10.31]}
df = pd.DataFrame(bridge_height)

# Rolling (window=2) standard deviation per row.
# Fix: pd.rolling_std() was removed from pandas (deprecated in 0.18);
# the Series.rolling(...).std() API is the numerically identical replacement.
df['STD'] = df['meters'].rolling(2).std()
print(df)

df_std = df.describe()
print(df_std)

# Overall standard deviation of the 'meters' column.
df_std = df.describe()['meters']['std']
print(df_std)

# Keep only rows whose rolling STD is below the overall STD; this drops the
# outlier (and the first row, whose rolling STD is NaN -- NaN < x is False).
df = df[ (df['STD'] < df_std) ]
print(df)

'''
df is equal now to df, where df['STD'] is less than the overall df_std that we calculated before.
Thus, the only remaining Data here will be Data where the standard deviation is less than that 2067.
'''

df['meters'].plot()
plt.show()
|
[
"stefan871@gmail.com"
] |
stefan871@gmail.com
|
4dc4c8e77c4b1dbe4795c4926c3672a5c19aeadf
|
c537ce53f435f1c17d5fdbfe8f97405d0fb3f4f3
|
/src/apps/utils/fields.py
|
ae1a5e895451ee1b1590f2bdd8f4023874c796e4
|
[] |
no_license
|
HiPiH/life
|
dbe8892ceb9cc4aaaf6409ffb8391b4903be7fdc
|
8b0cd5906bd5c508776831368896fc935c9e044b
|
refs/heads/master
| 2021-01-10T21:33:17.478873
| 2011-12-21T05:13:55
| 2011-12-21T05:13:55
| 2,938,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from django.db import models
__all__ = ('BigIntegerField', )
class BigIntegerField(models.IntegerField):
    """64-bit integer model field (predates Django's builtin BigIntegerField)."""

    # An empty string is never a valid big integer.
    empty_strings_allowed = False

    def get_internal_type(self):
        return "BigIntegerField"

    def db_type(self):
        # 'bigint' suits PostgreSQL/MySQL/SQLite; Oracle spells it differently.
        return 'bigint'
|
[
"admin@nvk.su"
] |
admin@nvk.su
|
664e32a8982f250cb5211834d2f57d9b1dc032f0
|
79b0a4d3db7299814b963a8ff98732a6df4fe5f8
|
/worker.py
|
a87844e6fa3d49823247f731fe051eba67a746a8
|
[] |
no_license
|
jattoabdul/Ranti-bot
|
549b28cf84b47d52a767a84e759e1f66c2fee860
|
6673778ed610c2b331e2da7d8348a798b122cb35
|
refs/heads/master
| 2022-12-14T03:24:32.191750
| 2018-06-05T12:04:08
| 2018-06-05T12:04:08
| 136,028,737
| 8
| 6
| null | 2021-06-01T22:12:37
| 2018-06-04T13:24:25
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
from app.actions import Actions
from app.utils.slackhelper import SlackHelper
# Main function
def main():
    """Entry point: build the Slack helper, wire it into Actions, and notify."""
    helper = SlackHelper()
    Actions(helper).notify_channel()


if __name__ == '__main__':
    main()
|
[
"jattoade@gmail.com"
] |
jattoade@gmail.com
|
6e772685c7255e17b1203cec46dc6e0be930ed40
|
315788ed9c3727acca394ad107b0a55285a7ddc4
|
/listings/v5_ra9.py
|
d0b7aacba374b0a4b256ffe2a6c38a0b987bb53a
|
[] |
no_license
|
n04hk/Python_Zusammenfassung
|
b118e967d5d5547ad3eb88f9570cb7c9de45d443
|
923fadb28ab4609450e532f08de41dc4bf4913d1
|
refs/heads/master
| 2020-04-24T20:28:43.656148
| 2019-04-28T13:52:31
| 2019-04-28T13:52:31
| 172,245,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
def func(m):
    # Wrap the regex match's text in parentheses.
    return '({})'.format(m.group())
# Listing snippet: assumes `re` was imported earlier (presumably `import re`).
s = re.sub(r'\d+', func, '3 Stuecke kosten 250 Franken.')
print(s)
# Ausgabe: (3) Stuecke kosten (250) Franken.
|
[
"37226768+n04hk@users.noreply.github.com"
] |
37226768+n04hk@users.noreply.github.com
|
07e216c632b6520fb95391258bf9ab1b25475733
|
762742b3c5cb5706e93e12dbdc3f8c46fc65f0db
|
/Packs/ML/Scripts/DBotPredictOutOfTheBoxV2/DBotPredictOutOfTheBoxV2.py
|
adc01d9712e96b7678df42b5718f5d43aecb9715
|
[
"MIT"
] |
permissive
|
EmersonElectricCo/content
|
018f95f7fe7de13819e093a3661587a18407e348
|
82c82bbee7d428f0b14991a88c67672e2c02f5af
|
refs/heads/master
| 2021-06-17T04:54:22.938033
| 2021-05-06T16:39:59
| 2021-05-06T16:39:59
| 161,693,191
| 2
| 0
|
MIT
| 2018-12-18T15:16:49
| 2018-12-13T20:47:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,087
|
py
|
# pylint: disable=no-member
import demisto_ml
from CommonServerPython import *
import traceback
# Precision target the bundled model was tuned for, and the confidence
# cut-off applied to its predictions.
TARGET_PRECISION = 0.97
THRESHOLD = 0.9
# Name under which the out-of-the-box model is registered, and its on-disk path.
OUT_OF_THE_BOX_MODEL_NAME = 'demisto_out_of_the_box_model_v2'
OUT_OF_THE_BOX_MODEL_PATH = '/ml/encrypted_model.b'
# Pre-computed hold-out evaluation shipped alongside the model.
EVALUATION_PATH = '/ml/oob_evaluation.txt'
# Version stamp stored with the model so stale installs get refreshed.
SCRIPT_MODEL_VERSION = '1.0'
OOB_VERSION_INFO_KEY = 'oob_version'
def oob_model_exists_and_updated():
    """Return True iff the OOB model is installed and carries the current version stamp."""
    lookup = demisto.executeCommand("getMLModel", {"modelName": OUT_OF_THE_BOX_MODEL_NAME})[0]
    if is_error(lookup):
        return False
    extra = lookup['Contents']['model']['extra']
    return extra.get(OOB_VERSION_INFO_KEY, -1) == SCRIPT_MODEL_VERSION
def load_oob_model():
    """Install the bundled out-of-the-box phishing model into Demisto.

    Decodes the encrypted model file, registers it via ``createMLModel``,
    then replays the shipped offline evaluation so the model page shows
    metrics.  Any failure aborts the script via ``return_error``.
    """
    try:
        encoded_model = demisto_ml.load_oob(OUT_OF_THE_BOX_MODEL_PATH)
    except Exception:
        return_error(traceback.format_exc())
    res = demisto.executeCommand('createMLModel', {'modelData': encoded_model.decode('utf8'),
                                                   'modelName': OUT_OF_THE_BOX_MODEL_NAME,
                                                   'modelLabels': ['Malicious', 'Non-Malicious'],
                                                   'modelOverride': 'true',
                                                   'modelType': 'torch',
                                                   'modelExtraInfo': {'threshold': THRESHOLD,
                                                                      OOB_VERSION_INFO_KEY: SCRIPT_MODEL_VERSION
                                                                      }
                                                   })
    if is_error(res):
        return_error(get_error(res))
    # Pre-computed hold-out predictions shipped with the model.
    with open(EVALUATION_PATH, 'r') as json_file:
        data = json.load(json_file)
    y_test = data['YTrue']
    y_pred = data['YPred']
    y_pred_prob = data['YPredProb']
    # GetMLModelEvaluation expects each prediction as a {label: probability} dict.
    y_pred_evaluation = [{pred: prob} for pred, prob in zip(y_pred, y_pred_prob)]
    res = demisto.executeCommand('GetMLModelEvaluation', {'yTrue': json.dumps(y_test),
                                                          'yPred': json.dumps(y_pred_evaluation),
                                                          'targetPrecision': str(0.85),
                                                          'targetRecall': str(0),
                                                          'detailedOutput': 'true'
                                                          })
    if is_error(res):
        return_error(get_error(res))
    # Strip the 'All' margin row and columns before storing the confusion matrix.
    confusion_matrix = json.loads(res[0]['Contents']['csr_matrix_at_threshold'])
    confusion_matrix_no_all = {k: v for k, v in confusion_matrix.items() if k != 'All'}
    confusion_matrix_no_all = {k: {sub_k: sub_v for sub_k, sub_v in v.items() if sub_k != 'All'}
                               for k, v in confusion_matrix_no_all.items()}
    res = demisto.executeCommand('evaluateMLModel',
                                 {'modelConfusionMatrix': confusion_matrix_no_all,
                                  'modelName': OUT_OF_THE_BOX_MODEL_NAME,
                                  'modelEvaluationVectors': {'Ypred': y_pred,
                                                             'Ytrue': y_test,
                                                             'YpredProb': y_pred_prob
                                                             },
                                  'modelConfidenceThreshold': THRESHOLD,
                                  'modelTargetPrecision': TARGET_PRECISION
                                  })
    if is_error(res):
        return_error(get_error(res))
def predict_phishing_words():
    """Ensure the OOB model is installed, then delegate to DBotPredictPhishingWords."""
    if not oob_model_exists_and_updated():
        load_oob_model()
    # Forward the caller's arguments, forcing the model name to the OOB model.
    args = dict(demisto.args())
    args['modelName'] = OUT_OF_THE_BOX_MODEL_NAME
    res = demisto.executeCommand('DBotPredictPhishingWords', args)
    if is_error(res):
        return_error(get_error(res))
    return res
def main():
    # Thin wrapper so the script entry point stays testable.
    res = predict_phishing_words()
    return res


# Demisto executes scripts under several module names depending on the runtime.
if __name__ in ['__main__', '__builtin__', 'builtins']:
    demisto.results(main())
|
[
"noreply@github.com"
] |
EmersonElectricCo.noreply@github.com
|
be236401a50222b0114e1126ee9946d74187e9dd
|
3cdb4faf34d8375d6aee08bcc523adadcb0c46e2
|
/web/env/lib/python3.6/site-packages/django/db/backends/base/validation.py
|
a02780a6947b0d164adbcf26dfc8a43433a65b07
|
[
"MIT",
"GPL-3.0-only"
] |
permissive
|
rizwansoaib/face-attendence
|
bc185d4de627ce5adab1cda7da466cb7a5fddcbe
|
59300441b52d32f3ecb5095085ef9d448aef63af
|
refs/heads/master
| 2020-04-25T23:47:47.303642
| 2019-09-12T14:26:17
| 2019-09-12T14:26:17
| 173,157,284
| 45
| 12
|
MIT
| 2020-02-11T23:47:55
| 2019-02-28T17:33:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
class BaseDatabaseValidation:
    """Encapsulate backend-specific validation."""

    def __init__(self, connection):
        self.connection = connection

    def check(self, **kwargs):
        """Backend-wide checks; the base implementation reports nothing."""
        return []

    def check_field(self, field, **kwargs):
        """Validate a single model field against backend capabilities."""
        # Only backends that define check_field_type() have anything to do.
        if not hasattr(self, 'check_field_type'):
            return []
        # Related fields are exempt from the type check.
        if getattr(field, 'remote_field', None):
            return []
        # Skip fields whose required database features this backend lacks.
        features = self.connection.features
        for feature in field.model._meta.required_db_features:
            if not getattr(features, feature, False):
                return []
        field_type = field.db_type(self.connection)
        # Non-concrete fields have no column, hence no type to validate.
        if field_type is None:
            return []
        return list(self.check_field_type(field, field_type))
|
[
"rizwansoaib@gmail.com"
] |
rizwansoaib@gmail.com
|
1ddf2f22b39151a8fd975399994f0c47d007c8ef
|
5f2103b1083b088aed3f3be145d01a770465c762
|
/169. Majority Element.py
|
ef741962e751483cad719893fc83c186e65afd3e
|
[] |
no_license
|
supersj/LeetCode
|
5605c9bcb5ddcaa83625de2ad9e06c3485220019
|
690adf05774a1c500d6c9160223dab7bcc38ccc1
|
refs/heads/master
| 2021-01-17T17:23:39.585738
| 2017-02-27T15:08:42
| 2017-02-27T15:08:42
| 65,526,089
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
# Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always exist in the array.
#
# Credits:
# Special thanks to @ts for adding this problem and creating all test cases.
#
# Subscribe to see which companies asked this question
class Solution(object):
    def majorityElement(self, nums):
        """Boyer-Moore majority vote.

        :type nums: List[int]
        :rtype: int

        Assumes a majority element (> n/2 occurrences) exists, as the
        problem statement guarantees; under that guarantee the surviving
        candidate is the majority element.
        """
        major = nums[0]
        count = 1
        for i in range(1, len(nums)):
            if count == 0:
                # Bug fix: the original only incremented count here and never
                # adopted the new candidate, so e.g. [1, 2, 2] returned 1.
                major = nums[i]
                count = 1
            elif major == nums[i]:
                count += 1
            else:
                count -= 1
        return major
|
[
"ml@ml.ml"
] |
ml@ml.ml
|
fcca03f588d55858da75addbb82353a1568ff909
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2234/60632/279655.py
|
5ccddbecc0421ed6a32a6f18b48171c5099a2573
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
# x: node currently visited, time: timestamp, n: total number of nodes
def tarjan(x: int, time: int, n: int):
    """Tarjan SCC depth-first search over the global adjacency matrix `adj`.

    Appends each completed strongly connected component (a list of node
    indices) to the global `result`; also uses globals `dfn`, `low`, `stack`.
    """
    time += 1  # NOTE(review): `time` is a plain int parameter, so sibling
    # recursive calls reuse timestamps, and callers seed it with the node
    # index -- nonstandard; verify this yields unique dfn values as intended.
    dfn[x] = dfn[x] = low[x] = dfn[x]  # (placeholder removed)
    dfn[x] = low[x] = time
    stack.append(x)
    for y in range(n):
        if adj[x][y] == 1:
            if dfn[y] == 0:
                # Tree edge: recurse, then pull up the child's low-link.
                tarjan(y, time, n)
                low[x] = min(low[x], low[y])
            elif y in stack:
                # NOTE(review): textbook Tarjan uses dfn[y] here; taking
                # low[y] is a common variant -- confirm equivalence intended.
                low[x] = min(low[x], low[y])
    if dfn[x] == low[x]:
        # x is an SCC root: pop the whole component off the stack.
        tmp = []
        while stack[-1] != x:
            tmp.append(stack.pop())
        tmp.append(stack.pop())
        result.append(tmp)
n = int(input())  # number of spies
p = int(input())  # number of spies willing to be bought
money = []  # purchase cost entries: [spy_id, price]
for i in range(p):
    money.append(list(map(int, input().split(' '))))
r = int(input())  # number of edges in the graph
link = []  # edges of the graph
for i in range(r):
    link.append(list(map(int, input().split(' '))))
adj = [[0 for i in range(n)] for j in range(n)]  # adjacency matrix
for i in link:  # build the adjacency matrix (input node ids are 1-indexed)
    adj[i[0]-1][i[1]-1] = 1
dfn = [0 for i in range(n)]
low = [0 for i in range(n)]
stack = []
result = []
for i in range(n):  # condense the graph via Tarjan's SCC algorithm
    if dfn[i] == 0:
        tarjan(i, i, n)
print(result)
# Nodes that would need to be bought but cannot be: in-degree 0 and not in
# the purchasable list.  NOTE(review): `i` is 0-indexed while the ids in
# `money` appear 1-indexed -- verify the comparison is intentional.
need = []
for i in range(n):
    col = [adj[j][i] for j in range(n)]
    if 1 not in col and i not in [j[0] for j in money]:
        need.append(i)
print(need)
print([i[0] for i in money])
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
fab46677e2e984639881af549272b656e8a58621
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/tree-20.py
|
8efa7e396a5213ec09e22d51c9664f39cc8e494f
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
# Binary-search trees
# NOTE(review): this is ChocoPy benchmark source, not plain Python; `$Type`
# is a template placeholder (presumably "TreeNode") -- confirm before running.
class TreeNode(object):
    value:int = 0
    left:$Type = None
    right:"TreeNode" = None

    def insert(self:"TreeNode", x:int) -> bool:
        # Insert x into the subtree rooted here; returns False on duplicates.
        if x < self.value:
            if self.left is None:
                self.left = makeNode(x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode(x)
                return True
            else:
                return self.right.insert(x)
        return False

    def contains(self:"TreeNode", x:int) -> bool:
        # Standard BST membership test.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class Tree(object):
    # BST wrapper tracking the number of distinct values inserted.
    root:TreeNode = None
    size:int = 0

    def insert(self:"Tree", x:int) -> object:
        # Grow size only when the value was not already present.
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1

    def contains(self:"Tree", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    # Leaf-node factory: value x, no children.
    b:TreeNode = None
    b = TreeNode()
    b.value = x
    return b
# Input parameters
n:int = 100  # number of pseudo-random insertions
c:int = 4    # every c-th iteration skips the extra insert of i

# Data
t:Tree = None
i:int = 0
k:int = 37813  # seed for the multiplicative generator below

# Crunch
t = Tree()
while i < n:
    t.insert(k)
    k = (k * 37813) % 37831  # simple multiplicative congruential sequence
    if i % c != 0:
        t.insert(i)
    i = i + 1

print(t.size)

# Report which of these sentinel values ended up in the tree.
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
d31c9d70adba120acd6845805f35c9d9fa7cf28e
|
6ed6171b313cd485764d6d952b0f06c07d078f5d
|
/api/permissions.py
|
1fdded12e31dbd62b79d671d54169e99c8374fba
|
[] |
no_license
|
yezyilomo/marathon
|
db802724cecf0360a3e9864d70402d029a10e7b6
|
75e4fd42ffe37e9b6b1d1bf6610a8bea6b12113e
|
refs/heads/master
| 2021-05-26T16:15:47.632053
| 2020-04-08T16:08:29
| 2020-04-08T16:08:29
| 254,133,385
| 3
| 0
| null | 2020-04-08T16:08:30
| 2020-04-08T15:54:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
from rest_framework import permissions
class IsAllowedUser(permissions.BasePermission):
    """
    Object-level permission: grant access only when the object IS the
    requesting user.
    """

    def has_object_permission(self, request, view, obj):
        return obj == request.user
class IsCategoryOwner(permissions.BasePermission):
    """
    Object-level permission: only the organizer of the category's marathon
    may access it.
    """

    def has_object_permission(self, request, view, obj):
        return obj.marathon.organizer == request.user
class IsSponsorOwner(permissions.BasePermission):
    """
    Object-level permission: only the organizer of the sponsor's marathon
    may access it.
    """

    def has_object_permission(self, request, view, obj):
        return obj.marathon.organizer == request.user
class IsMarathonOwner(permissions.BasePermission):
    """
    Object-level permission: only the marathon's organizer may access it.
    """

    def has_object_permission(self, request, view, obj):
        return obj.organizer == request.user
class IsPaymentOwner(permissions.BasePermission):
    """
    Object-level permission: the paying user or the organizer of the
    related marathon may access the payment.
    """

    def has_object_permission(self, request, view, obj):
        return obj.user == request.user or obj.marathon.organizer == request.user
class IsAdminUser(permissions.BasePermission):
    """
    View-level permission: requires an authenticated user whose is_admin
    flag is set.
    """

    def has_permission(self, request, view):
        return request.user.is_authenticated and request.user.is_admin
|
[
"yezileliilomo@hotmail.com"
] |
yezileliilomo@hotmail.com
|
2359691b0b5c80c8263fd3b40be86eedd64a19ee
|
0c70dcec22a090e70b1f20613ea6e0a64fd9a037
|
/GPS卫星位置的计算/venv/Lib/site-packages/pandas/tests/io/test_gcs.py
|
b80320e7700f52123bfb6001760260a947bec930
|
[
"MIT"
] |
permissive
|
payiz-asj/Gis
|
82c1096d830878f62c7a0d5dfb6630d4e4744764
|
3d315fed93e2ab850b836ddfd7a67f5618969d10
|
refs/heads/main
| 2023-06-27T15:25:17.301154
| 2021-08-03T10:02:58
| 2021-08-03T10:02:58
| 392,269,853
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,090
|
py
|
from io import BytesIO
import os
import numpy as np
import pytest
from pandas import DataFrame, date_range, read_csv
import pandas._testing as tm
from pandas.util import _test_decorators as td
@td.skip_if_no("gcsfs")
def test_read_csv_gcs(monkeypatch):
    """read_csv on a gs:// URL round-trips through a mocked GCS filesystem."""
    from fsspec import AbstractFileSystem, registry

    registry.target.clear()  # noqa # remove state
    df1 = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    class MockGCSFileSystem(AbstractFileSystem):
        # Serve the frame's CSV bytes regardless of the requested path.
        def open(*args, **kwargs):
            return BytesIO(df1.to_csv(index=False).encode())

    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
    df2 = read_csv("gs://test/test.csv", parse_dates=["dt"])
    tm.assert_frame_equal(df1, df2)
@td.skip_if_no("gcsfs")
def test_to_csv_gcs(monkeypatch):
    """to_csv to a gs:// URL writes through a mocked filesystem and reads back equal."""
    from fsspec import AbstractFileSystem, registry

    registry.target.clear()  # noqa # remove state
    df1 = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    # Shared in-memory buffer; close() is neutered so the writer can't discard it.
    s = BytesIO()
    s.close = lambda: True

    class MockGCSFileSystem(AbstractFileSystem):
        def open(*args, **kwargs):
            s.seek(0)
            return s

    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
    df1.to_csv("gs://test/test.csv", index=True)

    # Bypass the fs layer on read: hand back the expected CSV bytes directly.
    def mock_get_filepath_or_buffer(*args, **kwargs):
        return BytesIO(df1.to_csv(index=True).encode()), None, None, False

    monkeypatch.setattr(
        "pandas.io.common.get_filepath_or_buffer", mock_get_filepath_or_buffer
    )
    df2 = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0)
    tm.assert_frame_equal(df1, df2)
@td.skip_if_no("fastparquet")
@td.skip_if_no("gcsfs")
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
class MockGCSFileSystem(AbstractFileSystem):
def open(self, path, mode="r", *args):
if "w" not in mode:
raise FileNotFoundError
return open(os.path.join(tmpdir, "test.parquet"), mode)
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
df1.to_parquet(
"gs://test/test.csv", index=True, engine="fastparquet", compression=None
)
@td.skip_if_installed("gcsfs")
def test_gcs_not_present_exception():
    # Without gcsfs installed, a gs:// path must raise a helpful ImportError.
    with pytest.raises(ImportError) as e:
        read_csv("gs://test/test.csv")
    assert "gcsfs library is required" in str(e.value)
|
[
"1778029840@qq.com"
] |
1778029840@qq.com
|
e010af64e48efa09d26f1e4b7f943a02082dace2
|
bbe447a740929eaee1955bd9c1517cf760dd5cb9
|
/keygrabber/adwords/adwords_api_python_14.2.1/adspygoogle/adwords/zsi/v201008/AdGroupAdService_services.py
|
bce9883a53e9d6a36b68f760daeabf22fc68622c
|
[
"Apache-2.0"
] |
permissive
|
MujaahidSalie/aranciulla
|
f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893
|
34197dfbdb01479f288611a0cb700e925c4e56ce
|
refs/heads/master
| 2020-09-07T02:16:25.261598
| 2011-11-01T21:20:46
| 2011-11-01T21:20:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,205
|
py
|
##################################################
# AdGroupAdService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from AdGroupAdService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class AdGroupAdServiceLocator:
    """Resolves the AdWords AdGroupAdService endpoint and builds bound proxies."""
    AdGroupAdServiceInterface_address = "https://adwords.google.com:443/api/adwords/cm/v201008/AdGroupAdService"

    def getAdGroupAdServiceInterfaceAddress(self):
        # Default endpoint URL for the v201008 service.
        return AdGroupAdServiceLocator.AdGroupAdServiceInterface_address

    def getAdGroupAdServiceInterface(self, url=None, **kw):
        # Build a SOAP binding, overriding the endpoint with `url` when given.
        return AdGroupAdServiceSoapBindingSOAP(url or AdGroupAdServiceLocator.AdGroupAdServiceInterface_address, **kw)
# Methods
class AdGroupAdServiceSoapBindingSOAP:
    """ZSI-generated SOAP client binding for AdGroupAdService (Python 2 syntax)."""

    def __init__(self, url, **kw):
        kw.setdefault("readerclass", None)
        kw.setdefault("writerclass", None)
        # no resource properties
        self.binding = client.Binding(url=url, **kw)
        # no ws-addressing

    # get: getAdGroupAd
    def getAdGroupAd(self, request):
        # Reject anything other than the generated request message class.
        if isinstance(request, getAdGroupAdRequest) is False:
            raise TypeError, "%s incorrect request type" % (request.__class__)
        kw = {}
        # no input wsaction
        self.binding.Send(None, None, request, soapaction="", **kw)
        # no output wsaction
        response = self.binding.Receive(getAdGroupAdResponse.typecode)
        return response

    # mutate: getAdGroupAd
    def mutateAdGroupAd(self, request):
        if isinstance(request, mutateAdGroupAdRequest) is False:
            raise TypeError, "%s incorrect request type" % (request.__class__)
        kw = {}
        # no input wsaction
        self.binding.Send(None, None, request, soapaction="", **kw)
        # no output wsaction
        response = self.binding.Receive(mutateAdGroupAdResponse.typecode)
        return response
getAdGroupAdRequest = ns0.getAdGroupAd_Dec().pyclass
getAdGroupAdResponse = ns0.getAdGroupAdResponse_Dec().pyclass
mutateAdGroupAdRequest = ns0.mutateAdGroupAd_Dec().pyclass
mutateAdGroupAdResponse = ns0.mutateAdGroupAdResponse_Dec().pyclass
|
[
"vincenzo.ampolo@gmail.com"
] |
vincenzo.ampolo@gmail.com
|
39319351c0c7faafc21e10a864c35e6716a3785f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/945.py
|
7863ed26a1e33eacbab4fd0315c93eed52357ba7
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
#/usr/bin/python3
def solve(N):
N = list(N)
res = ""
prev = 0
while N:
act = int(N.pop(0))
#print(prev, act)
if prev <= act:
res += str(prev)
prev = act
else:
res += str(prev-1)
res += "9"*len(N)
prev = 9
break
res += str(prev)
return str(int(res))
T = int(input())
for t in range(T):
N = input()
while True:
M = solve(N)
if M == N:
break
else:
N = M
print("Case #{0}: {1}".format(t+1, int(N)))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
76b05533f9e6516133fbc32a4c854068b3945a09
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225511.py
|
4f21199c88011b2b2d4020bf52f226457160cd94
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
def process_item(self, item, spider):
films_name = item['films_name']
films_type = item['films_type']
release_time = item['release_time']
output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
with open('./week01/homework02/top10.csv',encoding='utf-8') as article:
article.write
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
a7fa0c2b13e99441b55229c95b761c210c29ac52
|
253089ef4ee99c50cdaa23fde4d789794789e2e9
|
/134/test_twosums.py
|
842a20ecb4b75611347c73b305dd9b21be332816
|
[] |
no_license
|
Zaubeerer/bitesofpy
|
194b61c5be79c528cce3c14b9e2c5c4c37059259
|
e5647a8a7a28a212cf822abfb3a8936763cd6b81
|
refs/heads/master
| 2021-01-01T15:01:21.088411
| 2020-11-08T19:56:30
| 2020-11-08T19:56:30
| 239,328,990
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
import pytest
from random import sample, seed
from twosums import two_sums
NUMBERS = [
2202, 9326, 1034, 4180, 1932, 8118, 7365, 7738, 6220, 3440, 1538, 7994, 465,
6387, 7091, 9953, 35, 7298, 4364, 3749, 9686, 1675, 5201, 502, 366, 417,
8871, 151, 6246, 3549, 6916, 476, 8645, 3633, 7175, 8124, 9059, 3819, 5664,
3783, 3585, 7531, 4748, 353, 6819, 9117, 1639, 3046, 4857, 1981]
def test_two_sums():
"""Test of the example given in the description"""
numbers = [3, 10, 14, 8, 15, 5, 16, 13, 9, 2]
expected = (2, 6)
target = 30
result = two_sums(numbers, target)
assert result == expected
@pytest.mark.parametrize("target, expected", [
(10093, (2, 36)),
(7067, (27, 30)),
(11261, (0, 36)),
(11350, (37, 41)),
(5224, (31, 42)),
(2934785974, None),
])
def test_two_sums_param(target, expected):
result = two_sums(NUMBERS, target)
assert result == expected
def test_two_sums_random():
seed(1)
numbers = sample(range(1, 1_000_000), 1_000)
picked = sample(numbers, 2)
index1 = numbers.index(picked[0])
index2 = numbers.index(picked[1])
ordered = sorted([index1, index2])
expected = ordered[0], ordered[1]
target = sum(picked)
result = two_sums(numbers, target)
assert result == expected
def test_two_sums_none():
result = two_sums(NUMBERS, 7000)
assert result is None
|
[
"r.beer@outlook.de"
] |
r.beer@outlook.de
|
0eb6589094d8dfef39e0ec486bb16c31b27fc3f3
|
36e27ca74b734994fb2e5cd4e328e7b82202d8cd
|
/nodarb/migrations/0003_telpa.py
|
1fe984dec5f7810becd4c75505ce077de3cfe3d1
|
[] |
no_license
|
svabis/vf
|
5e9513f3a767a9561e2fb8bd3e37bb3c03d113dd
|
d83a4afd177e4f7007a9ce824ae5ed36f18654fc
|
refs/heads/master
| 2020-05-21T21:19:59.952463
| 2018-06-04T11:11:50
| 2018-06-04T11:11:50
| 84,647,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nodarb', '0002_auto_20170311_1322'),
]
operations = [
migrations.CreateModel(
name='Telpa',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('telpa', models.CharField(max_length=5, choices=[(b'L', b'liel\xc4\x81 z\xc4\x81le'), (b'M', b'maz\xc4\x81 z\xc4\x81le'), (b'G', b'gym z\xc4\x81le'), (b'V', b'velo z\xc4\x81le'), (b'C', b'c\xc4\xab\xc5\x86u z\xc4\x81le')])),
],
options={
'db_table': 'telpa',
'verbose_name': 'Telpa',
},
),
]
|
[
"fizmats@inbox.lv"
] |
fizmats@inbox.lv
|
2157e48e1a135e8fc11033df646c00f9085d895f
|
5930f323d96e7ed45c01fef63b100e1ad220f764
|
/catalyst/core/callbacks/__init__.py
|
14c691fa96ccc1abd7e4f3bc8f85c97e1378d05c
|
[
"Apache-2.0"
] |
permissive
|
saswat0/catalyst
|
8cb91c2392bccdbdd318544e6861e6fe6ac39b33
|
a35297ecab8d1a6c2f00b6435ea1d6d37ec9f441
|
refs/heads/master
| 2023-04-05T00:43:29.124864
| 2020-06-18T05:41:33
| 2020-06-18T05:41:33
| 272,268,902
| 2
| 0
|
Apache-2.0
| 2020-06-18T05:41:34
| 2020-06-14T19:24:04
| null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# flake8: noqa
from .checkpoint import CheckpointCallback, IterationCheckpointCallback
from .criterion import CriterionCallback
from .early_stop import CheckRunCallback, EarlyStoppingCallback
from .exception import ExceptionCallback
from .logging import ConsoleLogger, TensorboardLogger, VerboseLogger
from .metrics import (
MetricAggregationCallback,
MetricCallback,
MetricManagerCallback,
MultiMetricCallback,
)
from .optimizer import OptimizerCallback
from .scheduler import LRUpdater, SchedulerCallback
from .timer import TimerCallback
from .validation import ValidationManagerCallback
|
[
"noreply@github.com"
] |
saswat0.noreply@github.com
|
1f25baa28cba0a1250c9712f3a1de7ccb89556b5
|
d9aa4291a4978b932bef84b8d26aa4b911ca2add
|
/day111Flask前戏/02偏函数.py
|
d5009db7bda2799235778291411ea579600ab40d
|
[] |
no_license
|
SelfShadows/my_git
|
9a32d3713efb1b055d04c813b319eb2196fdcf53
|
b10a4c838e1146b3f6ce297480840de9a8e89206
|
refs/heads/master
| 2020-12-15T22:33:49.273814
| 2020-02-14T16:33:46
| 2020-02-14T16:33:46
| 235,274,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
import functools
def index(a,b):
return a+b
# 原来的调用方法
ret = index(3,1)
print(ret)
# 偏函数, 帮助开发者自动传递参数
new_func = functools.partial(index, 55)
ret = new_func(1)
print(ret)
|
[
"870670791@qq.com"
] |
870670791@qq.com
|
aea2810a65c8473673713d7073b868aa0057c771
|
8c4ef53ec6c7df2eeeb633a53d1d931558596366
|
/propertyestimator/protocols/storage.py
|
5ed3b8a157424797084b7132befd9c807d2fdb0d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MSchauperl/propertyestimator
|
ff7bf2d3b6bc441141258483ec991f8806b09469
|
9a67cb61498024c511f9bbe55536ac8e1a3c93be
|
refs/heads/master
| 2020-09-08T07:04:39.660322
| 2019-11-08T21:15:23
| 2019-11-08T21:15:23
| 221,055,340
| 0
| 0
|
NOASSERTION
| 2019-11-14T21:47:11
| 2019-11-11T19:34:28
| null |
UTF-8
|
Python
| false
| false
| 7,869
|
py
|
"""
A collection of protocols for loading cached data off of the disk.
"""
import json
from os import path
from typing import Union
from propertyestimator.storage.dataclasses import StoredDataCollection
from propertyestimator.substances import Substance
from propertyestimator.thermodynamics import ThermodynamicState
from propertyestimator.utils.exceptions import PropertyEstimatorException
from propertyestimator.utils.serialization import TypedJSONDecoder, TypedJSONEncoder
from propertyestimator.workflow.decorators import protocol_input, protocol_output, UNDEFINED
from propertyestimator.workflow.plugins import register_calculation_protocol
from propertyestimator.workflow.protocols import BaseProtocol
@register_calculation_protocol()
class UnpackStoredDataCollection(BaseProtocol):
"""Loads a `StoredDataCollection` object from disk,
and makes its inner data objects easily accessible to other protocols.
"""
input_data_path = protocol_input(
docstring='A tuple which contains both the path to the simulation data object, '
'it\'s ancillary data directory, and the force field which was used '
'to generate the stored data.',
type_hint=Union[list, tuple],
default_value=UNDEFINED
)
collection_data_paths = protocol_output(
docstring='A dictionary of data object path, data directory path and '
'force field path tuples partitioned by the unique collection '
'keys.',
type_hint=dict
)
def execute(self, directory, available_resources):
if len(self.input_data_path) != 3:
return PropertyEstimatorException(directory=directory,
message='The input data path should be a tuple '
'of a path to the data object, directory, and a path '
'to the force field used to generate it.')
data_object_path = self.input_data_path[0]
data_directory = self.input_data_path[1]
force_field_path = self.input_data_path[2]
if not path.isfile(data_object_path):
return PropertyEstimatorException(directory=directory,
message='The path to the data object'
'is invalid: {}'.format(data_object_path))
if not path.isdir(data_directory):
return PropertyEstimatorException(directory=directory,
message='The path to the data directory'
'is invalid: {}'.format(data_directory))
if not path.isfile(force_field_path):
return PropertyEstimatorException(directory=directory,
message='The path to the force field'
'is invalid: {}'.format(force_field_path))
with open(data_object_path, 'r') as file:
data_object = json.load(file, cls=TypedJSONDecoder)
if not isinstance(data_object, StoredDataCollection):
return PropertyEstimatorException(directory=directory,
message=f'The data object must be a `StoredDataCollection` '
f'and not a {type(data_object)}')
self.collection_data_paths = {}
for data_key, inner_data_object in data_object.data.items():
inner_object_path = path.join(directory, f'{data_key}.json')
inner_directory_path = path.join(data_directory, data_key)
with open(inner_object_path, 'w') as file:
json.dump(inner_data_object, file, cls=TypedJSONEncoder)
self.collection_data_paths[data_key] = (inner_object_path,
inner_directory_path,
force_field_path)
return self._get_output_dictionary()
@register_calculation_protocol()
class UnpackStoredSimulationData(BaseProtocol):
"""Loads a `StoredSimulationData` object from disk,
and makes its attributes easily accessible to other protocols.
"""
simulation_data_path = protocol_input(
docstring='A list / tuple which contains both the path to the simulation data '
'object, it\'s ancillary data directory, and the force field which '
'was used to generate the stored data.',
type_hint=Union[list, tuple],
default_value=UNDEFINED
)
substance = protocol_output(
docstring='The substance which was stored.',
type_hint=Substance
)
total_number_of_molecules = protocol_output(
docstring='The total number of molecules in the stored system.',
type_hint=int
)
thermodynamic_state = protocol_output(
docstring='The thermodynamic state which was stored.',
type_hint=ThermodynamicState
)
statistical_inefficiency = protocol_output(
docstring='The statistical inefficiency of the stored data.',
type_hint=float
)
coordinate_file_path = protocol_output(
docstring='A path to the stored simulation output coordinates.',
type_hint=str
)
trajectory_file_path = protocol_output(
docstring='A path to the stored simulation trajectory.',
type_hint=str
)
statistics_file_path = protocol_output(
docstring='A path to the stored simulation statistics array.',
type_hint=str
)
force_field_path = protocol_output(
docstring='A path to the force field parameters used to generate the stored data.',
type_hint=str
)
def execute(self, directory, available_resources):
if len(self.simulation_data_path) != 3:
return PropertyEstimatorException(directory=directory,
message='The simulation data path should be a tuple '
'of a path to the data object, directory, and a path '
'to the force field used to generate it.')
data_object_path = self.simulation_data_path[0]
data_directory = self.simulation_data_path[1]
force_field_path = self.simulation_data_path[2]
if not path.isdir(data_directory):
return PropertyEstimatorException(directory=directory,
message='The path to the data directory'
'is invalid: {}'.format(data_directory))
if not path.isfile(force_field_path):
return PropertyEstimatorException(directory=directory,
message='The path to the force field'
'is invalid: {}'.format(force_field_path))
with open(data_object_path, 'r') as file:
data_object = json.load(file, cls=TypedJSONDecoder)
self.substance = data_object.substance
self.total_number_of_molecules = data_object.total_number_of_molecules
self.thermodynamic_state = data_object.thermodynamic_state
self.statistical_inefficiency = data_object.statistical_inefficiency
self.coordinate_file_path = path.join(data_directory, data_object.coordinate_file_name)
self.trajectory_file_path = path.join(data_directory, data_object.trajectory_file_name)
self.statistics_file_path = path.join(data_directory, data_object.statistics_file_name)
self.force_field_path = force_field_path
return self._get_output_dictionary()
|
[
"simon.boothroyd@choderalab.org"
] |
simon.boothroyd@choderalab.org
|
71a3bb66245a57da0b9a687f7218be571dc7bd17
|
66a9c25cf0c53e2c3029b423018b856103d709d4
|
/sleekxmpp/plugins/xep_0080/__init__.py
|
cad23d221c641d7c8215c12347b0c7f54ddb50af
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
fritzy/SleekXMPP
|
1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf
|
cc1d470397de768ffcc41d2ed5ac3118d19f09f5
|
refs/heads/develop
| 2020-05-22T04:14:58.568822
| 2020-02-18T22:54:57
| 2020-02-18T22:54:57
| 463,405
| 658
| 254
|
NOASSERTION
| 2023-06-27T20:05:54
| 2010-01-08T05:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 388
|
py
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz, Erik Reuterborg Larsson
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.base import register_plugin
from sleekxmpp.plugins.xep_0080.stanza import Geoloc
from sleekxmpp.plugins.xep_0080.geoloc import XEP_0080
register_plugin(XEP_0080)
|
[
"lancestout@gmail.com"
] |
lancestout@gmail.com
|
8d2ecc7056b9b55738c10426695a9662c51431d1
|
d9f6f439300d298246c37ccfb881e8e8af4fda22
|
/cfp/management/commands/pgimport.py
|
b2d47ebf1f43914e8cb93b5055a27b966d35cd71
|
[
"MIT"
] |
permissive
|
ajlozier/speakers
|
e62b8d346a58a034998860d1b42a38b00cbdbd23
|
d7d87c99b1cfa5f9df5455f737385115d9d5279c
|
refs/heads/master
| 2021-09-08T19:33:08.894305
| 2018-03-12T00:54:10
| 2018-03-12T00:54:10
| 122,101,157
| 0
| 0
| null | 2018-02-19T18:08:18
| 2018-02-19T18:08:18
| null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
import os
import subprocess as sh
from cfp.management.base import SentryCommand
class Command(SentryCommand):
help = 'Import the production database locally'
def handle(self, *args, **options):
if not os.environ['ENVIRONMENT'] == 'DEVELOPMENT':
raise ValueError('This command can only be run in development')
try:
sh.check_call(['dropdb', 'speakers'])
sh.check_call(['createdb', 'speakers'])
sh.check_call(['heroku', 'pgbackups:capture'])
url = sh.check_output(['heroku', 'pgbackups:url'])
sh.check_call(['curl', '-o', 'latest.dump', url])
sh.call(['pg_restore', '--verbose', '--clean', '--no-acl',
'--no-owner', '-j', '2', '-h', 'localhost', '-d',
'speakers', 'latest.dump'])
finally:
if os.path.exists('latest.dump'):
os.unlink('latest.dump')
|
[
"kyle@kyleconroy.com"
] |
kyle@kyleconroy.com
|
a8116934e376d20bbab74c6b1f04d617a5ffe9ec
|
1484f2311bc250a2ffd3841ec225855ad1e49ede
|
/web/trpo_plot.py
|
e9e3a8343ec3e49b21f05983053c327efc1e0bab
|
[
"Apache-2.0"
] |
permissive
|
seba-1511/dtrpo.tf
|
68527e2e043d1afc5545a961a5542447d66c658e
|
af6c1376eff0c82e591374b785a3d460407d3663
|
refs/heads/master
| 2021-03-27T13:00:22.816806
| 2017-09-14T20:38:24
| 2017-09-14T20:38:24
| 68,479,754
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,337
|
py
|
#!/usr/bin/env python
import numpy as np
from seb.plot import Plot3D, Plot, Container, Animation
def grad_descent(x, y, dfnx, dfny, alpha=0.2, length=50):
trace = [(x, y)]
for _ in range(length):
x = x - alpha * dfnx(x)
y = y - alpha * dfny(y)
trace.append((x, y))
return np.array(trace), (x, y)
if __name__ == '__main__':
point_considered = -36
x_init = -1.9
y_init = -1
x = np.linspace(-7, 7, 50)
# 3D example
fn = lambda x, y: -np.sin(x / 2.0) + y**2
dfnx = lambda x: -0.5 * np.cos(x/2.0)
dfny = lambda y: 2*y
fig3d = Plot3D()
fig3d.surface(x, np.cos(x + 0.5), fn)
# fig3d.projection(x, np.cos(x + 0.5), fn)
fig3d.set_camera(45, 66)
fig3d.set_axis('x axis', 'y axis', 'z axis')
trace, (x_final, y_final) = grad_descent(x_init, y_init, dfnx, dfny)
fig3d.scatter(x=[trace[point_considered, 0], ],
y=[trace[point_considered, 1], ],
z=fn,
s=350.0, label='Trust Region')
fig3d.plot(x=trace[:, 0], y=trace[:, 1], z=fn, label='Trajectory')
fig3d.save('trpo3d.png')
# 1D Example
fig1d = Plot()
trace = trace[:-15]
point_considered = point_considered + 15
z = 10 * np.array([fn(a[0], a[1]) for a in trace])
iterations = np.arange(len(trace))
fig1d.circle(x=iterations[point_considered], y=z[point_considered], radius=1.0)
fig1d.plot(x=iterations, y=z, label='True Loss')
fig1d.scatter(x=[iterations[point_considered], ], y=[z[point_considered], ], label='Current params', s=10.0)
fig1d.annotate('Trust Region', (18, 17), (15, 5), rad=0.3)
fig1d.set_axis('Parameters', 'Cost')
# Hypothetical curves
x_trunc = iterations[point_considered:]
z_trunc = z[point_considered:]
z2 = [z_trunc[0] + np.sin((a - z_trunc[0])) for a in z_trunc]
fig1d.plot(x=x_trunc, y=z2)
z2 = [z_trunc[0] + np.sin((a - z_trunc[0])) for a in z_trunc]
fig1d.plot(x=x_trunc, y=z2)
z3 = [z_trunc[0] + 2*(a - z_trunc[0]) for a in z_trunc]
fig1d.plot(x=x_trunc, y=z3)
fig1d.save('conv.png')
cont = Container(1, 2)
cont.set_plot(0, 0, fig3d)
cont.set_plot(0, 1, fig1d)
cont.save('full.png')
# anim = Animation()
# fig3d.canvas.axis('off')
# anim.rotate_3d(fig3d)
# anim.save('trpo3d.gif')
|
[
"seba-1511@hotmail.com"
] |
seba-1511@hotmail.com
|
b91a9f014091d145beefd604b36e716ee1e6cd3b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_160/ch47_2019_09_30_21_19_06_189136.py
|
30315c6f92701799d9635e7747e2e53b795f6ba8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
mes = ["Janeiro", 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho', 'Agosto','Setembro', 'Outubro', 'Novembro', 'Dezembro']
a = int(input("Qual o numero do mês?"))
print (mes[a-1])
|
[
"you@example.com"
] |
you@example.com
|
0c4bfd10df3bbda83d7ce3efebdafd3fc2400fa6
|
4331b28f22a2efb12d462ae2a8270a9f666b0df1
|
/.history/dvdstore/webapp/form_20190914173126.py
|
5fb05c3529c11f9f5ddfc438f3c22c91bac43a33
|
[] |
no_license
|
ZiyaadLakay/csc312.group.project
|
ba772a905e0841b17478eae7e14e43d8b078a95d
|
9cdd9068b5e24980c59a53595a5d513c2e738a5e
|
refs/heads/master
| 2020-07-26T23:30:22.542450
| 2019-09-16T11:46:41
| 2019-09-16T11:46:41
| 200,703,160
| 0
| 0
| null | 2019-08-05T17:52:37
| 2019-08-05T17:52:37
| null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
from django import forms
from .models import DVD, Customer
from django.contrib.auth.models import User, auth
class DocumentForm(forms.ModelForm):
class Meta:
model = DVD
fields = ('Title','year','genre','PriceDVD','InStock','Synopsis','BookingPickup' ,'NumOfTimesRented','ImageDVD')
widgets = {'summary': Textarea(attrs={'rows':80, 'cols':20}),}
class CustomerForm(forms.ModelForm):
class Meta:
model= Customer
#user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
fields = ('username','password','email','first_name','last_name','phone_number','address','identification')
class customerForm2:
class Meta:
model= Customer
fields = ('username','password','email','first_name','last_name','phone_number','address','identification','isStaff')
|
[
"uzairjoneswolf@gmail.com"
] |
uzairjoneswolf@gmail.com
|
e0af43df6c45466129cf468cb3fa6be008df41a7
|
e32fbfdd7e4c8060faf97d0f046b5c957b4695f8
|
/app/tests/test_diffing.py
|
5de81ab73492233fb2f9d4ddf4101528f5e10996
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
fidlej/myerrata
|
ffeb777cc1b8cd2151b7dc7e79f1e1dd0fabed06
|
c5b5cc78303bb783eb59a6d6c628d2b27a6584ca
|
refs/heads/master
| 2020-04-11T16:02:41.134009
| 2010-08-25T15:21:48
| 2010-08-25T15:21:48
| 32,650,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
# coding: utf-8
from nose.tools import eq_
from src import diffing
def test_mark_changes():
tests = [
(("buy big car", "buy small car"),
"buy <del>big</del><ins>small</ins> car"),
(("buy big car", "buy small red car"),
"buy <del>big</del><ins>small red</ins> car"),
(("buy big car", "buy small car and test it"),
"buy <del>big</del><ins>small</ins> car<del></del><ins> and test it</ins>"),
(("buy big expensive car", "buy small car"),
"buy <del>big expensive</del><ins>small</ins> car"),
(("come to visit me and buy me a new algorithm", "algorithm, come to visit me and buy milk"),
"<ins>algorithm, </ins>come to visit me and buy <del>me a new algorithm</del><ins>milk</ins>"),
(("buy milk", "buy me a new algorithm"),
"buy <del>milk</del><ins>me a new algorithm</ins>"),
(("say something to me", "do you have anything to say?"),
"<ins>do you have anything to </ins>say<del> something to me</del><ins>?</ins>"),
((u"change vaše property", u"change naše property"),
u"change <del>vaše</del><ins>naše</ins> property"),
]
for args, expected in tests:
eq_(diffing.mark_changes(*args), expected)
|
[
"ivo@danihelka.net"
] |
ivo@danihelka.net
|
950fcc02c58cf78ac95e31320c47621e7685ecc1
|
9f674f9ba21a345bb3d573e0c77c4343427e1aca
|
/CorePython/11-ExceptionHandling/03-TryElse.py
|
03207550771d87ab85a0332b854bd9c3cbf7b618
|
[] |
no_license
|
ravi4all/PythonWE_11-12_30
|
f9c91c5ed238476933c0b92e55492259da8e4311
|
c4d7a4bd3939b82056ed47d5a04624ec7565125f
|
refs/heads/master
| 2021-09-13T22:34:31.082037
| 2018-05-05T11:13:05
| 2018-05-05T11:13:05
| 105,269,134
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
try:
file = open("file_1.txt")
data = file.read()
print(data)
file.seek(0,1,2)
except BaseException as err:
print("Error...",err)
else:
print("Inside Else")
finally:
print("File closed...")
file.close()
|
[
"noreply@github.com"
] |
ravi4all.noreply@github.com
|
ef59afb84dff253a108d771345e0adaaaa95a998
|
37aae70d77b0d4a0f2b073e5e032810b54f96657
|
/google-cloud-sdk/lib/googlecloudsdk/api_lib/tasks/__init__.py
|
bf325b45fc95c19b9adb58c3266cee30e157326d
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
talentdeveloper/casino_bot
|
454a493ee09482ebe5ff00f0e983d2b2d99f7d85
|
60d1781934dd018055bac1e2b7ded44216ff875c
|
refs/heads/master
| 2022-12-20T10:24:30.767164
| 2019-06-14T11:31:52
| 2019-06-14T11:31:52
| 189,037,455
| 0
| 1
| null | 2022-12-09T04:15:46
| 2019-05-28T13:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Library for gcloud cloudtasks."""
from googlecloudsdk.api_lib.util import apis
API_NAME = 'cloudtasks'
API_VERSION = 'v2beta2'
def GetClientInstance(no_http=False):
return apis.GetClientInstance(API_NAME, API_VERSION, no_http=no_http)
def GetMessagesModule(client=None):
client = client or GetClientInstance()
return client.MESSAGES_MODULE
class ApiAdapter(object):
def __init__(self, client=None, messages=None):
client = client or GetClientInstance()
self.messages = messages or GetMessagesModule(client)
self.queues_service = client.projects_locations_queues
self.tasks_service = client.projects_locations_queues_tasks
|
[
"yflumee9396@gmail.com"
] |
yflumee9396@gmail.com
|
b2d3b409484443dd593d6a9fee8c104fc0b84c0c
|
60715c9ea4c66d861708531def532814eab781fd
|
/python-programming-workshop/test/pythondatastructures/convert/bytes_to_megabytes.py
|
db68a188b17cb868fd29c58bcf7d2c17c9924547
|
[] |
no_license
|
bala4rtraining/python_programming
|
6ce64d035ef04486f5dc9572cb0975dd322fcb3e
|
99a5e6cf38448f5a01b310d5f7fa95493139b631
|
refs/heads/master
| 2023-09-03T00:10:26.272124
| 2021-11-01T08:20:52
| 2021-11-01T08:20:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#Python that converts bytes, megabytes
def bytestomegabytes(bytes):
return (bytes / 1024) / 1024
def kilobytestomegabytes(kilobytes):
return kilobytes / 1024
# Convert 100000 bytes to megabytes.
megabytes1 = bytestomegabytes(100000)
print(100000, "bytes =", megabytes1, "megabytes")
# 1024 kilobytes to megabytes.
megabytes2 = kilobytestomegabytes(1024)
print(1024, "kilobytes =", megabytes2, "megabytes")
|
[
"karthikkannan@gmail.com"
] |
karthikkannan@gmail.com
|
0f08202392d68a8a2f30a4e9fa2f592bba1057cc
|
ff92e5cc5a96277188eb34df60d7119947b6349e
|
/core/gdrn_selfocc_modeling/tools/pose_aug.py
|
c3b8c8a23ab102882083298db0b79d116295247d
|
[
"Apache-2.0"
] |
permissive
|
Pamyuu/SO-Pose
|
000783e66e52e37f8fcfc246964695c6cdc3e13d
|
a3a61d2c97b1084a4754d6c12e45e16d85809729
|
refs/heads/main
| 2023-08-16T14:33:48.022277
| 2021-10-11T08:22:46
| 2021-10-11T08:22:46
| 441,398,467
| 1
| 0
|
Apache-2.0
| 2021-12-24T07:29:21
| 2021-12-24T07:29:20
| null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
import torch
import numpy as np
import math
from transforms3d.euler import euler2mat
from core.utils.pose_utils import euler2mat_torch
def aug_poses_normal(poses, std_rot=15, std_trans=[0.01, 0.01, 0.05], max_rot=45):
"""
Args:
poses (Tensor): [n,3,4]
std_rot: deg, randomly chosen from cfg.INPUT.NOISE_ROT_STD_{TRAIN|TEST}, eg. (15, 10, 5, 2.5)
std_trans: [dx, dy, dz], cfg.INPUT.NOISE_TRANS_STD_{TRAIN|TEST}
max_rot: deg, cfg.INPUT.NOISE_ROT_MAX_{TRAIN|TEST}
Returns:
poses_aug: [n,3,4]
"""
assert poses.ndim == 3, poses.shape
poses_aug = poses.clone()
bs = poses.shape[0]
device = poses.device
if isinstance(std_rot, (tuple, list)):
std_rot = np.random.choice(std_rot)
euler_noises_deg = torch.normal(mean=0, std=std_rot, size=(bs, 3)).to(device=device)
if max_rot is not None:
euler_noises_deg = euler_noises_deg.clamp(min=-max_rot, max=max_rot)
rot_noises = euler2mat_torch(euler_noises_deg * math.pi / 180.0) # (b,3,3)
trans_noises = torch.normal(
mean=torch.zeros_like(poses[:, :3, 3]), std=torch.tensor(std_trans, device=device).view(1, 3)
)
poses_aug[:, :3, :3] = rot_noises @ poses[:, :3, :3]
poses_aug[:, :3, 3] += trans_noises
return poses_aug
def aug_poses_normal_np(poses, std_rot=15, std_trans=[0.01, 0.01, 0.05], max_rot=45):
"""
Args:
poses (ndarray): [n,3,4]
std_rot: deg, randomly chosen from cfg.INPUT.NOISE_ROT_STD_{TRAIN|TEST}
std_trans: [dx, dy, dz], cfg.INPUT.NOISE_TRANS_STD_{TRAIN|TEST}
max_rot: deg, cfg.INPUT.NOISE_ROT_MAX_{TRAIN|TEST}
Returns:
poses_aug (ndarray): [n,3,4]
"""
assert poses.ndim == 3, poses.shape
poses_aug = poses.copy()
bs = poses.shape[0]
if isinstance(std_rot, (tuple, list)):
std_rot = np.random.choice(std_rot)
euler_noises_deg = np.random.normal(loc=0, scale=std_rot, size=(bs, 3))
if max_rot is not None:
euler_noises_deg = np.clip(euler_noises_deg, -max_rot, max_rot)
euler_noises_rad = euler_noises_deg * math.pi / 180.0
rot_noises = np.array([euler2mat(*xyz) for xyz in euler_noises_rad])
trans_noises = np.concatenate(
[np.random.normal(loc=0, scale=std_trans_i, size=(bs, 1)) for std_trans_i in std_trans], axis=1
)
poses_aug[:, :3, :3] = rot_noises @ poses[:, :3, :3]
poses_aug[:, :3, 3] += trans_noises
return poses_aug
|
[
"ilovejinsha@126.com"
] |
ilovejinsha@126.com
|
dccffc2a24e794af9ca352655e7bf2a47a54601a
|
5838669d86cc572348ae16b4d50023815b5b7dd8
|
/utils/shuffle_train_val.py
|
73a4cc8d729cb90cb62fdaafde9df9a65f7a5c71
|
[] |
no_license
|
GiantPandaCV/yolov3-point
|
46dc303693138bdf2a47f2d1827be46b0cd5a958
|
11b13147556029620d920c501f2880237947f245
|
refs/heads/master
| 2022-04-11T11:49:09.952474
| 2022-03-14T02:11:04
| 2022-03-14T02:11:04
| 234,021,307
| 214
| 55
| null | 2020-07-13T13:30:02
| 2020-01-15T07:15:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
import os
import shutil
import random
train_txt = "/home/dongpeijie/datasets/dimtargetSingle/2007_train.txt"
test_txt = "/home/dongpeijie/datasets/dimtargetSingle/2007_test.txt"
val_txt = "/home/dongpeijie/datasets/dimtargetSingle/test.txt"
train_out_txt = "/home/dongpeijie/datasets/dimtargetSingle/shuffle_train.txt"
test_out_txt = "/home/dongpeijie/datasets/dimtargetSingle/shuffle_test.txt"
f_train = open(train_txt, "r")
f_test = open(test_txt, "r")
f_val = open(val_txt, "r")
o_train = open(train_out_txt, "w")
o_test = open(test_out_txt, "w")
train_content = f_train.readlines()
test_content = f_test.readlines()
val_content = f_val.readlines()
all_content = [*train_content, *test_content, *val_content]
print(len(train_content), len(test_content), len(all_content))
len_all = len(all_content)
train_percent = 0.8
# train:test = 8:2
train_sample_num = int(len_all * train_percent)
test_sample_num = len_all - train_sample_num
print("Train Sample:%d\nTest Sample:%d\n" % (train_sample_num, test_sample_num))
# print(random.sample(all_content, 10))
sampled_train = random.sample(all_content, train_sample_num)
for i in all_content:
if i in sampled_train:
o_train.write(i)
else:
o_test.write(i)
print("done")
f_test.close()
f_train.close()
f_val.close()
o_test.close()
o_train.close()
|
[
"1115957667@qq.com"
] |
1115957667@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.