| column | dtype | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |
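A minimal sketch of how rows with this schema might be loaded and filtered, assuming the split is published as a Hugging Face dataset (the dataset path below is a placeholder, not something this preview confirms):

```python
from datasets import load_dataset

# Placeholder path; substitute the actual dataset repository.
ds = load_dataset("user/python-code-dataset", split="train", streaming=True)

for row in ds.take(3):
    # Each row pairs the raw file text with its provenance metadata.
    print(row["repo_name"], row["path"], row["length_bytes"])
    if row["license_type"] == "permissive" and not row["is_generated"]:
        source = row["content"]  # the Python file as a single string
```

---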
blob_id: ed6150280bcf84458f2651fbd3fa61ffeb81b29b | directory_id: 20310d2c7708dbcd4ec35cad8937f34f74432059
path: /Chapter05/goodFeaturesToTrack.py | content_id: bd2c8143f0e06aafca9cffb512a62f64f628cc8a
detected_licenses: [] | license_type: no_license | repo_name: jCuadra/OpenCVwithPython
snapshot_id: 69db8d663942edfbf7b39371b2209f90081a3873 | revision_id: ea9c8afcb290083bb62d4dcb0b299187349a7320 | branch_name: refs/heads/master
visit_date: 2021-01-16T18:34:33.192469 | revision_date: 2017-06-02T09:04:20 | committer_date: 2017-06-02T09:04:20
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 488 | extension: py
content:
import cv2
import numpy as np

img = cv2.imread('./image/box.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray, 7, 0.05, 25)
# params
# gray: image in which to detect corners
# 7: maximum number of corners to return
# 0.05: quality level (minimum accepted corner quality, relative to the best corner found)
# 25: minimum Euclidean distance between returned corners
corners = np.float32(corners)
for item in corners:
    x, y = item[0]
    # circle centers must be integer pixel coordinates in recent OpenCV releases
    cv2.circle(img, (int(x), int(y)), 5, 255, -1)
cv2.imshow("Top 'k' features", img)
cv2.waitKey()
authors: ["huddy1204@gmail.com"] | author: huddy1204@gmail.com

---
blob_id: 4e5b44cede07a7e2b8c0b027274aa44993e86d83 | directory_id: a29a5568437622310ff9e2ce61a3f114f29eb8fd
path: /all_CO2_workingWang.py | content_id: 64f5aaad3d8fba6cbf1f540482a11d01e7926834
detected_licenses: [] | license_type: no_license | repo_name: wrudebusch/DRI-HKTunnel
snapshot_id: 7bab2376e25be966d4d7e1515494632cf79c0c2d | revision_id: 6211a0acafc7d00c0db5472390626ca0745b1634 | branch_name: refs/heads/master
visit_date: 2021-01-19T02:27:48.067781 | revision_date: 2016-06-07T19:15:37 | committer_date: 2016-06-07T19:15:37
github_id: 52,547,905 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,698 | extension: py
content:
import pandas as pd
import glob
from datetime import datetime
from functools import reduce  # reduce is no longer a builtin in Python 3

dateparse = lambda x: datetime.strptime(x, '%m/%d/%Y %H:%M:%S')

def fix(df):
    # round the first/last timestamps (nanosecond values) to the nearest ~10 s
    start = pd.Timestamp(int(round(df.index[0].value, -10)))
    stop = pd.Timestamp(int(round(df.index[-1].value, -10)))
    s = pd.DataFrame({'start': start, 'stop': stop, 'avg_ppm': df.mean()})
    s = s.set_index('start')
    return s

def cut_up(data):
    cut1 = data.between_time('8:00', '10:00')
    if len(cut1) > 1: cut1 = fix(cut1)
    cut2 = data.between_time('11:00', '13:00')
    if len(cut2) > 1: cut2 = fix(cut2)
    cut3 = data.between_time('14:00', '16:00')
    if len(cut3) > 1: cut3 = fix(cut3)
    cut4 = data.between_time('17:00', '19:00')
    if len(cut4) > 1: cut4 = fix(cut4)
    # DataFrame.append is deprecated; concatenate the four windows instead
    cuts = pd.concat([cut1, cut2, cut3, cut4])
    # print(cuts.head())
    return cuts

def fix_CO2(filename):
    raw = pd.read_csv(filename, sep='\t', encoding='utf-16',
                      parse_dates={'datetime': [0]}, date_parser=dateparse)
    # convert_objects() was removed from pandas; coerce the column explicitly
    data = pd.DataFrame({'datetime': raw['datetime'],
                         'CO2ppm': pd.to_numeric(raw['CO2 ppm'], errors='coerce')})
    data = data.set_index('datetime')
    print(data.head())
    return cut_up(data)

## main
CO2_DI_data = []
## CO2 inside (raw strings: a bare '\U...' in a Windows path is a SyntaxError in Python 3)
for f in glob.glob(r'G:\Dropbox\Hong Kong Tunnel\Downwind\CO2 Inside\*.txt'):
    print(f)
    data = fix_CO2(f)
    CO2_DI_data.append(data)
CO2_DI_data = pd.concat(CO2_DI_data)
CO2_DI_data = pd.DataFrame({'CO2_DI_stop': CO2_DI_data.stop, 'CO2_DI_avg': CO2_DI_data.avg_ppm})
CO2_DO_data = []
## CO2 outside
for f in glob.glob(r'G:\Dropbox\Hong Kong Tunnel\Downwind\CO2 Outside\*.txt'):
    print(f)
    data = fix_CO2(f)
    CO2_DO_data.append(data)
CO2_DO_data = pd.concat(CO2_DO_data)
CO2_DO_data = pd.DataFrame({'CO2_DO_stop': CO2_DO_data.stop, 'CO2_DO_avg': CO2_DO_data.avg_ppm})
CO2_UI_data = []
## CO2 inside
for f in glob.glob(r'G:\Dropbox\Hong Kong Tunnel\Upwind\CO2Inside\*.txt'):
    print(f)
    data = fix_CO2(f)
    CO2_UI_data.append(data)
CO2_UI_data = pd.concat(CO2_UI_data)
CO2_UI_data = pd.DataFrame({'CO2_UI_stop': CO2_UI_data.stop, 'CO2_UI_avg': CO2_UI_data.avg_ppm})
CO2_UO_data = []
## CO2 outside
for f in glob.glob(r'G:\Dropbox\Hong Kong Tunnel\Upwind\CO2Outside\*.txt'):
    print(f)
    data = fix_CO2(f)
    CO2_UO_data.append(data)
CO2_UO_data = pd.concat(CO2_UO_data)
CO2_UO_data = pd.DataFrame({'CO2_UO_stop': CO2_UO_data.stop, 'CO2_UO_avg': CO2_UO_data.avg_ppm})
dfs = [CO2_DO_data, CO2_DI_data, CO2_UI_data, CO2_UO_data]
appended_ALL_data = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True, how='outer'), dfs)
appended_ALL_data.to_csv('CO2_data_rounded_Wang.csv')
authors: ["wrudebusch@gmail.com"] | author: wrudebusch@gmail.com

---
blob_id: adc64da4e00337e98ee27de6675046ea73ffd7dd | directory_id: 0ce9a72c7d66b71f95ce34ac9103b11026a14e4e
path: /qqzone/analyze.py | content_id: 32b7bdc00a740e0ad17c2a98998f4e1e8b5f4757
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: yanqingda/web-crawlers
snapshot_id: 9eebafdb55cd9d968f8b33d2f835ad905c300cd0 | revision_id: 0b4b04e5b432f2bbfd930b11cb71f0e144ad9cfe | branch_name: refs/heads/master
visit_date: 2020-09-25T13:57:51.417822 | revision_date: 2019-09-23T09:02:07 | committer_date: 2019-09-23T09:02:07
github_id: 226,018,013 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-12-05T04:45:34 | gha_created_at: 2019-12-05T04:45:34 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,458 | extension: py
content:
import hashlib
import time
import random
import re
import string
from urllib.parse import quote
import requests
import base
def __curl_md5(src):
# md5
m = hashlib.md5()
m.update(src.encode('utf-8'))
return m.hexdigest()
def __get_params(plus_item):
# request timestamp
t = time.time()
time_stamp = str(int(t))
# request a random string
nonce_str = ''.join(random.sample(string.ascii_letters + string.digits, 10))
# config.json
app_id = base.config['app_id']
app_key = base.config['app_key']
params = {'app_id': app_id,
'text': plus_item,
'time_stamp': time_stamp,
'nonce_str': nonce_str,
'sign': ''
}
sign_before = ''
for key in sorted(params):
if params[key] != '':
            # When the key-value pairs are concatenated, each value must be URL-encoded,
            # and the encoding must use uppercase hex (e.g. %E8, not %e8); quote() emits uppercase by default.
            sign_before += key + '=' + quote(params[key]) + '&'
    # Append the application key to the end of sign_before under the key app_key
    sign_before += 'app_key=' + app_key
    # MD5 the resulting string and uppercase the digest to get the request signature
sign = __curl_md5(sign_before)
sign = sign.upper()
params['sign'] = sign
return params
def get_text_feel(num):
url = "https://api.ai.qq.com/fcgi-bin/nlp/nlp_textpolar"
text_url = './results/shuoshuo/%s.txt' % num
print("[%s] <get text feel> start" % (time.ctime(time.time())))
try:
with open(text_url, encoding='utf-8') as f:
all_chaps = eval(f.read())
except Exception as e:
print("[%s] <get text feel> make sure %s.txt exists" % (time.ctime(time.time()), num))
print(e)
valid_count = 0
for plus_item in all_chaps:
plus_item, number = re.subn('[#]', "", plus_item)
plus_item, number = re.subn(r'\[(.*?)\](.*?)\[(.*?)\]', "", plus_item)
        payload = __get_params(plus_item)  # build the signed request parameters
r = requests.get(url, params=payload)
if r.json()['ret'] == 0:
polar = r.json()['data']['polar']
print('confidence: %d, polar: %s, text: %s' % (r.json()['data']['confd'],
'负面' if polar == -1 else '中性' if polar == 0 else '正面', r.json()['data']['text']))
valid_count += 1
print("[%s] <get text feel> ok" % (time.ctime(time.time())))
print("[%s] <get text feel> %d valid, %d in total" % (time.ctime(time.time()), valid_count, len(all_chaps)))
def __gen_word_cloud(text_url, mask_url):
import jieba
from scipy.misc import imread
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
try:
with open(text_url, encoding='utf-8') as f:
all_chaps = [chap for chap in f.readlines()]
except Exception as e:
print("[%s] <get word cloud> make sure *-seg.txt exists" % (time.ctime(time.time())))
print(e)
dictionary = []
for i in range(len(all_chaps)):
words = list(jieba.cut(all_chaps[i]))
dictionary.append(words)
# flat
tmp = []
for chapter in dictionary:
for word in chapter:
tmp.append(word.encode('utf-8'))
dictionary = tmp
# filter
unique_words = list(set(dictionary))
freq = []
for word in unique_words:
freq.append((word.decode('utf-8'), dictionary.count(word)))
# sort
freq.sort(key=lambda x: x[1], reverse=True)
# broke_words
broke_words = []
try:
with open('word/stopwords.txt') as f:
broke_words = [i.strip() for i in f.readlines()]
except Exception as e:
broke_words = STOPWORDS
# remove broke_words
freq = [i for i in freq if i[0] not in broke_words]
# remove monosyllable words
freq = [i for i in freq if len(i[0]) > 1]
img_mask = imread(mask_url)
img_colors = ImageColorGenerator(img_mask)
wc = WordCloud(background_color="white", # bg color
max_words=2000, # max words
font_path=u'./word/SourceHanSans-Regular.otf',
mask=img_mask, # bg image
max_font_size=60, # max font size
random_state=42)
wc.fit_words(dict(freq))
plt.imshow(wc)
plt.axis('off')
plt.show()
def get_zone_word_cloud(num, mask="Male"):
print("[%s] <get zone word cloud> start" % (time.ctime(time.time())))
text_url = './results/shuoshuo/%s-seg.txt' % num
mask_url = './word/alice_mask.png' if mask == "Female" else './word/boy_mask.png'
if not base.check_path(text_url):
print("[%s] <get zone word cloud> make sure %s-seg.txt exists" % (time.ctime(time.time()), num))
return
__gen_word_cloud(text_url, mask_url)
print("[%s] <get zone word cloud> ok" % (time.ctime(time.time())))
def get_pyq_word_cloud(mask="Male"):
print("[%s] <get pyq word cloud> start" % (time.ctime(time.time())))
text_url = './results/pyq/pyq-seg.txt'
mask_url = './word/alice_mask.png' if mask == "Female" else './word/boy_mask.png'
if not base.check_path(text_url):
print("[%s] <get pyq word cloud> make sure pyq-seg.txt exists" % (time.ctime(time.time())))
return
__gen_word_cloud(text_url, mask_url)
print("[%s] <get pyq word cloud> ok" % (time.ctime(time.time())))
authors: ["96486d9b@gmail.com"] | author: 96486d9b@gmail.com

---
blob_id: 3e92f309ef61231db2fa56989217b3ba6eb86326 | directory_id: 275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
path: /swagger_client/models/conflict_error.py | content_id: a40924bde282c121008b4b6801a38516e4f056f1
detected_licenses: [] | license_type: no_license | repo_name: cascadiarc/cyclos-python-client
snapshot_id: 8029ce07174f2fe92350a92dda9a60976b2bb6c2 | revision_id: a2e22a30e22944587293d51be2b8268bce808d70 | branch_name: refs/heads/main
visit_date: 2023-04-03T16:52:01.618444 | revision_date: 2021-04-04T00:00:52 | committer_date: 2021-04-04T00:00:52
github_id: 354,419,532 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,439 | extension: py
content:
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class ConflictError(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'ConflictErrorCode'
}
attribute_map = {
'code': 'code'
}
def __init__(self, code=None, _configuration=None): # noqa: E501
"""ConflictError - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._code = None
self.discriminator = None
if code is not None:
self.code = code
@property
def code(self):
"""Gets the code of this ConflictError. # noqa: E501
Error codes for 409 Conflict entity HTTP status Possible values are: * constraintViolatedOnRemove: An attempt to remove some entity has failed, probably because that entity is in use, that is, is being referenced by some other entity. * staleEntity: Failure in the optimistic lock. It means some entity was fetched for editing by 2 clients. Then they both saved it. The first one is successful, but the second one will fail. If you get this error, make sure the `version` field is being sent with the correct value, as fetched from the server. # noqa: E501
:return: The code of this ConflictError. # noqa: E501
:rtype: ConflictErrorCode
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ConflictError.
Error codes for 409 Conflict entity HTTP status Possible values are: * constraintViolatedOnRemove: An attempt to remove some entity has failed, probably because that entity is in use, that is, is being referenced by some other entity. * staleEntity: Failure in the optimistic lock. It means some entity was fetched for editing by 2 clients. Then they both saved it. The first one is successful, but the second one will fail. If you get this error, make sure the `version` field is being sent with the correct value, as fetched from the server. # noqa: E501
:param code: The code of this ConflictError. # noqa: E501
:type: ConflictErrorCode
"""
self._code = code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConflictError, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConflictError):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConflictError):
return True
return self.to_dict() != other.to_dict()
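
# A minimal client-side sketch (not part of the generated model): retrying an
# edit when the server reports the staleEntity conflict described in the
# docstrings above. The fetch_entity and save_entity callables are hypothetical
# placeholders that would wrap the generated API client.
def save_with_retry(entity_id, mutate, fetch_entity, save_entity, attempts=3):
    for _ in range(attempts):
        entity = fetch_entity(entity_id)   # re-fetch to pick up the current `version`
        mutate(entity)                     # apply the desired change
        conflict = save_entity(entity)     # assumed to return a ConflictError or None
        if conflict is None:
            return True
        if str(conflict.code) != 'staleEntity':
            # constraintViolatedOnRemove and friends are not retryable
            raise RuntimeError(conflict.to_str())
    return False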
authors: ["dan@leftcoastfs.com"] | author: dan@leftcoastfs.com

---
blob_id: 0726b7390ed35387b3d0eef50a1cb7a3d9aa9f8a | directory_id: 3044d26f03f23e8e8c5fcec57b78bfffe0fa0bd3
path: /case/workflow_FinancialClass_samplepayment_FlowSamplePayment/workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return.py | content_id: dad86a28c42e2c435c7fe2f5a98f76c2af6bf9d8
detected_licenses: [] | license_type: no_license | repo_name: tian848-tim/trunk
snapshot_id: de50a153c8cab3c81c79c523256a6f1b4c2f049d | revision_id: cd52afdd003f094056dc2ea877c823a38e6a26fd | branch_name: refs/heads/master
visit_date: 2022-11-20T06:43:35.540105 | revision_date: 2020-07-20T07:48:26 | committer_date: 2020-07-20T07:48:26
github_id: 281,048,661 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 19,324 | extension: py
content:
'''
Test case title: sample payment test
Test scenario: sample payment business-flow test (returned by the purchasing team leader)
Author: Tim
Created: 2018-11-13
Last modified: 2018-11-13
Input data: accounts for each role in the approval workflow
Output data: none
'''
# -*- coding: utf-8 -*-
import sys, os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
# sys.path.append(rootPath)
import unittest
from cgitb import text
import selenium.webdriver.support.ui as ui
from selenium import webdriver
from time import sleep
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest, configparser
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import NoSuchElementException
import random
import json
'''
Load configuration options
'''
cfg = configparser.ConfigParser()
cfg.read(rootPath + '/core/config.ini')
class FlowSamplePayment(unittest.TestCase):
base_url = cfg.get("projects", "base_url")
project_path = cfg.get("projects", "project_path")
log_path = cfg.get("webdriver", "log") + '/' + cfg.get("webdriver", "logfile") + '-%s.log' % time.strftime(
"%Y-%m-%d %H_%M_%S")
def loadvendername(self):
global result
file = open(rootPath + '/data/workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return.json', encoding='utf-8')
data = json.load(file)
result = [(d['username'], d['password']) for d in data['login']]
return result
def loadvendernames(self):
global results
file = open(rootPath + '/data/workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return.json', encoding='utf-8')
data = json.load(file)
results = [(d['name']) for d in data['use_vendorname']]
return results
def setUp(self):
        # script identifier: title
self.script_name = '样品付款申请——采购组长退回'
        # script identifier: ID
self.script_id = 'workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return'
self.target_url = self.base_url + self.project_path
if (cfg.get("webdriver", "enabled") == "off"):
            # if you use the latest Firefox, comment out the next line
self.driver = webdriver.Firefox()
else:
            # if you use the latest Firefox, use the next line
self.driver = webdriver.Firefox(log_path=self.log_path)
self.verificationErrors = []
self.accept_next_alert = True
self.driver.implicitly_wait(15)
self.driver.maximize_window()
    # define the login method
def login(self, username, password):
        self.driver.get(self.target_url)  # open the login page
self.driver.find_element_by_id('account-inputEl').send_keys(username)
self.driver.find_element_by_id('password-inputEl').send_keys(password)
self.driver.find_element_by_xpath("//*[@id='LoginWin']//span[contains(@class,'x-btn-icon-el')]").click()
def test_FlowSamplePayment(self):
su = self.loadvendername()
ad = self.loadvendernames()
for i in range(0, len(su)):
print(su[i][0])
print(su[i][1])
self.login(su[0][0],su[0][1])
#self.login('Vic_cn','123')
sleep(5)
        # close the popup dialog
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # go to the application-documents tab
self.driver.find_element_by_xpath("//*[@id='appNavTabPanel']//span[contains(@class,'fa-code-fork')]").click()
sleep(2)
        # go to "sample application"
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '样品申请')]").click()
sleep(2)
        # go to "new sample application"
self.driver.find_element_by_xpath("//*[@id='FlowSampleView']//span[contains(@class,'fa-plus')]").click()
sleep(2)
        ## select a vendor
self.driver.find_element_by_xpath( "//*[@id='FlowSampleViewFormPanelID-body']//input[@name='main.vendorName']").click()
sleep(2)
if ad[0] != '':
            ## type the search keyword
self.driver.find_element_by_xpath("//*[@id='VendorDialogWinSearchPanelID-innerCt']//input[@name='keywords']").send_keys(ad[0])
sleep(2)
            # click search
self.driver.find_element_by_xpath("//*[@id='VendorDialogWinSearchPanelID-innerCt']//span[contains(@class,'fa-search')]").click()
sleep(2)
            # locate the first vendor record
_elementFirst = self.driver.find_element_by_xpath("//*[@id='VendorDialogWinGridPanelID-body']//div[contains(text(), '1')]")
sleep(2)
            # double-click this element
ActionChains(self.driver).double_click(_elementFirst).perform()
else:
_elementFiveth = (random.randint(1, 10))
            # locate a vendor record (random row index)
_elementFirst = self.driver.find_element_by_xpath("//*[@id='VendorDialogWinGridPanelID-body']//div[text()='{}']".format(_elementFiveth))
sleep(2)
            # double-click this element
ActionChains(self.driver).double_click(_elementFirst).perform()
sleep(2)
        # locate the "add sample" button
_elementSecond = self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID_header-body']//img[contains(@class,'x-tool-plus')]")
sleep(2)
        # double-click this element
ActionChains(self.driver).double_click(_elementSecond).perform()
sleep(2)
        # locate the first sample record
_elementThird = self.driver.find_element_by_xpath("//*[@id='ProductDialogWinGridPanelID-body']//div[contains(text(), '1')]")
sleep(2)
        # double-click this element
ActionChains(self.driver).double_click(_elementThird).perform()
sleep(2)
        # click confirm
self.driver.find_element_by_xpath("//*[@id='ProductDialogWinID']//span[contains(@class,'fa-check')]").click()
sleep(2)
        # click the AUD cell
self.driver.find_element_by_xpath("//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[5]").click()
sleep(2)
        # clear the input field
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeAud']").clear()
sleep(2)
        ## type the AUD amount
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeAud']").send_keys('10')
sleep(2)
        # click the sample-quantity cell
self.driver.find_element_by_xpath("//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[8]").click()
sleep(2)
        ## clear the sample quantity
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='qty']").clear()
sleep(2)
        ## type the sample quantity
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='qty']").send_keys('10')
sleep(2)
        # locate the refundable-fee cell
self.driver.find_element_by_xpath( "//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[12]").click()
sleep(2)
        # clear the input field
self.driver.find_element_by_xpath( "//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeRefund']").clear()
sleep(2)
        ## type the refundable fee
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeRefund']").send_keys('1')
sleep(2)
        # locate the refundable-fee cell
self.driver.find_element_by_xpath("//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[12]").click()
sleep(2)
        # click "launch"
self.driver.find_element_by_xpath("//*[@id='FlowSampleForm']//span[contains(@class,'fa-play')]").click()
self.driver.implicitly_wait(30)
        # fetch the popup message:
self.driver.implicitly_wait(10)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
sleep(5)
try:
self.driver.find_element_by_xpath("//*[@id='FlowSampleViewGridPanelID-body']/div/table/tbody/tr[1]//span[contains(text(), '{}')]".format('调整申请')).is_displayed()
a = True
except:
a = False
if a == True:
print("元素存在")
elif a == False:
print("元素不存在")
sleep(2)
if a == True:
            # select the first new-product-development record
self.driver.find_element_by_xpath(
"//*[@id='FlowSampleViewGridPanelID-body']//div[contains(text(), '1')]").click()
            # hard wait
sleep(2)
            # go to new-product-development edit
self.driver.find_element_by_xpath(
"//*[@id='FlowSampleView']//span[contains(@class,'fa-pencil-square-o')]").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@id='FlowSampleViewMainTbsPanelID-win-body']//input[@name='flowNextHandlerAccount']").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@class='x-list-plain']//li[contains(text(),'{}')]".format(su[1][0])).click()
sleep(2)
            # locate the "launch" button
self.driver.find_element_by_xpath("//*[@id='FlowSampleForm']//span[contains(@class,'fa-check-square')]").click()
            # fetch the popup message:
self.driver.implicitly_wait(30)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
            # hard wait
sleep(5)
else:
pass
        self.driver.find_element_by_link_text('注销').click()  # click "注销" (log out)
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # leave the page
sleep(2)
self.login(su[1][0],su[1][1])
#self.login('Vic_cn', '123')
sleep(5)
        # close the popup dialog
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # go to the work panel
self.driver.find_element_by_xpath("//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]").click()
sleep(2)
        # go to the to-do list
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]").click()
sleep(2)
        # locate the first to-do record
self.driver.find_element_by_xpath("//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]").click()
sleep(2)
        # click "handle now"
self.driver.find_element_by_xpath("//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]").click()
sleep(2)
        # click "approve"
self.driver.find_element_by_xpath("//*[@id='FlowSampleForm']//span[contains(@class, 'fa-check-square')]").click()
self.driver.implicitly_wait(30)
        # fetch the popup message:
self.driver.implicitly_wait(10)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
sleep(3)
        self.driver.find_element_by_link_text('注销').click()  # click "注销" (log out)
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # leave the page
sleep(2)
        '''Sample payment'''
self.login(su[0][0],su[0][1])
#self.login('Vic_cn', '123')
sleep(5)
        # close the popup dialog
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # go to the application-documents tab
self.driver.find_element_by_xpath( "//*[@id='appNavTabPanel']//span[contains(@class,'fa-code-fork')]").click()
sleep(2)
        # go to the finance category
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '财务类')]").click()
sleep(3)
        # go to "sample payment application"
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '样品付款申请')]").click()
sleep(2)
        # locate the first sample-payment-application record
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentGridPanelID-body']//div[contains(text(), '1')]").click()
sleep(2)
        # go to sample-payment-application edit
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentView']//span[contains(@class,'fa-pencil-square-o')]").click()
sleep(2)
        # read the total sample fee (AUD)
_elementFiveth =self.driver.find_element_by_xpath("//*[@class='x-form-trigger-input-cell']//input[@name='main.totalSampleFeeAud']").get_attribute("value")
sleep(2)
        # clear the actual-payment amount
self.driver.find_element_by_xpath("//*[@class='x-form-trigger-input-cell']//input[@name='main.paymentTotalSampleFeeAud']").clear()
sleep(2)
#_elementFiveth = (random.randint(0, 1000))
#sleep(2)
        # enter the actual-payment amount
self.driver.find_element_by_xpath("//*[@class='x-form-trigger-input-cell']//input[@name='main.paymentTotalSampleFeeAud']").send_keys(_elementFiveth)
sleep(2)
        # click "launch"
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentForm']//span[contains(@class,'fa-play')]").click()
self.driver.implicitly_wait(30)
        # fetch the popup message:
self.driver.implicitly_wait(10)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
sleep(10)
try:
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentGridPanelID-body']/div/table/tbody/tr[1]//span[contains(text(), '{}')]".format('调整申请')).is_displayed()
a = True
except:
a = False
if a == True:
print("元素存在")
elif a == False:
print("元素不存在")
sleep(2)
if a == True:
            # select the first new-product-development record
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentGridPanelID-body']//div[contains(text(), '1')]").click()
            # hard wait
sleep(2)
            # go to new-product-development edit
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentView']//span[contains(@class,'fa-pencil-square-o')]").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentMainTbsPanelID-win-body']//input[@name='flowNextHandlerAccount']").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@class='x-list-plain']//li[contains(text(),'{}')]".format(su[1][0])).click()
sleep(2)
            # locate the "launch" button
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentForm']//span[contains(@class,'fa-check-square')]").click()
            # fetch the popup message:
self.driver.implicitly_wait(30)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
            # hard wait
sleep(5)
else:
pass
        self.driver.find_element_by_link_text('注销').click()  # click "注销" (log out)
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # leave the page
sleep(5)
        '''First-node review'''
self.login(su[1][0],su[1][1])
#self.login('Vic_cn', '123')
sleep(5)
        # close the popup dialog
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # go to the work panel
self.driver.find_element_by_xpath("//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]").click()
sleep(2)
        # go to the to-do list
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]").click()
sleep(2)
        # locate the first to-do record
self.driver.find_element_by_xpath("//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]").click()
sleep(2)
        # click "handle now"
self.driver.find_element_by_xpath(
"//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]").click()
sleep(2)
        # assign the next handler
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentMainTbsPanelID-win-body']//input[@name='flowNextHandlerAccount']").click()
sleep(2)
        # select the first item
self.driver.find_element_by_xpath(
"//*[@class='x-list-plain']//li[contains(@class, 'x-boundlist-item-over')]").click()
sleep(2)
        # switch into the iframe
self.driver.switch_to.frame(self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentMainTbsPanelID-win-body']//iframe[contains(@class,'cke_wysiwyg_frame cke_reset')]"))
sleep(2)
        # type the content
self.driver.find_element_by_class_name("cke_show_borders").send_keys('test')
sleep(2)
        # switch back out of the iframe
self.driver.switch_to.default_content()
sleep(2)
        # click "return" (send the request back)
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentForm']//span[contains(@class, 'fa-reply')]").click()
sleep(5)
        self.driver.find_element_by_link_text('注销').click()  # click "注销" (log out)
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # leave the page
sleep(5)
def isElementExist(self, link):
flag = True
try:
self.driver.find_element_by_xpath(link)
print('元素找到')
return flag
except:
flag = False
print('未找到')
return flag
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException as e:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
authors: ["tim.long@Newaim01.com"] | author: tim.long@Newaim01.com

---
blob_id: 56de729d7a7b75d2cb74bb8c681c2ba54b94ae29 | directory_id: 638558203b38d60b1716f6d87ccc8d01587ed99c
path: /HeapPriorityQueue.py | content_id: 92f0f659aae10a37abd99fd585a953cd671bcd7b
detected_licenses: [] | license_type: no_license | repo_name: cluntsao/python-learning
snapshot_id: b3cb08c527f037cd040a972494b239c32294537a | revision_id: 4e61be66e85d82d63349869a8ee72c7ae6b94ee5 | branch_name: refs/heads/master
visit_date: 2020-03-24T05:55:38.298430 | revision_date: 2018-08-10T03:46:52 | committer_date: 2018-08-10T03:46:52
github_id: 142,508,814 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,138 | extension: py
content:
from PriorityQueueBase import PriorityQueueBase
class Empty(Exception):
pass
class HeapPriorityQueue(PriorityQueueBase):
"""Non-public behaviors"""
def _parent(self, j):
return (j - 1) // 2
def _left(self, j):
return 2*j + 1
def _right(self, j):
return 2*j + 2
def _has_left(self, j):
return self._left(j) < len(self._data)
def _has_right(self, j):
return self._right(j) < len(self._data)
def _swap(self, i, j):
self._data[i], self._data[j] = self._data[j], self._data[i]
def _upheap(self, j):
parent = self._parent(j)
if j > 0 and self._data[j] < self._data[parent]:
self._swap(j, parent)
self._upheap(parent)
def _downheap(self, j):
""" IMPORTANT """
if self._has_left(j):
left = self._left(j)
small_child = left
if self._has_right(j):
right = self._right(j)
if self._data[right] < self._data[left]:
small_child = right
if self._data[small_child] < self._data[j]:
self._swap(j, small_child)
self._downheap(small_child)
"""Public Behavior"""
def __init__(self):
self._data = []
def __len__(self):
return len(self._data)
def add(self, key, value):
self._data.append(self._Item(key, value))
self._upheap(len(self._data) - 1)
def min(self):
""" Return but do not remove (k, v) tuple with minimum key """
if self.is_empty():
raise Empty("Empty!!!!!")
item = self._data[0]
return item._key, item._value
def remove_min(self):
if self.is_empty():
raise Empty("Empty!!!!!!")
self._swap(0, len(self._data) - 1)
item = self._data.pop()
self._downheap(0)
return item._key, item._value
"""
In short, each priority queue ADT method runs in O(1) or O(log n) time:
len, is_empty : O(1)
P.min : O(1)
P.add : O(log n)
P.remove_min : O(log n)
"""
authors: ["tsaochelun@gmail.com"] | author: tsaochelun@gmail.com

---
blob_id: c1c25798337a14a9824606e5595643103dd4f1c4 | directory_id: 5c72598be125082f8cd9df3555ff6bcc0cfa906d
path: /pytorch-semseg-model/ptsemseg/loader/pascal_voc_loader.py | content_id: 43ff94d21ec7e121c312f4bb6435dc9b646a3e3f
detected_licenses: ["MIT"] | license_type: permissive | repo_name: yyfyan/fast_segmentation_code
snapshot_id: d12b663960033804bde8699e24ef90de1ec350ef | revision_id: 3784a09585e41e384cbc1e0a1ebc1ae05642905a | branch_name: refs/heads/master
visit_date: 2020-03-30T06:12:40.023843 | revision_date: 2018-09-27T07:16:53 | committer_date: 2018-09-27T07:16:53
github_id: 150,845,503 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2018-09-29T08:57:18 | gha_created_at: 2018-09-29T08:57:18 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,715 | extension: py
content:
import os
import collections
import json
import torch
import torchvision
import numpy as np
import scipy.misc as m
import scipy.io as io
import matplotlib.pyplot as plt
from tqdm import tqdm  # tqdm is used below in setup()
from torch.utils import data
def get_data_path(name):
js = open('config.json').read()
data = json.loads(js)
return data[name]['data_path']
class pascalVOCLoader(data.Dataset):
def __init__(self, root, split="train_aug", is_transform=False, img_size=512):
self.root = root
self.split = split
self.is_transform = is_transform
self.n_classes = 21
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
self.mean = np.array([104.00699, 116.66877, 122.67892])
self.files = collections.defaultdict(list)
for split in ["train", "val", "trainval"]:
file_list = tuple(open(root + '/ImageSets/Segmentation/' + split + '.txt', 'r'))
file_list = [id_.rstrip() for id_ in file_list]
self.files[split] = file_list
if not os.path.isdir(self.root + '/SegmentationClass/pre_encoded'):
self.setup(pre_encode=True)
else:
self.setup(pre_encode=False)
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_name = self.files[self.split][index]
img_path = self.root + '/JPEGImages/' + img_name + '.jpg'
        lbl_path = self.root + '/SegmentationClass/' + img_name + '.png'
img = m.imread(img_path)
img = np.array(img, dtype=np.uint8)
lbl = m.imread(lbl_path)
lbl = np.array(lbl, dtype=np.int32)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
img = img[:, :, ::-1]
img = img.astype(np.float64)
img -= self.mean
img = m.imresize(img, (self.img_size[0], self.img_size[1]))
# Resize scales images from 0 to 255, thus we need
# to divide by 255.0
img = img.astype(float) / 255.0
        # HWC -> CHW
img = img.transpose(2, 0, 1)
lbl[lbl==255] = 0
lbl = lbl.astype(float)
lbl = self.encode_segmap(lbl)
lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
lbl = lbl.astype(int)
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def get_pascal_labels(self):
return np.asarray([[0,0,0], [128,0,0], [0,128,0], [128,128,0], [0,0,128], [128,0,128],
[0,128,128], [128,128,128], [64,0,0], [192,0,0], [64,128,0], [192,128,0],
[64,0,128], [192,0,128], [64,128,128], [192,128,128], [0, 64,0], [128, 64, 0],
[0,192,0], [128,192,0], [0,64,128]])
def encode_segmap(self, mask):
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for i, label in enumerate(self.get_pascal_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = i
label_mask = label_mask.astype(int)
return label_mask
def decode_segmap(self, temp, plot=False):
label_colours = self.get_pascal_labels()
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_classes):
r[temp == l] = label_colours[l, 0]
g[temp == l] = label_colours[l, 1]
b[temp == l] = label_colours[l, 2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r
rgb[:, :, 1] = g
rgb[:, :, 2] = b
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def setup(self, pre_encode=False):
sbd_path = get_data_path('sbd')
voc_path = get_data_path('pascal')
target_path = self.root + '/SegmentationClass/pre_encoded/'
if not os.path.exists(target_path):
os.makedirs(target_path)
# sbd_train_list = tuple(open(sbd_path + 'dataset/train.txt', 'r'))
sbd_train_list = tuple(open(voc_path + 'ImageSets/Segmentation/train.txt', 'r'))
sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]
self.files['train_aug'] = self.files['train'] + sbd_train_list
if pre_encode:
print("Pre-encoding segmentation masks...")
for i in tqdm(sbd_train_list):
lbl_path = sbd_path + 'dataset/cls/' + i + '.mat'
lbl = io.loadmat(lbl_path)['GTcls'][0]['Segmentation'][0].astype(np.int32)
lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
m.imsave(target_path + i + '.png', lbl)
for i in tqdm(self.files['trainval']):
lbl_path = self.root + '/SegmentationClass/' + i + '.png'
lbl = self.encode_segmap(m.imread(lbl_path))
lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
m.imsave(target_path + i + '.png', lbl)
if __name__ == '__main__':
local_path = '/home/neuron/Desktop/Donghao/cellsegmentation/normalCV/VOC2012'
dst = pascalVOCLoader(local_path, is_transform=True)
trainloader = data.DataLoader(dst, batch_size=4)
for i, data in enumerate(trainloader):
imgs, labels = data
if i == 0:
img = torchvision.utils.make_grid(imgs).numpy()
img = np.transpose(img, (1, 2, 0))
img = img[:, :, ::-1]
plt.imshow(img)
plt.show()
plt.imshow(dst.decode_segmap(labels.numpy()[i+1]))
plt.show()
authors: ["dzha9516@uni.sydney.edu.au"] | author: dzha9516@uni.sydney.edu.au

---
blob_id: 6085a2cfbcde968d0ed001eb7a49d5bebfa6aa75 | directory_id: 817a97680e85142634c3e7c66a3e0a0e5eceaffd
path: /sma_cross_vol.py | content_id: d1c0f856afe1d5f0f00c0bc6834541cf33e6a4d0
detected_licenses: [] | license_type: no_license | repo_name: johndpope/algotrading
snapshot_id: 4cca78db99af8fef0d1fc57aac3104bd0e8a895c | revision_id: f2f527f85aad6cce928f1c2e9794f9217efcce93 | branch_name: refs/heads/master
visit_date: 2021-06-24T15:24:53.136691 | revision_date: 2017-08-27T16:13:55 | committer_date: 2017-08-27T16:13:55
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,488 | extension: py
content:
from datetime import datetime, timedelta
import backtrader as bt
class SMACrossVolumeStrategy(bt.SignalStrategy):
params = dict(
diff=0.01,
limit=0.005,
limdays=10,
limdays2=1000,
maperiod_small=30,
maperiod_big=30,
)
def __init__(self):
self.order = None
self.dataclose = self.datas[0].close
self.datavol = self.datas[0].volume
self.sma_small = bt.indicators.SimpleMovingAverage(
self.datas[0],
period=self.params.maperiod_small
)
self.sma_big = bt.indicators.SimpleMovingAverage(
self.datas[0],
period=self.params.maperiod_big
)
def log(self, txt, dt=None, doprint=False):
'''Logging function fot this strategy'''
if doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def stop(self):
self.log('(MA Period Small: %2d | MA Period Big: %2d) Ending Value %.2f' %
(self.p.maperiod_small, self.p.maperiod_big, self.broker.getvalue()), doprint=True)
    def notify_order(self, order):
        # Clear the pending-order flag once the order finishes (or fails);
        # without this, next() would stop trading after the first order.
        if order.status in [order.Completed, order.Canceled, order.Margin, order.Rejected]:
            self.order = None

    def next(self):
        if self.order:
            return
if not self.position:
if self.sma_small[0] > self.sma_big[0] and self.sma_small[-1] < self.sma_big[-1] and self.datavol[0] > 2000000:
self.order = self.buy()
else:
if self.sma_small[0] < self.sma_big[0] and self.sma_small[-1] > self.sma_big[-1] and self.datavol[0] > 2000000:
self.order = self.sell()
cerebro = bt.Cerebro()
strats = cerebro.optstrategy(
SMACrossVolumeStrategy,
maperiod_small=range(2, 10),
maperiod_big=range(10, 20),
)
data = bt.feeds.GenericCSVData(
dataname='eur_usd_1d.csv',
separator=',',
dtformat=('%Y%m%d'),
tmformat=('%H%M00'),
datetime=0,
time=1,
open=2,
high=3,
low=4,
close=5,
volume=6,
openinterest=-1
)
# data = bt.feeds.YahooFinanceData(dataname='YHOO', fromdate=datetime(2011, 1, 1),
# todate=datetime(2012, 12, 31))
cerebro.adddata(data)
cerebro.addsizer(bt.sizers.FixedSize, stake=50)
# cerebro.addstrategy(SimpleSMAStrategy)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# cerebro.run()
# cerebro.plot()
authors: ["lifanov.a.v@gmail.com"] | author: lifanov.a.v@gmail.com

---
blob_id: d51989b8a2d4b06d6222328761a11df153f3d1d5 | directory_id: 653fa1ead69f115d458ceb486374a63e240dc298
path: /ana.py | content_id: 8dc4c703e74e83dcd8c91623a56904499bf9fbcb
detected_licenses: ["MIT"] | license_type: permissive | repo_name: CanDenizKas/teksayiguncelleme
snapshot_id: ab9d809e6fd7bb7836d5461088349600bb889858 | revision_id: 92cc24b2bb2a3933a3f3286f11b4c3db909d4a16 | branch_name: refs/heads/main
visit_date: 2023-02-27T12:34:47.394193 | revision_date: 2021-02-06T12:28:22 | committer_date: 2021-02-06T12:28:22
github_id: 335,058,234 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 312 | extension: py
content:
bos_liste = []
top_sayi = int(input('Toplam kaç sayı olsun ? :: '))
for n in range(top_sayi):
    sayilar = int(input('Sayi Gir '))
    bos_liste.append(sayilar)
# keep only the odd numbers; max(bos_liste) alone would also count even values
tek_sayilar = [j for j in bos_liste if j % 2 != 0]
if tek_sayilar:
    print("Listedeki En Yüksek Tek Sayı :", max(tek_sayilar))
authors: ["candenizkas@gmail.com"] | author: candenizkas@gmail.com

---
blob_id: f7ecdb2d699ad87445645dbd7746ea96b41b3189 | directory_id: 56b511013e480879810c732f3190f5746b0aa733
path: /TP2-Agentes Racionales/CODE/randomagent.py | content_id: 5b93c5573b903541efd6fdc4733554bf1db06a14
detected_licenses: [] | license_type: no_license | repo_name: Fabriexe/ia-uncuyo-2021
snapshot_id: 86e30231aabd07cddebf16b62baba94ef771e3e6 | revision_id: 6a32f2f5eb1e281148fbc1c0f6a659fc1e392f04 | branch_name: refs/heads/main
visit_date: 2023-08-23T00:07:40.256247 | revision_date: 2021-11-06T05:37:46 | committer_date: 2021-11-06T05:37:46
github_id: 395,062,448 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,702 | extension: py
content:
from enviroment import *
from random import randint  # randint is used in think(); imported explicitly rather than relying on the star import
class randomagent:
def __init__(self,env,life,x,y):
self.env=env
self.reslife=life
self.posX=x
self.posY=y
self.cleaned=0
def perspectiva(self):
acciones=[]
X=self.env.sX
Y=self.env.sY
if self.posX>0 and self.posX<X-1:
acciones.append(1)
acciones.append(2)
elif self.posX==0:
acciones.append(2)
else:
acciones.append(1)
if self.posY>0 and self.posY<Y-1:
acciones.append(3)
acciones.append(4)
elif self.posY==0:
acciones.append(3)
else:
acciones.append(4)
return acciones
def nada(self):
print("",end="")
def suck(self):
self.env.clean(self.posX,self.posY)
self.cleaned+=1
def arriba(self):
self.posY+=1
def derecha(self):
self.posX-=1
def abajo(self):
self.posY-=1
def izquierda(self):
self.posX+=1
def performance(self):
return ("Lo limpiado es: "+str(self.cleaned)+" cuadriculas ")
def think(self):
acci=self.perspectiva()
        acci.append(5)  # add two more possible actions (suck and do nothing) and pick one at random
acci.append(6)
num=randint(0,len(acci)-1)
if acci[num]==1:
self.derecha()
elif acci[num]==2:
self.izquierda()
elif acci[num]==3:
self.arriba()
elif acci[num]==4:
self.abajo()
elif acci[num]==5:
self.suck()
elif acci[num]==6:
self.nada()
self.reslife-=1
authors: ["fabricioexe54@gmail.com"] | author: fabricioexe54@gmail.com

---
blob_id: 42e05df749e38cf8218a779f4fdd5ae120642485 | directory_id: 33170d202d96ae9d6a98f6d9be41e87361ba3bd3
path: /demo7_tensorboard.py | content_id: 48f7564472e1b793c2ac3c4ad1607cb8c603b702
detected_licenses: [] | license_type: no_license | repo_name: Seancheey/tensorflow-practice
snapshot_id: cfdd2338f6d139491c5dda361b74beef64ecec01 | revision_id: a11f1c8f14e4ccf8d92b8e051ac4cf95b0695607 | branch_name: refs/heads/master
visit_date: 2021-01-01T16:18:00.961347 | revision_date: 2017-07-24T12:33:55 | committer_date: 2017-07-24T12:33:55
github_id: 97,805,621 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,876 | extension: py
content:
import tensorflow as tf
import subprocess
import numpy as np
### define a neural network whose graph can be displayed in TensorBoard, the visualization tool that ships with TensorFlow
# define a add layer function
def add_layer(inputs, in_size, out_size, activation_function=tf.nn.relu):
# define name scope to help visualizing neuron layers
with tf.name_scope('layer'):
with tf.name_scope('weights'):
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
with tf.name_scope('biases'):
Biases = tf.Variable(tf.random_normal([1, out_size]))
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.add(tf.matmul(inputs, Weights), Biases)
return activation_function(Wx_plus_b)
# define name scope to help visualizing input block
with tf.name_scope('inputs'):
# define inputs with their names
xs = tf.placeholder(tf.float32, [None, 1], 'x-input')
ys = tf.placeholder(tf.float32, [None, 1], 'y-input')
layer1 = add_layer(xs, 1, 10)
layer2 = add_layer(layer1, 10, 1)
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - layer2), reduction_indices=[1]))
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(0.03).minimize(loss)
# create a x-y relationship of y = x**2 - 0.5 with some noise
x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)
y_data = np.square(x_data) - 0.5 + noise
with tf.Session() as session:
session.run(tf.global_variables_initializer())
session.run(train_step, feed_dict={xs: x_data, ys: y_data})
# write the graph summary into logs/
writer = tf.summary.FileWriter('logs/', session.graph)
# execute command to open a 6006 port in localhost that visualize data
subprocess.getoutput('tensorboard --logdir logs')
# after that, type 0.0.0.0:6006/ or localhost:6006/ in any browser to check tensorboard
authors: ["adls371@163.com"] | author: adls371@163.com

---
blob_id: d82f94f00d325bf9852dbc2c2fa28d95a8432598 | directory_id: a1f8980902374340cbe646a0685135681e2aec28
path: /BBS/urls.py | content_id: 2bfc188803720225a010d65f130b283ce472d556
detected_licenses: [] | license_type: no_license | repo_name: gaofei207/BBS_SE
snapshot_id: 99b275fac85de3ff9a2f9acdfe147be53d022cbe | revision_id: 7110c8491c2518b23fbe2895501c5dd2770f200d | branch_name: refs/heads/master
visit_date: 2023-01-14T03:33:29.837211 | revision_date: 2020-11-11T13:17:18 | committer_date: 2020-11-11T13:17:18
github_id: 109,016,725 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,102 | extension: py
content:
"""BBS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from app01 import views,urls
import app01
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^register/$', views.Register),
url(r'^login/$', views.Login),
url(r'^acc_login/$', views.acc_login),
url(r'^acc_register/$', views.acc_register),
url(r'^logout/$', views.logout_view),
url(r'',include(app01.urls) ),
url(r'^search/', include('haystack.urls')),
]
authors: ["gaofei207@github.com"] | author: gaofei207@github.com

---
blob_id: d22c6a57747ea40840a81f5d73cb4e36e344a310 | directory_id: ed84a727dfcde9481668d0317ca3b2c80e58ad0f
path: /set2_mul.py | content_id: c892b8f895e2d69f6fd2168437167d2ccee2363b
detected_licenses: [] | license_type: no_license | repo_name: Boorneeswari/GUVI_PROGRAMS
snapshot_id: 2458c47da89a3cf1450b5b20eea7e34fba7dd051 | revision_id: 0f1648f95064043bba93195062954a0afe67ed66 | branch_name: refs/heads/master
visit_date: 2020-03-22T21:15:50.236733 | revision_date: 2018-10-03T13:05:47 | committer_date: 2018-10-03T13:05:47
github_id: 140,672,212 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 76 | extension: py
content:
num = int(input())
mul = 5
x = 0
for i in range(1, mul + 1):
    x = num * i
    print(x, end=' ')  # Python 3 print function; "print x," was Python 2 syntax
authors: ["noreply@github.com"] | author: noreply@github.com

---
blob_id: 0ca81aac25082737c0a5c15d50d0084496ebc998 | directory_id: e69faa154bd6e4fecacde8ef4365956678924a92
path: /stk-screen/bin/wheel | content_id: 94bb4e1318eb3b6b195e0db27b324f648411c5a6
detected_licenses: [] | license_type: no_license | repo_name: aman-saha/stk-app
snapshot_id: 239e7258dbe03d939b56cc2e240bc5b0f179a57a | revision_id: 83bbad871c600c383c1618ed13c6e4f49a32f607 | branch_name: refs/heads/master
visit_date: 2022-04-02T08:11:37.944564 | revision_date: 2020-02-13T03:20:16 | committer_date: 2020-02-13T03:20:16
github_id: 229,473,875 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 255
content:
#!/Users/amsaha/workspaces/git_proj/stk-app/stk-screen/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
authors: ["aman97ram@gmail.com"] | author: aman97ram@gmail.com

---
blob_id: 40d4849bbc2eaf4a84128ba8c1fdc12a9548dde1 | directory_id: 16450d59c820298f8803fd40a1ffa2dd5887e103
path: /baekjoon/5622.py | content_id: d81981f661aa57dc341a4a724cc55527ebc3158a
detected_licenses: [] | license_type: no_license | repo_name: egyeasy/TIL_public
snapshot_id: f78c11f81d159eedb420f5fa177c05d310c4a039 | revision_id: e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38 | branch_name: refs/heads/master
visit_date: 2021-06-21T01:22:16.516777 | revision_date: 2021-02-02T13:16:21 | committer_date: 2021-02-02T13:16:21
github_id: 167,803,551 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,529 | extension: py
content:
"""
상근이의 할머니는 아래 그림과 같이 오래된 다이얼 전화기를 사용한다.
전화를 걸고 싶은 번호가 있다면, 숫자를 하나를 누른 다음에 금속 핀이 있는 곳 까지 시계방향으로 돌려야 한다. 숫자를 하나 누르면 다이얼이 처음 위치로 돌아가고, 다음 숫자를 누르려면 다이얼을 처음 위치에서 다시 돌려야 한다.
숫자 1을 걸려면 총 2초가 필요하다. 1보다 큰 수를 거는데 걸리는 시간은 이보다 더 걸리며, 한 칸 옆에 있는 숫자를 걸기 위해선 1초씩 더 걸린다.
상근이의 할머니는 전화 번호를 각 숫자에 해당하는 문자로 외운다. 즉, 어떤 단어를 걸 때, 각 알파벳에 해당하는 숫자를 걸면 된다. 예를 들어, UNUCIC는 868242와 같다.
할머니가 외운 단어가 주어졌을 때, 이 전화를 걸기 위해서 필요한 시간을 구하는 프로그램을 작성하시오.
> 입력
첫째 줄에 알파벳 대문자로 이루어진 단어가 주어진다. 단어는 2글자~15글자로 이루어져 있다.
UNUCIC
> 출력
첫째 줄에 다이얼을 걸기 위해서 필요한 시간을 출력한다.
36
"""
num_list = [2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9]
word = input()
result = 0
for i in word:
idx = ord(i) - 65
result += num_list[idx] + 1
print(result)
# Thoughts
# 1. One could also look for the digit-letter pattern and write this with less manual table-building.
authors: ["dz1120@gmail.com"] | author: dz1120@gmail.com

---
blob_id: f0d87bc6ed10a1caabc7f8732994e85fec06d724 | directory_id: a416bcbba315b8a407fcd21dee082cceee8a6e5e
path: /print/print.py | content_id: ab0c381ba973a8ce80dcb391d544ecbcd17f8399
detected_licenses: ["MIT"] | license_type: permissive | repo_name: Devendrabhat/print
snapshot_id: f3e05f86fb5627a8b416dc7841c0a6c1b1fa9391 | revision_id: 1316e55dcedb58ebd552ea8c5587cf20bec825c5 | branch_name: refs/heads/master
visit_date: 2020-04-12T11:19:57.951634 | revision_date: 2018-12-15T22:56:46 | committer_date: 2018-12-15T22:56:46
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,917 | extension: py
content:
"""ASCII Large Text and ANSI escape sequence print overloading
Examples
--------
print("Testing", RED, BOLD)
print("ASCII Art", BLUE+BR, BIG)
"""
import sys
import os
import re
from . import putil
# Try to import pyfiglet
try:
import pyfiglet
def __pf_render(s, f):
return s if f is None else pyfiglet.Figlet(f).renderText(s)
except ImportError:
def __pf_render(s, f):
return s
print("PyFiglet not found (Large ASCII word art disabled)")
# Check for windows
if os.name == 'nt':
try:
import colorama
except ImportError:
raise Exception(
"Must have colorama installed for color translation on windows "
"systems")
colorama.init(wrap=False)
__stream = colorama.AnsiToWin32(sys.stderr).stream
else:
__stream = None
def __esc(code):
"""Get ANSI escape string"""
return "\u001b[{c}m".format(c=code)
def __get_font(args):
return next((i for i in args if type(i) == str), None)
def render(s, *args):
"""Render text with color and font"""
mods = "".join([__esc(i) for i in args if type(i) == int])
s = mods + __pf_render(s, __get_font(args))
# Remove trailing newline
    if s and s[-1] == '\n':  # guard against empty strings
s = s[:-1]
# Add escape
return s + __esc(0)
# Save print statement
__print = print
# Regex to clear ANSI escape characters
ANSI_ESCAPE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
def print(s, *args):
"""Print statement overloading"""
global __stream
# Linux
if __stream is None:
__print(render(str(s), *args))
# Windows
else:
__print(render(str(s), *args), file=__stream)
# Logging
if putil.LOG_FILE is not None:
with open(putil.LOG_FILE, 'a') as log:
global ANSI_ESCAPE
log.write(ANSI_ESCAPE.sub('', render(s)) + '\n')
log.close()
if __name__ == "__main__":
print("print.py", )
authors: ["thetianshuhuang@gmail.com"] | author: thetianshuhuang@gmail.com

---
blob_id: c5d6b42f2836cb90508dfb929e9c065b433edca9 | directory_id: 390ef873925b9a302b31f8904de0a4995cdab4c9
path: /leerTabla.py | content_id: e8dfdab38a6f463336c71ccbc24387295c49dfe3
detected_licenses: ["MIT"] | license_type: permissive | repo_name: AbeJLazaro/MinimizacionAF
snapshot_id: 26a1d71713600e344541dbd30185f865814853c9 | revision_id: 1fed781e401c9c929618b6641b202cbeba34c1af | branch_name: refs/heads/main
visit_date: 2023-01-03T17:18:56.365338 | revision_date: 2020-10-27T05:53:40 | committer_date: 2020-10-27T05:53:40
github_id: 307,597,420 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,167 | extension: py
content:
'''
Author: Lázaro Martínez Abraham Josué
Version date: 26-10-2020
Program: implementation of a function that reads a table
representing the transition table of a finite automaton (AF)
'''
def leerDatos(nombre):
    '''Reads the transition-table file and represents it as
    dictionaries so the automaton can later be minimized.
    Parameters
    nombre: file name
    return: transition table
    '''
try:
        # read the file
File = open(nombre,"r")
datos = File.readlines()
File.close()
except IOError as ioe:
print("Error al abrir el archivo")
print(ioe)
return
except Exception as e:
print("Ocurrió otro error")
raise e
else:
        # strip the newline characters
        for i in range(len(datos)):
            datos[i] = datos[i].replace("\n", "")
        # recover the alphabet characters from the header row
        caracteres = datos.pop(0).split(",")[1:]
        # initialize the transition table
tablaTransicion={}
        # for each line in the data list
        for línea in datos:
            # split the fields on the commas
            información = línea.split(",")
            # pop the first element, which represents the state
            estado = información.pop(0)
            # build the transition map from the remaining fields and
            # the character list
            listaTransicion = dict(zip(caracteres, información))
            # check whether the state carries a *, marking it in the table
            # as an accepting state
if "*" in estado:
listaTransicion["aceptación"]=True
estado = estado.replace("*","")
else:
listaTransicion["aceptación"]=False
if "->" in estado:
listaTransicion["inicio"]=True
estado = estado.replace("->","")
else:
listaTransicion["inicio"]=False
            # add this information to the transition table
tablaTransicion[estado]=listaTransicion
return tablaTransicion
if __name__ == '__main__':
nombre = "tabla.csv"
a=leerDatos(nombre)
for estado,transiciones in a.items():
print(estado,":",transiciones)
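
# A hypothetical tabla.csv illustrating the format leerDatos() expects: the
# header row lists the alphabet, '->' marks the start state and '*' marks an
# accepting state. This sample is illustrative, not taken from the repository.
#
#   estados,a,b
#   ->q0,q1,q0
#   q1,q1,q2
#   *q2,q2,q2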
authors: ["noreply@github.com"] | author: noreply@github.com

---
blob_id: a3ee575e7318f6ded972fa7288d9b79b53f4f0e7 | directory_id: 302442c32bacca6cde69184d3f2d7529361e4f3c
path: /cidtrsend-all/stage2-model/pytz/zoneinfo/Navajo.py | content_id: 1b27ae20abe14d05ef0286e1b3a242389516aafd
detected_licenses: [] | license_type: no_license | repo_name: fucknoob/WebSemantic
snapshot_id: 580b85563072b1c9cc1fc8755f4b09dda5a14b03 | revision_id: f2b4584a994e00e76caccce167eb04ea61afa3e0 | branch_name: refs/heads/master
visit_date: 2021-01-19T09:41:59.135927 | revision_date: 2015-02-07T02:11:23 | committer_date: 2015-02-07T02:11:23
github_id: 30,441,659 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,131 | extension: py
content:
'''tzinfo timezone information for Navajo.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Navajo(DstTzInfo):
'''Navajo timezone definition. See datetime.tzinfo for details'''
zone = 'Navajo'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,9,0,0),
d(1918,10,27,8,0,0),
d(1919,3,30,9,0,0),
d(1919,10,26,8,0,0),
d(1920,3,28,9,0,0),
d(1920,10,31,8,0,0),
d(1921,3,27,9,0,0),
d(1921,5,22,8,0,0),
d(1942,2,9,9,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,8,0,0),
d(1965,4,25,9,0,0),
d(1965,10,31,8,0,0),
d(1966,4,24,9,0,0),
d(1966,10,30,8,0,0),
d(1967,4,30,9,0,0),
d(1967,10,29,8,0,0),
d(1968,4,28,9,0,0),
d(1968,10,27,8,0,0),
d(1969,4,27,9,0,0),
d(1969,10,26,8,0,0),
d(1970,4,26,9,0,0),
d(1970,10,25,8,0,0),
d(1971,4,25,9,0,0),
d(1971,10,31,8,0,0),
d(1972,4,30,9,0,0),
d(1972,10,29,8,0,0),
d(1973,4,29,9,0,0),
d(1973,10,28,8,0,0),
d(1974,1,6,9,0,0),
d(1974,10,27,8,0,0),
d(1975,2,23,9,0,0),
d(1975,10,26,8,0,0),
d(1976,4,25,9,0,0),
d(1976,10,31,8,0,0),
d(1977,4,24,9,0,0),
d(1977,10,30,8,0,0),
d(1978,4,30,9,0,0),
d(1978,10,29,8,0,0),
d(1979,4,29,9,0,0),
d(1979,10,28,8,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
_transition_info = [
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MWT'),
i(-21600,3600,'MPT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Navajo = Navajo()
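# Illustrative use of the instance above (a sketch, not part of the
# generated file; assumes pytz is installed and the zone is registered):
#
#     from datetime import datetime
#     import pytz
#     tz = pytz.timezone('Navajo')
#     tz.localize(datetime(2015, 6, 1, 12, 0))  # inside the 2015 DST window
#                                               # in the table above -> MDT (UTC-6)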
|
[
"learnfuzzy@gmail.com"
] |
learnfuzzy@gmail.com
|
0fa7e3fc6fd950f1f23e646d84d728185e1189c8
|
b4edc965851727001349d72009bf833443754696
|
/urls/settings.py
|
d2026cad2b9a1ffc966fcace37646e5a1b03355c
|
[] |
no_license
|
codecov-test/urls
|
50491e56fc2b62cb6d31b9e3fc6dc886ab9e58cc
|
2762377fd90d5fa77729b796d2687705c7733b4b
|
refs/heads/master
| 2020-12-30T19:58:00.122749
| 2016-04-01T23:24:11
| 2016-04-01T23:24:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,744
|
py
|
# Django settings for urls project.
import os
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Dennis Hedegaard', 'dennis@dhedegaard.dk'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'urls.db',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['u.neo2k.dk', 'urls.neo2k.dk',
'u.dhedegaard.dk', 'urls.dhedegaard.dk']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Copenhagen'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(ROOT, 'staticfiles/')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'jw-_tia*(q^e^pb^5n7q492sxpm%k*xg!0ya7uuh$%htpd7*7g'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'urls.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'urls',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
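# Illustrative of how these values are consumed elsewhere in the project
# (a sketch, not part of the settings module):
#
#     from django.conf import settings
#     settings.ALLOWED_HOSTS  # -> ['u.neo2k.dk', 'urls.neo2k.dk', ...]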
|
[
"dennis@dhedegaard.dk"
] |
dennis@dhedegaard.dk
|
7db34ccaf7b764230d55ed45cdf7469ea3208004
|
5982c00a0b0a1e46f46881f9fa883259dcc56fc1
|
/blog/migrations/0005_auto_20201018_0823.py
|
8b34aa117d4fdc0b70ce78c789800439d0e3f5a1
|
[] |
no_license
|
k-ken-source/FacultyNetwork
|
8e8e092b942eac07d957709585f1c27da60d5ac1
|
ee82473ac7b471026654cbce9384db3e65715961
|
refs/heads/main
| 2023-03-03T19:13:05.444697
| 2021-02-17T06:13:56
| 2021-02-17T06:13:56
| 339,133,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Generated by Django 2.0.7 on 2020-10-18 08:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_post_overview'),
]
operations = [
migrations.AlterField(
model_name='post',
name='thumbnail',
field=models.ImageField(default='thumbDefault.png', upload_to='Thumbnails'),
),
]
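# Applied with Django's standard migration runner (illustrative):
#   $ python manage.py migrate blog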
|
[
"chirag21works@gmail.com"
] |
chirag21works@gmail.com
|
21ee6ed1bf7eb08ddecbbb10cc3d1e6ec75e5a53
|
9050a892c73b89a1f92d126f42261fd7ca8fe780
|
/graph.py
|
2da5fb067090baff6e14aa1e200574e54d52d339
|
[] |
no_license
|
MatishaKansal/Pro--107
|
42050ff149e147f79ab0e89b7cc41db83780d2fc
|
2b9d7d9a79f47c1225f6a58d16ff9638bdf32001
|
refs/heads/main
| 2023-06-19T01:51:25.512222
| 2021-07-17T16:56:25
| 2021-07-17T16:56:25
| 386,993,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
import csv
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
df = pd.read_csv("data.csv")
mean = df.groupby("level")["attempt"].mean()
fig = px.scatter(df, x = "level", y= "attempt", color="attempt", size="student_id")
fig.show()
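# The groupby mean and the graph_objects import above are otherwise unused;
# a sketch of how they could be combined (inserted before fig.show()),
# overlaying the per-level mean attempts on the scatter:
#
#     fig.add_trace(go.Scatter(x=mean.index, y=mean.values,
#                              mode="lines+markers", name="mean attempt"))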
|
[
"noreply@github.com"
] |
noreply@github.com
|
d97bc67342b303dfc707649f0473beb996146c1d
|
58374bb4f0a77aff538c466615ec86273fc7a106
|
/flight_search.py
|
8dc1eb5073ceb3d3755b5fafda0dd9bd8db4b702
|
[] |
no_license
|
cheasingh/flight_deal
|
d963911511742691c78b939150dbbd47c02dc7a1
|
b300ccb152116c60c411dddf83e018d5fec794e6
|
refs/heads/master
| 2023-03-01T21:16:15.559210
| 2021-02-06T12:09:21
| 2021-02-06T12:09:21
| 335,671,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
import requests
import os
class FlightSearch:
# This class is responsible for talking to the Flight Search API.
def __init__(self):
self.param = {
"apikey": os.environ["TEQ_API"],
"fly_from": "PNH"
}
self.endpoint = "https://tequila-api.kiwi.com/v2/search"
def search(self, destination, ddate, **kwargs):
    self.param["fly_to"] = destination
    self.param["date_from"] = ddate
    if "rdate" in kwargs:  # optional return date
        self.param["date_to"] = kwargs["rdate"]
r = requests.get(self.endpoint, self.param)
return r.json()
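# Illustrative usage (a sketch; requires the TEQ_API environment variable
# and network access; the destination code and dates are hypothetical):
#
#     fs = FlightSearch()
#     result = fs.search("LON", "01/06/2021", rdate="15/06/2021")
#     print(result.get("data", []))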
|
[
"cheasingh168@gmail.com"
] |
cheasingh168@gmail.com
|
94617181fda33d026f84aa814c8003baa76f717b
|
8f91a726c40b6c270c5d34077f45dc4ecfbc1aeb
|
/Envs/venv/bin/python-config
|
2083d5caca77c3ffd15707ab75d30e52fa007076
|
[
"MIT"
] |
permissive
|
vaibhav-rbs/git-towards-greatness
|
08698e3397813e464380f22203250697927e7af6
|
164214f6482bd016029f2830b56859266311bf5b
|
refs/heads/master
| 2021-01-12T14:10:39.227194
| 2016-10-26T22:14:35
| 2016-10-26T22:14:35
| 69,923,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,376
|
#!/Users/vaibhavchauhan/Documents/python-course.eu/Envs/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
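# Example invocations (illustrative; output depends on the local build):
#   $ python-config --prefix
#   $ python-config --includes --cflags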
|
[
"vaibhav@ixsystems.com"
] |
vaibhav@ixsystems.com
|
|
871aee1b131de9d12ca56ce93d8fe0751144a789
|
520365eb45cc298d0a20b44749482c3fe553d034
|
/workflows/assembly.py
|
85796450ea84cf5214f0f286f38c0f1488a2c505
|
[
"Artistic-2.0"
] |
permissive
|
marbl/MetaCompass
|
91e8a7f8028b1a719abf595c4389473a6bd0f5d8
|
2cdeb979ccb5264f0dbac868b4c884687424e3f5
|
refs/heads/master
| 2021-07-25T23:45:16.293277
| 2021-01-31T23:58:48
| 2021-01-31T23:58:48
| 31,922,199
| 35
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,564
|
py
|
#!/usr/bin/env python3
import os,sys,string,subprocess,signal,shutil
#psutil
import argparse
mcdir = sys.path[0]
parser = argparse.ArgumentParser(description='snakemake and metacompass params')
group1 = parser.add_argument_group('required')
#group1.add_argument("-s",'--snakefile', help='metacompass rules file',default="",nargs='?',required=1,type=str)
group1.add_argument("-c",'--config', help='config (json) file, set read length etc',default="",nargs='?',required=0,type=str)
group1.add_argument("-S",'--Samples', help='Provide file with fq reads (1 file per line)',default="", nargs='?',required=0,type=str)
group1.add_argument("-P",'--paired', help='Provide comma separated list of paired reads (r1.1.fq,r1.2.fq)',default="", nargs='?',required=0,type=str)
group1.add_argument("-U",'--unpaired', help='Provide comma separated list of unpaired reads (r1.fq,r2.fq,r3.fq)',default="", nargs='?',required=0,type=str)
group5 = parser.add_argument_group("metacompass")
group5.add_argument("-d",'--db', help='marker gene database directory',default="", nargs='?',type=str)
group5.add_argument("-i",'--iterations', type=int, help='num iterations',default=1, nargs='?')
group5.add_argument("-r",'--ref', help='reference genomes',default="NA",nargs='?')
group5.add_argument("-p",'--pickref', help='depth or breadth',default="breadth",nargs='?')
group5.add_argument("-m",'--mincov', help='min coverage to assemble',default="3",nargs='?',type=int)
group5.add_argument("-g",'--minctglen', help='min contig length',default="300",nargs='?',type=int)
group5.add_argument("-l",'--readlen', help='max read length',default="100",nargs='?',type=int)
group5.add_argument("-f",'--filter',help='filter recruited genomes with mash (experimental)',default=False,required=0, type=float)
group2 = parser.add_argument_group('output')
group2.add_argument("-b",'--clobber', help='clobber output directory (if exists?)',default=False,required=0,action='store_true')
group2.add_argument("-o",'--outdir', help='output directory? (cwd default)',default="./", nargs='?',type=str,required=1)
group2.add_argument("-e",'--sampleid', help='sample id (fq prefix is default)',default="NA", nargs='?',type=str,required=0)
group2.add_argument("-v",'--verbose', help='verbose',default=False,required=0,action='store_true')
group2.add_argument("-k",'--keepoutput', help='keep all output generated (default is to delete all but final fasta files)',default=False,required=0,action='store_true')
group3 = parser.add_argument_group('performance')
group3.add_argument("-t",'--threads', type=int,help='num threads',default=1, nargs='?')
group3.add_argument("-q",'--qsub', help='',default="", nargs='?',required=0)
group4 = parser.add_argument_group('snakemake')
group4.add_argument("-F",'--Force', help='force snakemake to rerun',default=False,required=0,action='store_true')
group4.add_argument("-u",'--unlock',help='unlock snakemake locks',default=False, required=0,action='store_true')
args = parser.parse_args()
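# Example invocation (illustrative; the file names are hypothetical):
#   python3 assembly.py -P r1.1.fq,r1.2.fq -o ./out -t 8 -r refs.fasta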
minctglen = args.minctglen
db = str(args.db)
if db != "" and not os.path.isdir(db):
print("provided marker gene database directory %s does not exist; try again"%(db))
sys.exit(1)
mincov = args.mincov
readlen=args.readlen
clobber = args.clobber
unlock = args.unlock
threads = args.threads
iterations = args.iterations
ref = args.ref
mfilter = args.filter
keepoutput = args.keepoutput
#if args.filter:
#why todd?
# #empirically determined on datasets with known truth, right way to do this is with contains operation
# mfilter = 0.26
#snakefile = args.snakefile
config = args.config
samples = args.Samples.replace(" ","")
unpaired = args.unpaired
paired = args.paired
sampleid = args.sampleid
qsub = args.qsub
force = args.Force
verbose = args.verbose
outdir = args.outdir
pickref = args.pickref
prefix = "."
retry = args.retry
if not os.path.exists(outdir):
os.makedirs(outdir)
prefix = outdir
else:
if os.path.exists(outdir) and not clobber:
print("ERROR: specified output directory %s exists! please remove first, or run with --clobber"%(outdir))
sys.exit(1)
elif os.path.exists(outdir) and force:
os.system("rm -rf %s/*"%(outdir))
os.system("mkdir %s"%(outdir))
prefix = outdir
elif os.path.exists(outdir):
prefix = outdir
#1. ensure required files are present
#if not os.path.exists(snakefile):
# print("ERROR: snakefile %s not found!"%(snakefile))
# sys.exit(1)
if config == "":
config = "%s/snakemake/config.json"%(mcdir)
elif not os.path.exists(config):
print("ERROR: configfile %s not found!"%(config))
sys.exit(1)
if ref != "NA":
print("confirming file containing reference genomes exists..")
if not os.path.exists(ref):
print("ERROR: reference genome file %s not found!"%(ref))
sys.exit(1)
else:
os.system("cp %s %s/%s"%(ref,prefix,ref.split(os.sep)[-1]))
print("[OK]")
#for reads in samples, check!
#if not os.path.exists
#print("confirming sample file exists..")
#if "," not in samples and not os.path.exists(samples):
# print("ERROR: sample file (-S) %s not found!"%(samples))
# sys.exit(1)
#else:
# print("[OK]")
#2. check for snakemake, bowtie2
print("checking for dependencies (Bowtie2, Blast, kmermask, Snakemake, etc)")
if len(qsub) > 0:
print("qsub--->",end="")
ret = subprocess.call("which qsub",shell=True)
if ret == 0:
print("[OK]")
qsub += " --jobs 4000"
else:
print("[FAIL]")
qsub=""
print("Bowtie2--->",end="")
sout = open("out.txt",'w')
ret = subprocess.call("bowtie2 --version",stdout=sout,shell=True)
if ret == 0:# and "2.2.9" in sout:
#print(stdout)
#sys.exit(1)
print("[OK]")
else:
print("[FAIL]")
sys.exit()
print("Blast+--->",end="")
ret = subprocess.call("which blastn",shell=True)
if ret == 0:
print("[OK]")
else:
print("[FAIL]")
sys.exit()
print("kmer-mask--->",end="")
ret = subprocess.call("which kmer-mask",shell=True)
if ret == 0:
print("[OK]")
else:
print("[FAIL]")
sys.exit()
#print("bedtools--->",end="")
#ret = subprocess.call("which bedtools",shell=True)
#if ret == 0:
# print("[OK]")
#else:
# print("[FAIL]")
# sys.exit()
if mfilter < 1.0:
print("mash--->",end="")
ret = subprocess.call("which mash",shell=True)
if ret == 0:
print("[OK]")
else:
print("[FAIL]")
sys.exit()
print("Snakemake--->",end="")
ret = subprocess.call("which snakemake",shell=True)
if ret == 0:
print("[OK]")
else:
print("[FAIL]")
sys.exit()
#3. process sample file
#3 paths: sample file, -P, or -U
#-S or -P & -U
allsamples = []
if samples != "" and (paired != "" or unpaired != ""):
print("ERROR: Must specific -S or -P/-U; please correct and relaunch")
sys.exit()
if samples != "" and (paired == "" and unpaired == ""):
samplesf = open(samples,'r')
for line in samplesf.readlines():
allsamples.append(line.strip())
elif samples == "" and paired != "" and unpaired == "":
#only paired end
if not "," in paired:
print("ERROR: --paired reads need to be provided as -P r1.1.fq,r1.2.fq")
sys.exit()
elif not os.path.exists(paired.split(",")[0]) or not os.path.exists(paired.split(",")[1]):
print("ERROR: could not locate --paired fq files %s,%s"%(paired.split(",")[0],paired.split(",")[1]))
sys.exit()
allsamples = paired.split(",")
elif samples == "" and paired == "" and unpaired != "":
#only unpaired
allfiles = []
if "," in unpaired:
allfiles = unpaired.split(",")
else:
allfiles.append(unpaired)
for ufile in allfiles:
if not os.path.exists(ufile):
print("ERROR: could not locate --unpaired file %s"%(ufile))
sys.exit()
allsamples = allfiles
elif samples == "" and paired != "" and unpaired != "":
#only paired end
if not "," in paired:
print("ERROR: --paired reads need to be provided as -P r1.1.fq,r1.2.fq")
sys.exit()
elif not os.path.exists(paired.split(",")[0]) or not os.path.exists(paired.split(",")[1]):
print("ERROR: could not locate --paired fq files %s,%s"%(paired.split(",")[0],paired.split(",")[1]))
sys.exit()
allsamples = paired.split(",")
#only unpaired
allfiles = []
if "," in unpaired:
allfiles = unpaired.split(",")
else:
allfiles = [unpaired]
for ufile in allfiles:
if not os.path.exists(ufile):
print("ERROR: could not locate --unpaired file %s"%(ufile))
sys.exit()
if len(allfiles) != 0:
allsamples.extend(allfiles)
#allsamples = []
#if "," not in samples:
# samplesf = open(samples,'r')
# for line in samplesf.readlines():
# allsamples.append(line.replace("\n",""))
#else:
# allsamples = [samples]
#CURRENTLY only single fastq file is supported
#s1.fq
#s2.fq
#3. for all samples, all iterations, go!
## process one sample at a time, so that we can track input/output easily and run in parallel if we want (qsub for each)
i = 0
isok = False
while i < iterations:
for s1 in allsamples[0:1]:
s1id = s1.split(os.sep)[-1].split(".")[0]
if sampleid != "NA":
s1id = sampleid
if force:
if os.path.exists("%s.fasta"%(s1id)):
os.system("rm %s.fasta"%(s1id))
if os.path.exists("%s.marker.match.1.fastq"%(s1id)):
os.system("rm %s.marker.match.1.fastq"%(s1id))
if os.path.exists("%s.marker.match.2.fastq"%(s1id)):
os.system("rm %s.marker.match.2.fastq"%(s1id))
os.system("rm -rf ./%s.*.assembly.out/"%(s1id))
elif os.path.exists("%s/%s.0.assembly.out/run.ok"%(prefix,s1id)):
#run finished ok, don't allow to clobber
print("ERROR: Output dir (%s/%s.0.assembly.out) exists and contains a previous, successful run. Please specify alternate output directory or force run with --force"%(prefix,s1id))
sys.exit(1)
elif retry and os.path.exists("%s/%s.0.assembly.out/run.fail"%(prefix,s1id)):
#run finished ok, don't allow to clobber
print("Output dir (%s/%s.0.assembly.out) exists and contains a previous, failed run. Attempting to resume failed run.."%(prefix,s1id))
elif not retry and os.path.exists("%s/%s.0.assembly.out/run.fail"%(prefix,s1id)):
print("ERROR: Output dir (%s/%s.0.assembly.out) exists and contains a previous, failed run. If you'd like to retry/resume this run, specify: --retry"%(prefix,s1id))
sys.exit(1)
if unlock:
#ret = subprocess.call("snakemake -r --verbose --config ref=%s.0.assembly.out/mc.refseq.fna --snakefile %s/snakemake/metacompass.iter0.unpaired.py --configfile %s --unlock"%(s1id,mcdir,config),shell=True)
cmd_ret="snakemake -r --verbose --reason --unlock --cores %d -a --configfile %s --config prefix=%s sample=%s pickref=breadth reference=%s mcdir=%s iter=%d length=%d mincov=%d minlen=%d mfilter=%f nthreads=%d ref=%s.0.assembly.out/mc.refseq.fna"%(threads,config,prefix,s1id,ref,mcdir,i,readlen,mincov,minctglen,mfilter,threads,s1id)
cmd_ret += " reads="
for fqfile in allsamples:
cmd_ret += str(fqfile)+","
cmd_ret = cmd_ret[:-1]
#todo:fix to work in all cases, add r1,r2,ru
if paired != "":
cmd_ret += " r1=%s r2=%s"%(paired.split(",")[0],paired.split(",")[1])
if unpaired != "":
cmd_ret += " ru=%s"%(unpaired)
if unpaired != "" and paired =="":
cmd_ret +=" --snakefile %s/snakemake/contigs.py"%(mcdir)
elif paired != "" and unpaired =="":
cmd_ret +=" --snakefile %s/snakemake/contigs.py"%(mcdir)
elif paired != "" and unpaired !="":
cmd_ret +=" --snakefile %s/snakemake/contigs.py"%(mcdir)
# elif samples =="":
#
ret = subprocess.call(cmd_ret,shell=True)
# ret = subprocess.call("snakemake -r --verbose --reason --cores %d -a --configfile %s --config prefix=%s sample=%s pickref=breadth reference=%s mcdir=%s iter=%d length=%d mincov=%d minlen=%d mfilter=%f nthreads=%d ref=%s.0.assembly.out/mc.refseq.fna --snakefile %s/snakemake/metacompass.iter0.unpaired.py reads=%s --unlock"%(threads,config,prefix,s1id,ref,mcdir,i,readlen,mincov,minctglen,mfilter,threads,s1id,mcdir,allsamples),shell=True)
# if paired != "":cmd += " r1=%s r2=%s"%(paired.split(",")[0],paired.split(",")[1])
if i == 0:
ret = 0
#todo: fix to work with diff types of reads?
if ref != "NA":
cmd = "snakemake --verbose --reason --cores %d -a --configfile %s --config prefix=%s sample=%s pickref=breadth reference=%s mcdir=%s iter=%d length=%d mincov=%d minlen=%d mfilter=%f nthreads=%d"%(threads,config,prefix,s1id,ref,mcdir,i,readlen,mincov,minctglen,mfilter,threads)
else:
cmd = "snakemake --verbose --reason --cores %d -a --configfile %s --config prefix=%s sample=%s pickref=breadth reference=%s/%s.%d.assembly.out/mc.refseq.fna mcdir=%s iter=%d length=%d mincov=%d minlen=%d mfilter=%f nthreads=%d "%(threads,config,prefix,s1id,prefix,s1id,i,mcdir,i,readlen,mincov,minctglen,mfilter,threads)
cmd += " reads="
for fqfile in allsamples:
cmd += str(fqfile)+","
cmd = cmd[:-1]  # strip the trailing comma, matching the other command builders
if paired != "":
cmd += " r1=%s r2=%s"%(paired.split(",")[0],paired.split(",")[1])
if unpaired != "":
cmd += " ru=%s"%(unpaired)
if ref != "NA":
if unpaired != "" and paired =="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
elif paired != "" and unpaired =="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
elif paired != "" and unpaired !="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
#todo: fix to work with diff types of reads..
elif samples =="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
else:
if unpaired != "" and paired =="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
elif paired != "" and unpaired =="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
elif paired != "" and unpaired !="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
#todo: fix to work with diff types of reads..
elif samples =="":
cmd += " --snakefile %s/snakemake/contigs.py"%(mcdir)
if verbose:
# unpaired reads -U?
cmd += " --verbose"
if retry:
cmd += " --rerun-incomplete"
else:
cmd += " --ignore-incomplete"
if len(qsub) > 0:
cmd += " --cluster %s"%(qsub)
else:
try:
ret = subprocess.Popen(cmd,shell=True)
ret.communicate()
except KeyboardInterrupt:
os.killpg(ret.pid,signal.SIGKILL)
except :
ret.returncode = 0
break
else:
ret = 0
if ref != "NA":
cmd = "snakemake --cores %d -a --configfile %s --config prefix=%s sample=%s reference=%s/%s.%d.assembly.out/contigs.pilon.fasta mcdir=%s iter=%d pickref=%s length=%d mincov=%d minlen=%d nthreads=%d"%(threads,config,prefix,s1id,prefix,s1id,i-1,mcdir,i,pickref,readlen,mincov,minctglen,threads)
else:
cmd = "snakemake --cores %d -a --configfile %s --config prefix=%s sample=%s reference=%s/%s.%d.assembly.out/contigs.pilon.fasta mcdir=%s iter=%d pickref=%s length=%d mincov=%d minlen=%d nthreads=%d"%(threads,config,prefix,s1id,prefix,s1id,i-1,mcdir,i,pickref,readlen,mincov,minctglen,threads)
cmd += " reads="
for fqfile in allsamples:
cmd += fqfile+","
cmd = cmd[:-1]
if paired != "":
cmd += " r1=%s r2=%s"%(paired.split(",")[0],paired.split(",")[1])
cmd += " --snakefile %s/snakemake/metacompass.py"%(mcdir)
if verbose:
cmd += " --verbose"
if retry:
cmd += " --rerun-incomplete"
else:
cmd += " --ignore-incomplete"
cmd += " --prioritize pilon_contigs"
if len(qsub) > 0:
cmd += " --cluster %s"%(qsub)
else:
try:
ret = subprocess.Popen(cmd,shell=True)
ret.communicate()
except KeyboardInterrupt:
os.killpg(ret.pid,signal.SIGKILL)
except:
#command finished but ran in background and timed out? h
ret.returncode = 0
break
#os.killpg(ret.pid,signal.SIGKILL)
if ret.returncode != 0:
print("ERROR: snakemake command failed; exiting..")
os.system("touch %s/%s.0.assembly.out/run.fail"%(prefix,s1id))
sys.exit(1)
i+=1
if os.path.exists("%s/%s.%d.assembly.out/contigs.fasta"%(prefix,s1id,i-1)):
#cleanup output
if not os.path.exists("%s/metacompass_output"%(prefix)):
os.mkdir("%s/metacompass_output"%(prefix))
if not os.path.exists("%s/metacompass_logs"%(prefix)):
os.mkdir("%s/metacompass_logs"%(prefix))
#only for assembly module run
os.system("mv %s/metacompass.recruited.fa %s/metacompass_output/"%(prefix,prefix))
os.system("mv %s/*.log %s/metacompass_logs/."%(prefix,prefix))
os.system("mv %s/%s.0.assembly.out/*.log %s/metacompass_logs/."%(prefix,s1id,prefix))
#only for assembly module run
os.system("mv %s/%s.0.assembly.out/coverage.txt %s/metacompass_output/metacompass.genomes_coverage.txt"%(prefix,s1id,prefix))
if not keepoutput:
print("Cleaning up files..")
shutil.rmtree("%s/%s.0.assembly.out/"%(prefix,s1id))
os.system("rm %s/*.fq "%(prefix))
os.system("rm %s/*.fastq "%(prefix))
os.system("rm %s/*.fasta "%(prefix))
else:
if os.path.exists("%s/%s.%d.assembly.out/contigs.fasta"%(prefix,s1id,i-1)):
os.makedirs("%s/%s.0.assembly.out/assembly_output"%(prefix,s1id), exist_ok=True)
########moving mapping after best strata
os.system("mv %s/%s.0.assembly.out/contigs.fasta %s/%s.0.assembly.out/assembly_output"%(prefix,s1id,prefix,s1id))
os.system("mv %s/%s.0.assembly.out/%s.sam %s/%s.0.assembly.out/assembly_output"%(prefix,s1id,s1id,prefix,s1id))
########testingstart
os.system("mv %s/%s.0.assembly.out/%s.sam.all %s/%s.0.assembly.out/assembly_output"%(prefix,s1id,s1id,prefix,s1id))
########testing end
if os.path.exists("%s/%s.0.assembly.out/selected_maps.sam"%(prefix,s1id)):
os.system("mv %s/%s.0.assembly.out/selected_maps.sam %s/%s.0.assembly.out/assembly_output"%(prefix,s1id,prefix,s1id))
#os.system("mv %s/%s.0.assembly.out/*buildcontigs* %s/%s.0.assembly.out/assembly_output"%(prefix,s1id,prefix,s1id))
#os.mkdir("%s/%s.0.assembly.out/mapped_reads"%(prefix,s1id))
#os.system("mv %s/%s.0.assembly.out/*.mc*.sam* %s/%s.0.assembly.out/mapped_reads"%(prefix,s1id,prefix,s1id))
os.system("rm %s/%s.0.assembly.out/*index "%(prefix,s1id))
os.system("rm %s/%s.0.assembly.out/*.bt2 "%(prefix,s1id))
os.system("rm %s/%s.0.assembly.out/*.fasta "%(prefix,s1id))
os.system("rm %s/*.f*q "%(prefix))
#os.system("rm %s/*.fasta "%(prefix))
if os.path.exists("%s/%s.0.assembly.out"%(prefix,s1id)):
if os.path.exists("%s/intermediate_files"%(prefix)):
os.system("rm -rf %s/intermediate_files"%(prefix))
shutil.move("%s/%s.0.assembly.out"%(prefix,s1id) , "%s/intermediate_files"%(prefix))
print("MetaCompass finished succesfully!")
else:
os.system("touch %s/%s.0.assembly.out/run.fail"%(prefix,s1id))
print("MetaCompass run failed. See Log files for more info")
|
[
"vcepeda@umiacs.umd.edu"
] |
vcepeda@umiacs.umd.edu
|
ecf53647cf9605c57afd26c095cf92ba9f56c670
|
a6ad23d52020affcfc592865bd5ac8900529ea03
|
/Exercises/exercise_05.py
|
1ef15aad726c7e79bec33bb8bcf42051e4187f27
|
[] |
no_license
|
sanchit411/test_cs696
|
71c093cd92f8f8c87071d88048b184b4f9931b5d
|
c689a009d87348434fdce77dc8549f1769ced90e
|
refs/heads/master
| 2021-05-10T07:55:16.346400
| 2018-05-03T20:33:26
| 2018-05-03T20:33:26
| 118,864,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,365
|
py
|
"""
Exercise 5
Word association with Nodes
When you type text into your phone, you may notice that a next word is automatically suggested to you before
you have even started typing a next word.
In this simplified example, we will create a node for each word in a string;
the "next" property for each Node will be a list of the words that have immediately followed that word.
** Reminder **
Instance variables are: self.variable_name
Class variables are: Class.variable_name
"""
import random
class Node:
"""
Node class for word association that includes:
* A class variable "node_dictionary" to store instances of the Node class where
* Keys are words (strings) and values are Nodes (the Node instance corresponding to the word)
* Instance variables "word" and "next"
* word - a string
* next - a list of words that follow this word (contains duplicate words)
* A __str__ method that returns the word instance variable of the node
"""
node_dictionary = {}
# The class variable, node_dictionary (above), is a dictionary with words (strings) as keys and Node instances as values.
def __init__(self, word):
"""
Takes in 1 argument, the word associated with the Node and also
initializes an instance variable "next" as an empty list.
The empty list will store words (not Nodes) that follow this word
:param word: A string
"""
self.word = word
self.next = []
def __str__(self):
"""
returns the word associated with the Node
:return: string
"""
return self.word
def print_all_nodes():
"""
Call this function in your main function to print all of the nodes
:return: None
"""
for k,v in Node.node_dictionary.items():
print("Word: {} \t Followed by: {}".format(k, v.next))
return
def new_beatles():
"""
This definition is purely for fun. Starting at the word "she", this will select a random word that could follow
and repeat this process to print a new beatles song of the same format.
:return: None
"""
nd = Node.node_dictionary # shortcut to the class dictionary
new_song = []
current_word = 'she'
for i in range(23):
new_song.append(current_word)
if len(nd[current_word].next) > 0:
current_word = random.choice(nd[current_word].next)
else: # word has no next word available - so pick a random word
current_word = random.choice(list(nd.keys()))
print(' '.join(new_song[:5]))
print(' '.join(new_song[5:12]))
print(' '.join(new_song[12:16]))
print(' '.join(new_song[16:]))
return
def main():
"""
When print_all_nodes() is called, the main definition for this script should print out (in any order):
Word: she Followed by: ['says', 'loves', 'loves']
Word: says Followed by: ['she']
Word: loves Followed by: ['you', 'you']
Word: you Followed by: ['and', 'know', 'and', 'know', 'should']
Word: and Followed by: ['you', 'you']
Word: know Followed by: ['that', 'you']
Word: that Followed by: ['cant']
Word: cant Followed by: ['be']
Word: be Followed by: ['bad', 'glad']
Word: bad Followed by: ['yes']
Word: yes Followed by: ['she']
Word: should Followed by: ['be']
Word: glad Followed by: []
:return: None
"""
# This is the text we will by analyzing
beatles = """She says she loves you
And you know that cant be bad
Yes she loves you
And you know you should be glad
"""
# In this following example of list comprehension, we process the text above into a list.
# The "if word" check at the end, will return false if word is an empty string
word_list = [word.lower().replace('\n','') for word in beatles.split(' ') if word]
previous_word = None
for word in word_list:
if word not in Node.node_dictionary:
    Node.node_dictionary[word] = Node(word)
if previous_word:
    # record that `word` follows `previous_word`, matching the expected
    # output in main()'s docstring
    Node.node_dictionary[previous_word].next.append(word)
previous_word = word
print_all_nodes()
# new_beatles()
return
if __name__ == '__main__':
main()
|
[
"sanchit@localhost.localdomain"
] |
sanchit@localhost.localdomain
|
565e730b5c9627c722788a4d7845bd826c4e8e03
|
449f7f45be6e44a98d6a4eed319da2cf94cc2076
|
/test001/hello.py
|
83c22c958f6dd01a22226794c8d4472ad7a672d1
|
[] |
no_license
|
cis024c/flaskdev
|
a673412b6ab90a63edbc58cd238a66563e586363
|
36ecbce665aed9ded0cb9a450e4a6cb1ead51517
|
refs/heads/master
| 2020-03-11T16:07:52.107735
| 2018-04-18T18:43:19
| 2018-04-18T18:43:19
| 130,106,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/saymyname")
def helloname():
return "Hello Bob!"
if __name__ == "__main__":
app.run()
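# Example (illustrative): with the dev server running on Flask's default port,
#   $ curl http://127.0.0.1:5000/            -> Hello World!
#   $ curl http://127.0.0.1:5000/saymyname   -> Hello Bob!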
|
[
"Sanjay_Dorairaj@cable.comcast.com"
] |
Sanjay_Dorairaj@cable.comcast.com
|
41dd34f0704e005795cdce0a8ac0c187fc698b90
|
cd41d350c5e604d37e62265be6fc5116450719b5
|
/euler_10.py
|
a902ca4198af89037314c1c055da3e53ba64768b
|
[] |
no_license
|
avichai/euler_project
|
9c306094f11b296f9ead7b2c18df7fb10e10b0db
|
13dcc0db8f18084afad476beec8bda808b7578eb
|
refs/heads/master
| 2021-09-08T07:16:45.344618
| 2018-03-08T09:43:53
| 2018-03-08T09:43:53
| 115,800,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# Summation of primes
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
#
# Find the sum of all the primes below two million.
# Avichai Ben David
import math
import eulerlib
NUMBER = 2000000
def is_prime(n):
for i in range(2, int(math.sqrt(n) + 1)):
if n % i == 0 and n != i:
return False
return True
def compute(n):
res = 2
for i in range(3, n, 2):
if is_prime(i):
res += i
return res
def compute1(n):
return sum(eulerlib.list_primes(n))
if __name__ == "__main__":
print(compute1(NUMBER))
# print(compute(NUMBER))
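# A self-contained alternative to eulerlib.list_primes (a sketch using only
# the standard library; `compute2` is a name introduced here): a basic
# Sieve of Eratosthenes, summed directly.
def compute2(n):
    sieve = [True] * n          # sieve[i] is True while i is assumed prime
    sieve[0] = sieve[1] = False
    for i in range(2, int(math.sqrt(n)) + 1):
        if sieve[i]:
            for j in range(i * i, n, i):  # mark multiples of i as composite
                sieve[j] = False
    return sum(i for i, prime in enumerate(sieve) if prime)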
|
[
"avbendav@microsoft.com"
] |
avbendav@microsoft.com
|
848a6021591e88d0c051325f1028b69b90f99827
|
1ae6f4ed456ccacf8ae95a6cff20f15add056f3f
|
/ex.19.py
|
9537ec1d21f0338b8d0f1431af4090046ec85b20
|
[] |
no_license
|
KarinaMapa/Exercicios-Python-Repeticao
|
64db726b58659c10f77929e5ec13b4c329986db8
|
c1f73a81a4a296f79578ee0ffe79fe8625d7d565
|
refs/heads/master
| 2022-10-20T09:28:52.278850
| 2020-06-16T23:00:42
| 2020-06-16T23:00:42
| 272,828,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
quant = int(input('Enter the quantity of numbers in the set: '))
maior = soma = 0
menor = 1001
for i in range(0, quant):
    n = 1001
    while n > 1000 or n < 0:
        n = int(input('Number: '))
        if n > 1000 or n < 0:
            print('Invalid number')
    # n is validated to 0..1000 at this point, so the boundary values
    # also count toward the maximum, minimum and sum
    if n > maior:
        maior = n
    if n < menor:
        menor = n
    soma += n
print('Largest number: {}'.format(maior))
print('Smallest number: {}'.format(menor))
print('Sum: {}'.format(soma))
|
[
"karina-mapa@hotmail.com"
] |
karina-mapa@hotmail.com
|
d0e3d3aa95dcd753c2756ffd0166b533298215ed
|
97621883ac33bccb655a12295a1b28f091b5a8fe
|
/venv/bin/easy_install
|
313fccefa07aa6bc9bd503ecfad2393bc28ea396
|
[
"MIT"
] |
permissive
|
olamijinadebayo/blog
|
1616f06180782575a658f625b0725dc98c2c74d0
|
b3ae83055d62ea4b09ae621566e3d0cb862da73c
|
refs/heads/master
| 2020-03-11T21:12:57.960586
| 2018-04-25T08:19:24
| 2018-04-25T08:19:24
| 130,258,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
#!/home/ola/Documents/moringa-school-projects/microblog/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"consultolamijin@gmail.com"
] |
consultolamijin@gmail.com
|
|
2faed508287230ab969292eaf79e86a5f4c970fb
|
5e9c890f5677aa561f7acdb1b1249dc06d5a166a
|
/tests/output/timesteptest.py
|
d613f2dd19d68fdf604ce930488681bdf566445a
|
[
"Apache-2.0"
] |
permissive
|
fispact/pypact
|
6690b6d4f8265990e2c238532e57c4c9cf8c88d4
|
be7723189236de333c44131ca534fb4286b4e3f7
|
refs/heads/master
| 2023-01-24T09:36:47.363611
| 2022-10-23T17:58:40
| 2022-10-23T17:58:40
| 118,437,112
| 20
| 10
|
Apache-2.0
| 2022-10-23T17:58:41
| 2018-01-22T09:42:58
|
Python
|
UTF-8
|
Python
| false
| false
| 9,251
|
py
|
from tests.output.baseoutputtest import BaseOutputUnitTest
from tests.output.doseratetest import DoseRateAssertor
from tests.output.nuclidestest import NuclidesAssertor
import pypact as pp
class TimeStepAssertor(BaseOutputUnitTest):
ds_assertor = DoseRateAssertor()
nuc_assertor = NuclidesAssertor()
def assert_defaults(self, timestep):
ts = pp.TimeStep()
ts.irradiation_time = 0.0
ts.cooling_time = 0.0
ts.flux = 0.0
ts.total_heat = 0.0
ts.alpha_heat = 0.0
ts.beta_heat = 0.0
ts.gamma_heat = 0.0
ts.ingestion_dose = 0.0
ts.inhalation_dose = 0.0
ts.initial_mass = 0.0
ts.total_mass = 0.0
ts.number_of_fissions = 0.0
ts.burnup = 0.0
ts.total_activity = 0.0
ts.total_activity_exclude_trit = 0.0
ts.alpha_activity = 0.0
ts.beta_activity = 0.0
ts.gamma_activity = 0.0
self.assert_inventory(timestep, ts)
self.ds_assertor.assert_defaults(timestep.dose_rate)
self.nuc_assertor.assert_defaults(timestep.nuclides)
def assert_inventory(self, inv, compareinv):
self.assertValueAndType(inv, pp.TimeStep, 'irradiation_time', float, compareinv.irradiation_time)
self.assertValueAndType(inv, pp.TimeStep, 'cooling_time', float, compareinv.cooling_time)
self.assertValueAndType(inv, pp.TimeStep, 'flux', float, compareinv.flux)
self.assertValueAndType(inv, pp.TimeStep, 'total_heat', float, compareinv.total_heat)
self.assertValueAndType(inv, pp.TimeStep, 'alpha_heat', float, compareinv.alpha_heat)
self.assertValueAndType(inv, pp.TimeStep, 'beta_heat', float, compareinv.beta_heat)
self.assertValueAndType(inv, pp.TimeStep, 'gamma_heat', float, compareinv.gamma_heat)
self.assertValueAndType(inv, pp.TimeStep, 'initial_mass', float, compareinv.initial_mass)
self.assertValueAndType(inv, pp.TimeStep, 'ingestion_dose', float, compareinv.ingestion_dose)
self.assertValueAndType(inv, pp.TimeStep, 'total_mass', float, compareinv.total_mass)
self.assertValueAndType(inv, pp.TimeStep, 'number_of_fissions', float, compareinv.number_of_fissions)
self.assertValueAndType(inv, pp.TimeStep, 'burnup', float, compareinv.burnup)
self.assertValueAndType(inv, pp.TimeStep, 'inhalation_dose', float, compareinv.inhalation_dose)
self.assertValueAndType(inv, pp.TimeStep, 'total_activity', float, compareinv.total_activity)
self.assertValueAndType(inv, pp.TimeStep, 'total_activity_exclude_trit', float, compareinv.total_activity_exclude_trit)
self.assertValueAndType(inv, pp.TimeStep, 'alpha_activity', float, compareinv.alpha_activity)
self.assertValueAndType(inv, pp.TimeStep, 'beta_activity', float, compareinv.beta_activity)
self.assertValueAndType(inv, pp.TimeStep, 'gamma_activity', float, compareinv.gamma_activity)
def assert_timestep(self, inv, timestep):
self.ds_assertor.assert_timestep(inv.dose_rate, timestep)
self.nuc_assertor.assert_timestep(inv.nuclides, timestep)
# Let's test some key timesteps
# much too time consuming to test all timesteps
if timestep == 1:
self.assert_inventory(inv, timestep_1_inv())
elif timestep == 2:
self.assert_inventory(inv, timestep_2_inv())
elif timestep == 14:
self.assert_inventory(inv, timestep_14_inv())
elif 16 > timestep > 2:
return
else:
self.assert_defaults(inv)
def timestep_1_inv():
inv = pp.TimeStep()
inv.irradiation_time = 0.0
inv.cooling_time = 0.0
inv.flux = 3.3400E+10
inv.alpha_heat = 1.00026E-08
inv.beta_heat = 3.98609E-11
inv.gamma_heat = 6.71486E-11
inv.total_heat = inv.alpha_heat + inv.beta_heat + inv.gamma_heat
inv.ingestion_dose = 6.59242E-01
inv.inhalation_dose = 1.17557E+02
inv.initial_mass = 1.00067E+00
inv.total_mass = 1.00067E+00
inv.number_of_fissions = 0.0E+00
inv.burnup = 0.0E+00
inv.total_activity = 1.45396E+07
inv.total_activity_exclude_trit = 1.45396E+07
inv.alpha_activity = 1.453958E+07
inv.beta_activity = 0.0
inv.gamma_activity = 0.0
return inv
def timestep_2_inv():
inv = pp.TimeStep()
inv.irradiation_time = 2.6298E+06
inv.cooling_time = 0.0
inv.flux = 3.3400E+10
inv.alpha_heat = 1.00026E-08
inv.beta_heat = 1.09700E-09
inv.gamma_heat = 1.12065E-10
inv.total_heat = inv.alpha_heat + inv.beta_heat + inv.gamma_heat
inv.ingestion_dose = 6.84076E-01
inv.inhalation_dose = 1.17614E+02
inv.initial_mass = 1.00067E+00
inv.total_mass = 1.00067E+00
inv.number_of_fissions = 0.0E+00
inv.burnup = 0.0E+00
inv.total_activity = 3.11345E+07
inv.total_activity_exclude_trit = 3.11345E+07
inv.alpha_activity = 1.453958E+07
inv.beta_activity = 1.658438E+07
inv.gamma_activity = 1.057793E+04
return inv
def timestep_14_inv():
inv = pp.TimeStep()
inv.irradiation_time = 2.6298E+06 + 5.2596E+06 + 7.8894E+06 + 1.5779E+07 \
+ 1.5779E+07 + 1.5779E+07
inv.cooling_time = 6.0000E+01 + 8.6400E+04 + 2.5434E+06 + 1.3149E+07 \
+ 1.5779E+07 + 6.3115E+07 + 6.3115E+07
inv.flux = 0.0000E+00
inv.alpha_heat = 1.00031E-08
inv.beta_heat = 1.80108E-09
inv.gamma_heat = 1.36712E-10
inv.total_heat = inv.alpha_heat + inv.beta_heat + inv.gamma_heat
inv.ingestion_dose = 7.01423E-01
inv.inhalation_dose = 1.17728E+02
inv.initial_mass = 1.00067E+00
inv.total_mass = 1.00067E+00
inv.number_of_fissions = 0.0E+00
inv.burnup = 0.0E+00
inv.total_activity = 4.11571E+07
inv.total_activity_exclude_trit = 4.11571E+07
inv.alpha_activity = 1.454025E+07
inv.beta_activity = 2.659877E+07
inv.gamma_activity = 1.808869E+04
return inv
class TimeStepUnitTest(BaseOutputUnitTest):
assertor = TimeStepAssertor()
def test_fission_example(self):
ts = pp.TimeStep()
ts.fispact_deserialize(self.filerecord_fission, 1)
self.assertEqual(ts.alpha_heat, 7.22533E-10, "Assert alpha heat")
self.assertEqual(ts.number_of_fissions, 0.0, "Assert number of fissions is zero")
self.assertEqual(ts.burnup, 0.0, "Assert burnup is zero")
ts.fispact_deserialize(self.filerecord_fission, 2)
self.assertEqual(ts.alpha_heat, 7.38131E-10, "Assert alpha heat")
self.assertEqual(ts.number_of_fissions, 6.73186E+09, "Assert number of fissions is non zero")
self.assertEqual(ts.burnup, 2.93608E-11, "Assert burnup is non zero")
def test_fispact_deserialize(self):
def func(ts, i):
ts.fispact_deserialize(self.filerecord91, i)
self.assertor.assert_timestep(ts, i)
self._wrapper(func)
def test_fispact_deserialize_isirradiation(self):
ts = pp.TimeStep()
self.assertor.assert_defaults(ts)
ts.fispact_deserialize(self.filerecord91, 1)
self.assertEqual(True, ts.isirradiation, "Assert timestep 1 is an irradiation step")
ts.fispact_deserialize(self.filerecord91, 2)
self.assertEqual(True, ts.isirradiation, "Assert timestep 2 is an irradiation step")
ts.fispact_deserialize(self.filerecord91, 14)
self.assertEqual(False, ts.isirradiation, "Assert timestep 14 is a cooling step")
def test_fispact_deserialize_currenttime(self):
ts = pp.TimeStep()
self.assertor.assert_defaults(ts)
ts.fispact_deserialize(self.filerecord91, 1)
self.assertEqual(0.0, ts.currenttime, "Assert the irradiation time for timestep 1")
ts.fispact_deserialize(self.filerecord91, 2)
self.assertEqual(2.6298E+06, ts.currenttime, "Assert the irradiation time for timestep 2")
ts.fispact_deserialize(self.filerecord91, 14)
self.assertEqual(ts.cooling_time, ts.currenttime, "Assert the cooling time for timestep 14")
def test_fispact_deserialize_nonuclides(self):
ts = pp.TimeStep(ignorenuclides=True)
self.assertor.assert_defaults(ts)
ts.fispact_deserialize(self.filerecord91, 1)
self.assertor.nuc_assertor.assert_defaults(ts.nuclides)
ts.fispact_deserialize(self.filerecord91, 2)
self.assertor.nuc_assertor.assert_defaults(ts.nuclides)
ts.fispact_deserialize(self.filerecord91, 14)
self.assertor.nuc_assertor.assert_defaults(ts.nuclides)
def test_fispact_readwriteread(self):
def func(ts, i):
# deserialize from standard output
ts.fispact_deserialize(self.filerecord91, i)
self.assertor.assert_timestep(ts, i)
# serialize to JSON
j = ts.json_serialize()
# reset object
newts = pp.TimeStep()
self.assertor.assert_defaults(newts)
# deserialize JSON and compare to original
newts.json_deserialize(j)
self.assertor.assert_timestep(newts, i)
self._wrapper(func)
def _wrapper(self, func):
ts = pp.TimeStep()
self.assertor.assert_defaults(ts)
for i in range(-100, 100):
func(ts, i)
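# Run these tests (illustrative; assumes the repository's test layout is on
# sys.path):
#   $ python -m unittest tests.output.timesteptest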
|
[
"stainer.tom@gmail.com"
] |
stainer.tom@gmail.com
|
d58cb7de2dbd4f821d0407b7ef618003f1f9fc9b
|
cb4db25a0b13f058f1a31b38d80d76a118d1e2dc
|
/venv/lib/python3.6/site-packages/google/api/usage_pb2.py
|
efe4f7945d109abc9613cb147c11bb4917bdf030
|
[
"MIT"
] |
permissive
|
Hackaton-Dragons/Never-Boils
|
73df2b65f54a77d961ce53dea350b7d2a4261154
|
2d43e6e07fb18409d5a964f44f481d28d2352531
|
refs/heads/master
| 2020-03-09T20:27:54.554616
| 2018-10-08T05:52:33
| 2018-10-08T05:52:33
| 128,985,616
| 1
| 0
|
MIT
| 2018-04-15T13:32:45
| 2018-04-10T19:35:32
|
Python
|
UTF-8
|
Python
| false
| true
| 4,244
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/usage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/usage.proto',
package='google.api',
syntax='proto3',
serialized_pb=_b('\n\x16google/api/usage.proto\x12\ngoogle.api\x1a\x1cgoogle/api/annotations.proto\"C\n\x05Usage\x12\x14\n\x0crequirements\x18\x01 \x03(\t\x12$\n\x05rules\x18\x06 \x03(\x0b\x32\x15.google.api.UsageRule\"?\n\tUsageRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12 \n\x18\x61llow_unregistered_calls\x18\x02 \x01(\x08\x42%\n\x0e\x63om.google.apiB\nUsageProtoP\x01\xa2\x02\x04GAPIb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_USAGE = _descriptor.Descriptor(
name='Usage',
full_name='google.api.Usage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requirements', full_name='google.api.Usage.requirements', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='google.api.Usage.rules', index=1,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=135,
)
_USAGERULE = _descriptor.Descriptor(
name='UsageRule',
full_name='google.api.UsageRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='google.api.UsageRule.selector', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_unregistered_calls', full_name='google.api.UsageRule.allow_unregistered_calls', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=200,
)
_USAGE.fields_by_name['rules'].message_type = _USAGERULE
DESCRIPTOR.message_types_by_name['Usage'] = _USAGE
DESCRIPTOR.message_types_by_name['UsageRule'] = _USAGERULE
Usage = _reflection.GeneratedProtocolMessageType('Usage', (_message.Message,), dict(
DESCRIPTOR = _USAGE,
__module__ = 'google.api.usage_pb2'
# @@protoc_insertion_point(class_scope:google.api.Usage)
))
_sym_db.RegisterMessage(Usage)
UsageRule = _reflection.GeneratedProtocolMessageType('UsageRule', (_message.Message,), dict(
DESCRIPTOR = _USAGERULE,
__module__ = 'google.api.usage_pb2'
# @@protoc_insertion_point(class_scope:google.api.UsageRule)
))
_sym_db.RegisterMessage(UsageRule)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\nUsageProtoP\001\242\002\004GAPI'))
# @@protoc_insertion_point(module_scope)
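# Illustrative use of the generated messages (a sketch, not part of the
# generated file; the service name is hypothetical, field names follow the
# descriptors above):
#
#     from google.api import usage_pb2
#     usage = usage_pb2.Usage(requirements=['example.googleapis.com/enabled'])
#     rule = usage.rules.add(selector='*', allow_unregistered_calls=True)
#     data = usage.SerializeToString()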
|
[
"contact@noahh.io"
] |
contact@noahh.io
|
74be90181e7bca2498ff46ae8c4d3ef026368489
|
43cdba0d7b6530e0cc3d091affe9d084fe6d86bb
|
/testing.py
|
34fe5466b03ec6f69638432197234c1a0a073d4b
|
[] |
no_license
|
timfelle/OptimisationToolbox
|
532e00aaa08f64a05f08c9c2bcaca1ed41bd2b83
|
8853fd0c2cdc1fcc22368fe2ca90bd62a0c809bb
|
refs/heads/master
| 2020-04-30T15:25:33.068812
| 2019-04-16T09:01:10
| 2019-04-16T09:01:10
| 176,919,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
import optimisation
import numpy as np
sep = '========================================================================'
# Define the matrices used in all
H = np.matrix([[ 2, 1 ],[1,2]])
g = np.matrix( [-2, -3 ] ).T
Eq_A = np.matrix([[ 1, 2]]).T
Eq_b = np.matrix([[ 2 ]]).T
In_A = np.matrix([[1,-2],[-1,-2], [-1,2], [1,0], [0,1]]).T
In_b = np.matrix([[ -2, -6, -2, 0, 0 ]]).T
In_C = [lambda x: x[0]**2 + x[1]**2 - 1,
lambda x: -x[0]**2 - x[1]**2 + (1.5)**2]
A2 = [ [1,-1], [1,1] ]
b2 = [0,1]
print(sep)
# =============================================================================
# Setting up and testing functions related to QP
QP = optimisation.QP(
H,g,
Eq_A = Eq_A, Eq_b = Eq_b,
In_A = In_A, In_b = In_b,
In_C = In_C,
Eq_C=In_C
)
#QP.help()
#QP.add_constraints(In_A=A2,In_b=b2)
QP.solve()
QP.print()
#QP.display(x_lim=[-0.5,4], y_lim=[-0.5,4], obj_levels=50, display=True)
exit()
print(sep)
# =============================================================================
# Setting up and testing functions related to LP
# Define the problem
LP = optimisation.LP(
g,
Eq_A = Eq_A, Eq_b = Eq_b,
In_A = In_A, In_b = In_b
)
LP.help()
LP.add_constraints(In_A=A2,In_b=b2)
LP.print()
LP.display(x_lim=[-0.5,5], y_lim=[-0.5,3], obj_levels=50)
print(sep)
# =============================================================================
# Setting up and testing functions related to NLP
NLP = optimisation.NLP(
lambda x: x[0]**3 + x[1]**2 + x[0], 2,
Eq_A = Eq_A, Eq_b = Eq_b,
In_A = In_A, In_b = In_b
)
NLP.help()
NLP.add_constraints(In_A=A2,In_b=b2)
NLP.print()
NLP.display(x_lim=[-0.5,5], y_lim=[-0.5,3], obj_levels=50)
|
[
"timfelle@hotmail.com"
] |
timfelle@hotmail.com
|
ec88a0da26bc9dce4c08887164b88d20d9e6fa98
|
a3fd69f7feb476715180d3bd85524d431b56abd1
|
/pb_logging/syslog_handler.py
|
4aea54c8731a8ec5de5a8646e3bb6f0dccfa9de2
|
[] |
no_license
|
fbrehm/py_pb_logging
|
c22612f953e4f95e1d5ccade32859b91e1b69ea1
|
e17b4e3c8a4fc22323e1153e5a539c837a529489
|
refs/heads/master
| 2021-01-17T10:03:04.098123
| 2018-06-12T19:48:40
| 2018-06-12T19:48:40
| 8,831,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,901
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@summary: wrapping logging handler for logging.handlers.SysLogHandler
to avoid BOM errors in syslog messages
"""
# Standard modules
import socket
import sys
import os
import stat
import errno
from logging.handlers import SYSLOG_UDP_PORT
from logging.handlers import SysLogHandler
# Third party modules
# Own modules
__version__ = '0.2.2'
# =============================================================================
class PbSysLogHandler(SysLogHandler):
"""
A wrapper logging handler for logging.handlers.SysLogHandler
to avoid BOM errors in syslog messages.
"""
def __init__(
self, address=('localhost', SYSLOG_UDP_PORT),
facility=SysLogHandler.LOG_USER,
socktype=None,
encoding="utf-8"):
"""
Initialize the PbSysLogHandler.
To log to a local syslogd, "PbSysLogHandler(address = "/dev/log")"
can be used.
If facility is not specified, LOG_USER is used.
@param address: either the network socket of the syslog daemon
(if given as tuple) or the filename of the UNIX socket
of the syslog daemon (if given as str).
@type address: tuple or str
@param facility: syslog facility to use
@type facility: int
@param socktype: the socket type (socket.SOCK_DGRAM or
socket.SOCK_STREAM) to use.
Not used in Python2 <= 2.6 and Python3 <= 3.1.
@type socktype: int
@param encoding: the character set to use to encode unicode messages
@type encoding: str
"""
# Initialisation of the parent object
do_socktype = False
do_ux_socket = False
if sys.version_info[0] > 2:
if sys.version_info[1] > 1:
do_socktype = True
else:
if sys.version_info[1] > 6:
do_socktype = True
if isinstance(address, str):
if not os.path.exists(address):
raise OSError(errno.ENOENT, "File doesn't exists", address)
mode = os.stat(address).st_mode
if not stat.S_ISSOCK(mode):
raise OSError(
errno.EPERM, "File is not a UNIX socket file", address)
if not os.access(address, os.W_OK):
raise OSError(
errno.EPERM, "No write access to socket", address)
do_ux_socket = True
if do_socktype:
if do_ux_socket:
SysLogHandler.__init__(self, address, facility, None)
else:
SysLogHandler.__init__(self, address, facility, socktype)
else:
SysLogHandler.__init__(self, address, facility)
self.encoding = encoding
"""
@ivar: the character set to use to encode unicode messages
@type: str
"""
# -------------------------------------------------------------------------
def _connect_unixsocket(self, address):
use_socktype = getattr(self, 'socktype', None)
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
raise
# -------------------------------------------------------------------------
def emit(self, record):
"""
Wrapper method for SysLogHandler.emit() to encode an unicode message
to UTF-8 (or whatever).
"""
msg = record.msg
if sys.version_info[0] > 2:
if isinstance(msg, bytes):
msg = msg.decode(self.encoding)
record.msg = msg
else:
if isinstance(msg, unicode): # noqa
msg = msg.encode(self.encoding)
record.msg = msg
SysLogHandler.emit(self, record)
# =============================================================================
if __name__ == "__main__":
pass
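    # A minimal usage sketch, assuming a writable /dev/log socket on the host;
    # the logger name and message are illustrative only.
    import logging
    logger = logging.getLogger("demo")
    logger.addHandler(PbSysLogHandler(address="/dev/log"))
    logger.warning(u"Grüße")  # on Python 2 this unicode text is encoded via self.encoding in emit()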
# =============================================================================
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
[
"frank.brehm@profitbricks.com"
] |
frank.brehm@profitbricks.com
|
0381a8f78160698253492ad282902dea14fda2ef
|
d9563b4c1aa57ea2f34844f2d9e3c645f7883487
|
/Project/demo.py
|
25d7cd0dbc22017caf3e7c94ca619daa44fe024d
|
[] |
no_license
|
smartinez1/IBIO4490
|
fb1444650f91a349d40233815f179c7d168085da
|
b1d9f96b2a38162fb5087004aafa52f848efcf30
|
refs/heads/master
| 2020-04-20T08:21:56.389194
| 2019-06-02T02:18:36
| 2019-06-02T02:18:36
| 168,736,816
| 0
| 0
| null | 2019-02-01T17:43:34
| 2019-02-01T17:43:34
| null |
UTF-8
|
Python
| false
| false
| 13,178
|
py
|
#!/usr/bin/env python3
# coding: utf-8
#
import os
import os.path as osp
import datetime
import shlex
import subprocess
import pickle
import pydicom
import cv2
from skimage import io
from google_drive_downloader import GoogleDriveDownloader as gdd
import pytz
import torch
import yaml
import warnings
warnings.filterwarnings('ignore')
if not os.path.isdir('CHAOS_Train_Sets'):
gdd.download_file_from_google_drive(file_id='1N3hva6J05q5OgPKzCcr9xmEsP47j0u9W',
dest_path='./CHAOS_Train_Sets/CHAOS_Train_Sets.zip',
unzip=True)
if not os.path.exists('dataSet_network.pickle'):  # isdir() is never true for a file, so this branch re-downloaded on every run
gdd.download_file_from_google_drive(file_id='1fiugEkzLchVI6CrBF9S-iQFweNVOJgVw',
dest_path='./dataSet_network.pickle',
unzip=False)
if not os.path.isdir('model'):
gdd.download_file_from_google_drive(file_id='1eTgfMg2UUr9s1cQMXzIJG65po335oIyo',
dest_path='./model.zip',
unzip=True)
configurations = {
1: dict(
max_iteration=100000,
lr=1.0e-10,
momentum=0.99,
weight_decay=0.0005,
interval_validate=4000,
)
}
cfg = configurations[1]
gpu = 0
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
cuda = torch.cuda.is_available()
import numpy as np
class_names = np.array([
'background',
'liver',
'Right Kidney',
'Left Kidney',
'Spleen'
])
# ## FCN - Model
# In[ ]:
def getChaosDataset():
base='CHAOS_Train_Sets/Train_Sets/MR'
patients=os.listdir('CHAOS_Train_Sets/Train_Sets/MR')
strucs=[]
labels=[]
liver=[55,70]
Rkidney=[110,135]#,126]
Lkidney=[175,200]#,189]
spleen=[240,255]#,252]
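    # Ground-truth grey values fall in the ranges above and are binned into
    # class ids below: liver -> 1, right kidney -> 2, left kidney -> 3,
    # spleen -> 4, everything else -> 0 (background).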
#BR=0
for idx in range(len(patients)):
pat=str(patients[idx])
typeMR2=('T2SPIR')
patiType=os.path.join(base,pat,typeMR2)
imgs=os.path.join(patiType,'DICOM_anon')
ground=os.path.join(patiType,'Ground')
names=os.listdir(imgs)
names=np.sort(names)
names_g=os.listdir(ground)
names_g=np.sort(names_g)
struct=np.zeros((len(names),256,256))
label=np.zeros((len(names),256,256))
for jdx in range(len(names)):
time=names[jdx]
ann=names_g[jdx]
OGimagePath=os.path.join(imgs,time)
ann_path=os.path.join(ground,ann)
annotation=io.imread(ann_path)
heigth, width=annotation.shape
if (heigth!=256 or width!=256):
annotation=cv2.resize(annotation,(256,256))
transformedAnnotation=np.zeros((256,256))
for kdx in range(256):
for ldx in range(256):
if (annotation[kdx,ldx]>=liver[0] and annotation[kdx,ldx]<=liver[1]):
transformedAnnotation[kdx,ldx]=1
elif (annotation[kdx,ldx]>=Rkidney[0] and annotation[kdx,ldx]<=Rkidney[1]):
transformedAnnotation[kdx,ldx]=2
elif (annotation[kdx,ldx]>=Lkidney[0] and annotation[kdx,ldx]<=Lkidney[1]):
transformedAnnotation[kdx,ldx]=3
elif (annotation[kdx,ldx]>=spleen[0] and annotation[kdx,ldx]<=spleen[1]):
transformedAnnotation[kdx,ldx]=4
else:
transformedAnnotation[kdx,ldx]=0
heigth, width=transformedAnnotation.shape
ds = pydicom.read_file(OGimagePath)
mapp = ds.pixel_array
heigth, width=mapp.shape
if (heigth!=256 or width!=256):
mapp=cv2.resize(mapp,(256,256))
mapp=(mapp)
struct[jdx,:,:]=mapp.astype('double')
label[jdx,:,:]=transformedAnnotation
strucs.append(struct)
labels.append(label)
with open('dataSet_network.pickle', 'wb') as handle:
pickle.dump([strucs,labels], handle, protocol=pickle.HIGHEST_PROTOCOL)
try:
with open('dataSet_network.pickle','rb') as pickleFile:
dataSet=pickle.load(pickleFile)
strucs=dataSet[0]
labels=dataSet[1]
print('Dataset Found')
print('Variables created')
except FileNotFoundError:
print('Dataset not found')
print('Creating dataset...')
getChaosDataset()
print('Dataset created')
with open('dataSet_network.pickle','rb') as pickleFile:
dataSet=pickle.load(pickleFile)
strucs=dataSet[0]
labels=dataSet[1]
print('Variables created')
val=int(np.round((len(strucs))/5))
sett='val'
numSlices=0
images=[]
annotations=[]
if sett=='train':
for jdx in range(val,len(labels)):
numSlices=numSlices+labels[jdx].shape[0]
for kdx in range(labels[jdx].shape[0]):
images.append(strucs[jdx][kdx])
annotations.append(labels[jdx][kdx])
elif sett=='val':
for jdx in range(0,val):
numSlices=numSlices+labels[jdx].shape[0]
for kdx in range(labels[jdx].shape[0]):
images.append(strucs[jdx][kdx])
annotations.append(labels[jdx][kdx])
import numpy as np
import torch.nn as nn
class organNet32s(nn.Module):
def __init__(self, n_class=5):
super(organNet32s, self).__init__()
# conv1
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2
# conv2
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4
# conv3
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8
# conv4
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16
# conv5
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/32
# fc6
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
# fc7
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.upscore = nn.ConvTranspose2d(n_class, n_class, 64, stride=32,
bias=False)
def forward(self, x, debug = False):
h = x
if debug: print(h.data.shape)
h = self.relu1_1(self.conv1_1(h))
if debug: print(h.data.shape)
h = self.relu1_2(self.conv1_2(h))
if debug: print(h.data.shape)
h = self.pool1(h)
if debug: print(h.data.shape)
h = self.relu2_1(self.conv2_1(h))
if debug: print(h.data.shape)
h = self.relu2_2(self.conv2_2(h))
if debug: print(h.data.shape)
h = self.pool2(h)
if debug: print(h.data.shape)
h = self.relu3_1(self.conv3_1(h))
if debug: print(h.data.shape)
h = self.relu3_2(self.conv3_2(h))
if debug: print(h.data.shape)
h = self.relu3_3(self.conv3_3(h))
if debug: print(h.data.shape)
h = self.pool3(h)
if debug: print(h.data.shape)
h = self.relu4_1(self.conv4_1(h))
if debug: print(h.data.shape)
h = self.relu4_2(self.conv4_2(h))
if debug: print(h.data.shape)
h = self.relu4_3(self.conv4_3(h))
if debug: print(h.data.shape)
h = self.pool4(h)
if debug: print(h.data.shape)
h = self.relu5_1(self.conv5_1(h))
if debug: print(h.data.shape)
h = self.relu5_2(self.conv5_2(h))
if debug: print(h.data.shape)
h = self.relu5_3(self.conv5_3(h))
if debug: print(h.data.shape)
h = self.pool5(h)
if debug: print(h.data.shape)
h = self.relu6(self.fc6(h))
if debug: print(h.data.shape)
h = self.drop6(h)
if debug: print(h.data.shape)
h = self.relu7(self.fc7(h))
if debug: print(h.data.shape)
h = self.drop7(h)
if debug: print(h.data.shape)
h = self.score_fr(h)
if debug: print(h.data.shape)
h = self.upscore(h)
if debug: print(h.data.shape)
h = h[:, :, 19:19 + x.size()[2], 19:19 + x.size()[3]].contiguous()
if debug: print(h.data.shape)
return h
model = organNet32s(n_class=5)
if cuda: model.to('cuda')
model.eval()
resume = 'model/model_best.pth.tar' #model_best.pth.tar checkpoint.pth.tar'
print('Loading checkpoint from: '+resume)
model.load_state_dict(torch.load(resume)['model_state_dict'])
import torch
import numpy as np
def fileimg2model(idx):
img = images[idx]
img = np.array(img, dtype=np.uint8)
return transform(img)
def transform(image):
image = image.astype(np.float64)
img = np.zeros((image.shape[0],image.shape[1],3))
img[:,:,0]=image
img[:,:,1]=image
img[:,:,2]=image
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
return img
def untransform(img):
img = img.numpy()
img = img.transpose(1, 2, 0)
img = img.astype(np.uint8)
return img
import matplotlib.pyplot as plt
from torch.autograd import Variable
def imshow_label(label_show, alpha=None):
import matplotlib
cmap = plt.cm.jet
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplist[0] = (0.0,0.0,0.0,1.0)
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
bounds = np.arange(0,len(class_names))
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
plt.imshow(label_show, cmap=cmap, norm=norm, alpha=alpha)
if alpha is None:
plt.title(str([class_names[i] for i in np.unique(label_show) if i!=0]))
cbar = plt.colorbar(ticks=bounds)
cbar.ax.set_yticklabels(class_names)
def run_fromfile(idx,name):
img_torch = torch.unsqueeze(fileimg2model(idx), 0)
if cuda: img_torch = img_torch.to('cuda')
with torch.no_grad():
plt.imshow((images[idx]))
plt.savefig(('testImages/image_'+name+'.png'))
score = model(img_torch)
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
plt.imshow((images[idx]), alpha=.9)
imshow_label(lbl_pred[0], alpha=0.5)
plt.savefig(('testImages/label_asd_'+name+'.png'))
imshow_label(lbl_pred[0])
plt.savefig(('testImages/label_'+name+'.png'))
return lbl_pred
def run_simple(idx):
img_torch = torch.unsqueeze(fileimg2model(idx), 0)
if cuda: img_torch = img_torch.to('cuda')
with torch.no_grad():
OG=images[idx]
score = model(img_torch)
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
lbl = annotations[idx]
return OG, lbl_pred, lbl
os.system('mkdir examples')
whole_preds=[]
counter=0
for jdx in range(val):
whole_pred=np.zeros(strucs[jdx].shape)
for idx in range(strucs[jdx].shape[0]):
img,pred,lbl=run_simple(counter)
whole_pred[idx,:,:]=pred
counter=counter+1
whole_preds.append(whole_pred)
img_file = np.random.randint(0,counter-1)
img, pred, lbl=run_simple(img_file)
plt.subplot(1,3,1)
plt.imshow(img)
plt.title('Original Image')
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(lbl)
plt.title('Ground Truth')
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(pred[0])
plt.title('Prediction')
plt.axis('off')
plt.show()
|
[
"s.martinez1@uniandes.edu.co"
] |
s.martinez1@uniandes.edu.co
|
3729d9da023e6a5a84cc1c3bac5ff6e4ef5f87db
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/redis/redis/sentinel.pyi
|
ea13ae681287fa1353217d2e6d217fe0898b122b
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 2,820
|
pyi
|
from collections.abc import Iterable, Iterator
from typing import Any, TypeVar, overload
from typing_extensions import Literal, TypeAlias
from redis.client import Redis
from redis.commands.sentinel import SentinelCommands
from redis.connection import Connection, ConnectionPool, SSLConnection
from redis.exceptions import ConnectionError
_RedisT = TypeVar("_RedisT", bound=Redis[Any])
_AddressAndPort: TypeAlias = tuple[str, int]
_SentinelState: TypeAlias = dict[str, Any] # TODO: this can be a TypedDict
class MasterNotFoundError(ConnectionError): ...
class SlaveNotFoundError(ConnectionError): ...
class SentinelManagedConnection(Connection):
connection_pool: SentinelConnectionPool
def __init__(self, **kwargs) -> None: ...
def connect_to(self, address: _AddressAndPort) -> None: ...
def connect(self) -> None: ...
# The result can be either `str | bytes` or `list[str | bytes]`
def read_response(self, disable_decoding: bool = ...) -> Any: ...
class SentinelManagedSSLConnection(SentinelManagedConnection, SSLConnection): ...
class SentinelConnectionPool(ConnectionPool):
is_master: bool
check_connection: bool
service_name: str
sentinel_manager: Sentinel
def __init__(self, service_name: str, sentinel_manager: Sentinel, **kwargs) -> None: ...
def reset(self) -> None: ...
def owns_connection(self, connection: Connection) -> bool: ...
def get_master_address(self) -> _AddressAndPort: ...
def rotate_slaves(self) -> Iterator[_AddressAndPort]: ...
class Sentinel(SentinelCommands):
sentinel_kwargs: dict[str, Any]
sentinels: list[Redis[Any]]
min_other_sentinels: int
connection_kwargs: dict[str, Any]
def __init__(
self,
sentinels: Iterable[_AddressAndPort],
min_other_sentinels: int = ...,
sentinel_kwargs: dict[str, Any] | None = ...,
**connection_kwargs,
) -> None: ...
def check_master_state(self, state: _SentinelState, service_name: str) -> bool: ...
def discover_master(self, service_name: str) -> _AddressAndPort: ...
def filter_slaves(self, slaves: Iterable[_SentinelState]) -> list[_AddressAndPort]: ...
def discover_slaves(self, service_name: str) -> list[_AddressAndPort]: ...
@overload
def master_for(self, service_name: str, *, connection_pool_class=..., **kwargs) -> Redis[Any]: ...
@overload
def master_for(self, service_name: str, redis_class: type[_RedisT], connection_pool_class=..., **kwargs) -> _RedisT: ...
@overload
def slave_for(self, service_name: str, *, connection_pool_class=..., **kwargs) -> Redis[Any]: ...
@overload
def slave_for(self, service_name: str, redis_class: type[_RedisT], connection_pool_class=..., **kwargs) -> _RedisT: ...
def execute_command(self, *args, **kwargs) -> Literal[True]: ...
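# A short usage sketch of the client API these stubs describe; the sentinel
# address and service name below are placeholders:
#
#     from redis.sentinel import Sentinel
#     sentinel = Sentinel([("localhost", 26379)], socket_timeout=0.5)
#     master = sentinel.master_for("mymaster", socket_timeout=0.5)
#     master.set("key", "value")
#     replica = sentinel.slave_for("mymaster", socket_timeout=0.5)
#     replica.get("key")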
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
3f5a302519a5d865bc2825e6bb893d2d6014fef7
|
be06759270d816171bc576f973fb536e216aef9a
|
/BioInformatics/GibbsSampler.py
|
c78f901db91be68e7262f5ad6351d734c92ce716
|
[] |
no_license
|
espaciomore/my-code-kata
|
6d6fbeda8ea75813e1c57d45ae1382207e2197fa
|
6c8e1987648350c880e8ab8a038c69608c680cab
|
refs/heads/master
| 2020-12-10T00:31:45.023012
| 2020-10-12T19:35:07
| 2020-10-12T19:35:07
| 18,149,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
from Helpers import *
import random
random.seed(0)
def RandomNumber(probabilities):
sum_ = sum(probabilities)
numbers = []
normalizer = 1000
for i in range(0, len(probabilities)):
weight = int(round(normalizer * probabilities[i]/float(sum_)))
numbers += [i] * (weight if weight > 0 else 1)
number = random.choice(numbers)
return number
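# Example: probabilities [0.2, 0.3, 0.5] put roughly 200, 300 and 500 copies of
# the indices 0, 1 and 2 into `numbers`, so index 2 is drawn about half the time.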
def RandomMotifs(dna, k, t):
random_motifs = []
for dna_string in dna:
        r = random.choice(range(len(dna_string) - k + 1))  # +1 so the final k-mer start is reachable
random_motifs.append(dna_string[r:r+k])
return random_motifs
def RandomPattern(profile, text, k):
probabilities = []
    for i in range(0, len(text) - k + 1):  # +1 so the last k-mer is scored too
pattern = text[i:i+k]
p = ComputeProbability(pattern, profile)
probabilities.append(p)
j = RandomNumber(probabilities)
pattern = text[j:j+k]
return pattern
def GibbsSampler(dna, k, t, N):
motifs = RandomMotifs(dna, k, t)
best_motifs = []
best_score = float('-inf')
for j in range(0, N):
i = random.choice(range(t))
motifs.pop(i)
profile = Profile(motifs, laplaceRule=True)
random_motif = RandomPattern(profile, dna[i], k)
motifs.insert(i, random_motif)
motifs_score = Score(motifs)
if motifs_score > best_score:
best_motifs = motifs[:]
best_score = motifs_score
return best_motifs
def LoopedGibbsSampler(dna, k, t, N, times=1000):
best_motifs = []
best_score = float('-inf')
for i in range(0, times):
motifs = GibbsSampler(dna, k, t, N)
motifs_score = Score(motifs)
if motifs_score > best_score:
best_motifs = motifs[:]
best_score = motifs_score
return best_motifs
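# A minimal driver sketch, assuming Helpers provides Profile, Score and
# ComputeProbability as imported above; the strings and parameters below are
# illustrative only.
if __name__ == '__main__':
    dna = [
        "CGCCCCTCTCGGGGGTGTTCAGTAAACGGCCA",
        "GGGCGAGGTATGTGTAAGTGCCAAGGTGCCAG",
        "TAGTACCGAGACCGAAAGAAGTATACAGGCGT",
    ]
    print(LoopedGibbsSampler(dna, k=8, t=len(dna), N=100, times=50))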
|
[
"manuel.cerda@introhive.com"
] |
manuel.cerda@introhive.com
|
b6cdb14de2b229bc4923a7f21f1125f885adb7b4
|
811aeb74f573da200a1471056b451affc9768ec5
|
/rnd-graph-gen.py
|
d421782207655109c051d3008fecbe5115ab8145
|
[] |
no_license
|
Galahad3x/SAT-Solver-Race
|
e36b130938bff13f33cac892881e6530542eea4a
|
ad360e8d857d04853a036d7fb5528be312b3999e
|
refs/heads/master
| 2023-04-21T07:37:46.307353
| 2021-05-16T17:36:59
| 2021-05-16T17:36:59
| 350,693,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
#!/usr/bin/python3
#######################################################################
# Copyright 2020 Josep Argelich
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
# Libraries
import sys
import random
# Classes
class CNF():
"""A CNF formula randomly generated"""
def __init__(self, num_nodes, edge_prob, num_colors):
"""
Initialization
num_nodes: Number of nodes
edge_prob: Edge probability between two nodes
num_colors: Number of colors to color the graph
clauses: List of clauses
"""
self.num_nodes = num_nodes
self.edge_prob = edge_prob
self.num_colors = num_colors
self.clauses = []
self.gen_node_clauses()
self.gen_edge_clauses()
def gen_node_clauses(self):
'''Generate the ALO + AMO clauses for all the nodes'''
for n in range(self.num_nodes):
# ALO
var1 = n * self.num_colors + 1
self.clauses.append([i for i in range(var1, var1 + self.num_colors)])
# AMO
for v1 in range(var1, var1 + self.num_colors - 1):
for v2 in range(v1 + 1, var1 + self.num_colors):
self.clauses.append([-v1, -v2])
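    # Example: with num_colors = 3, node 0 owns variables 1..3 and contributes
    # the ALO clause [1, 2, 3] plus the AMO clauses [-1, -2], [-1, -3], [-2, -3].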
def gen_edge_clauses(self):
'''Generates the clauses for each pair of nodes that have an edge with certain prob'''
for n1 in range(self.num_nodes - 1):
for n2 in range(n1 + 1, self.num_nodes):
if random.random() < self.edge_prob:
var1 = n1 * self.num_colors + 1
var2 = n2 * self.num_colors + 1
for c in range(self.num_colors):
self.clauses.append([-(var1 + c), -(var2 + c)])
def show(self):
"""Prints the formula to the stdout"""
sys.stdout.write("c Random CNF formula\n")
sys.stdout.write("p cnf %d %d\n" % (self.num_nodes * self.num_colors, len(self.clauses)))
for c in self.clauses:
sys.stdout.write("%s 0\n" % " ".join(map(str, c)))
# Main
if __name__ == '__main__' :
"""A random CNF generator"""
# Check parameters
if len(sys.argv) < 4 or len(sys.argv) > 5:
sys.exit("Use: %s <num-nodes> <edge-prob> <num-colors> [<random-seed>]" % sys.argv[0])
try:
num_nodes = int(sys.argv[1])
except:
sys.exit("ERROR: Number of nodes not an integer (%s)." % sys.argv[1])
if (num_nodes < 1):
sys.exit("ERROR: Number of nodes must be >= 1 (%d)." % num_nodes)
try:
edge_prob = float(sys.argv[2])
except:
sys.exit("ERROR: Edge probability not a float (%s)." % sys.argv[2])
if (edge_prob < 0 or edge_prob > 1):
sys.exit("ERROR: Edge probability must be in [0, 1] range (%d)." % edge_prob)
try:
num_colors = int(sys.argv[3])
except:
sys.exit("ERROR: Number of colors not an integer (%s)." % sys.argv[3])
if (num_colors < 1):
sys.exit("ERROR: Number of colors must be >= 1 (%d)." % num_colors)
if len(sys.argv) > 4:
try:
seed = int(sys.argv[4])
except:
sys.exit("ERROR: Seed number not an integer (%s)." % sys.argv[4])
else:
seed = None
# Initialize random seed (current time)
random.seed(seed)
# Create a CNF instance
cnf_formula = CNF(num_nodes, edge_prob, num_colors)
# Show formula
cnf_formula.show()
|
[
"joelaumedes@gmail.com"
] |
joelaumedes@gmail.com
|
c2a51fb029d7cb3847d729fe9af9d409c0094084
|
1c1741c479a915d37cd8710b633eb860fb4f6498
|
/test/post_deploy_test/features/steps/steps.py
|
24c5d81d2c5759655233a3cbf123348697b3e91e
|
[] |
no_license
|
EmmanuelOgiji/CircleCI_FlaskApp
|
f3f5dc06979ad3fcd32fdd7140b1bb2b36bc76de
|
bd297dafc64f8985c452191601e01549db7cffc2
|
refs/heads/master
| 2023-05-05T16:50:28.447816
| 2021-05-27T17:29:07
| 2021-05-27T17:29:07
| 370,653,642
| 0
| 0
| null | 2021-05-25T13:44:25
| 2021-05-25T10:30:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
"""
Requires chromedriver; the driver binary should also be approved to run.
"""
from assertpy import assert_that
from behave import *
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
@given(u'I have my browser setup')
def step_impl(context):
# setup webdriver
options = webdriver.ChromeOptions()
options.add_argument('--headless') # set as headless as we don't need to view UI
options.add_argument('--disable-dev-shm-usage') # set flag to reduce memory usage
context.driver = webdriver.Chrome(
options=options
)
@when(u'I access the homepage of the app')
def step_impl(context):
# get url from command line userdata
url = context.config.userdata['url']
# go to url
context.driver.get(url)
# wait for button to appear with max timeout of 15 secs
WebDriverWait(context.driver, 15).until(
ec.element_to_be_clickable((By.ID, "authorButton"))
)
@when(u'I click the button that says Who built this?')
def step_impl(context):
# find button
button = context.driver.find_element_by_id("authorButton")
# click button
button.click()
# wait for page to render
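    # NOTE: passing the expected-condition class itself (without a locator)
    # satisfies the wait on the first poll, so this is effectively a no-op.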
WebDriverWait(context.driver, 15).until(
ec.visibility_of_all_elements_located
)
@then(u'Emmanuel Pius-Ogiji is displayed')
def step_impl(context):
# check that page contains "Emmanuel Pius-Ogiji"
assert_that(context.driver.page_source).contains("Emmanuel Pius-Ogiji")
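# NOTE: behave only runs after_all when it is defined in environment.py; placed
# here in a steps module it never fires, so the driver below may never quit.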
def after_all(context):
# Clean up
context.driver.quit()
|
[
"epiusogiji@yahoo.com"
] |
epiusogiji@yahoo.com
|
246d0c5aefaef370f3e696a2d3d9ccaac0b641dc
|
5c3f49520cdc2c90b22583c4f9bdc230c2553370
|
/05_Diccionarios/06_DiccionariosPython.py
|
6ae4535acd97093af7524397aa2ee1a3d8c94675
|
[] |
no_license
|
fredy-glz/Ejercicios-de-Programacion-con-Python---Aprende-con-Alf
|
0553def06d6f95031edb7d1fe36f56b0bcd00437
|
f18b34ef3780dd803d2cfb2046997604c22b8e3e
|
refs/heads/main
| 2023-01-14T16:52:30.289609
| 2020-11-27T06:08:15
| 2020-11-27T06:08:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
# JOSE ALFREDO ROMERO GONZALEZ
# 23/11/2020
d_p = {}
while True:
llave = input("Que dato quieres guardar? (enter para terminar): ")
if llave == "":
break
d_p[llave] = input(str(llave) + ": ")
print(d_p)
# Solution from https://aprendeconalf.es/
person = {}
more = 'Si'
while more=='Si':
key = input('¿Qué dato quieres introducir? ')
value = input(key + ': ')
person[key] = value
print(person)
more = input('¿Quieres añadir más información (Si/No)? ')
|
[
"noreply@github.com"
] |
noreply@github.com
|
dda6c51639fa901688e4f15aa25e7831613e5c0d
|
dbff0356a195cfa12d0b4d67203b23c2c7008fc9
|
/forth_project/forth_project/settings.py
|
1628a35c6597875410abb9095b0daf740f46a41e
|
[] |
no_license
|
seanjoc/django-deployment-example
|
8d020b335d05ed9c5e56b9238272e1aba14c6fa6
|
f5c7ca01341063992e05bafd9d0f00f205afd195
|
refs/heads/master
| 2020-07-18T07:41:57.752907
| 2019-09-15T15:07:05
| 2019-09-15T15:07:05
| 206,208,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
"""
Django settings for forth_project project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6iii*(xn8%ki7qdy9*pwp#1c(t8r(_cv7a-*$ej=rx)kkhhvdq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['seanjoc16.pythonanywhere.com',
'127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'forth_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'forth_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'forth_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"seanjoc16@gmail.com"
] |
seanjoc16@gmail.com
|
824bf7aab2b27b626049a70f8b430cc71c11a4c1
|
0991f11a710c19e61a41a9893613f4d0a1394640
|
/T3/database/Inserção/Scripts Python/gera_ocorrencias.py
|
e3128b8be9b8a9314476fa4d4e17519cb35e1c30
|
[] |
no_license
|
gsalibi/databases-course
|
a58045619815bfb730aa18cc8da553874a77f7eb
|
bb89acc8c8b0d88b6a2319d2cc0c7a60632a382f
|
refs/heads/master
| 2020-11-27T23:49:33.213511
| 2019-12-23T01:00:59
| 2019-12-23T01:00:59
| 229,652,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
from random import randint, choice
out_file = open("insere_ocorrencias.sql", "w", encoding="utf8")
out_file.write("use GetLove;\n")
ID_USUARIO_MAX = 499
ID_EMPRESA_MAX = 49
ID_MODERADOR_MAX = 9
N = 200
tipos = ["getlove", "empresa", "usuario"]
for i in range(N):
tipo = choice(tipos)
id_pessoa = randint(0,ID_USUARIO_MAX)
id_moderador = randint(0, ID_MODERADOR_MAX)
if tipo == "getlove":
id_implicado = "NULL"
elif tipo == "empresa":
id_implicado = randint(0, ID_EMPRESA_MAX)
else:
while True:
id_implicado = randint(0, ID_USUARIO_MAX)
if id_implicado != id_pessoa:
break
ref_arquivo_mensagem = "msg" + str(i + 3871) + ".txt"
data_ocorrencia = "%d-%02d-%02d" % (randint(2018,2028), randint(1,12), randint(1,28))
tupla_ocorrencia = [str(i), tipo, str(id_implicado), ref_arquivo_mensagem, data_ocorrencia]
tupla_ocorrencia = "(" + ",".join(["\'%s\'" % v for v in tupla_ocorrencia]) + ")"
tupla_ocorrencia = tupla_ocorrencia.replace("\'NULL\'", "NULL")
out_file.write("insert into Ocorrencia values " + tupla_ocorrencia + ";\n")
tupla_avalia = [str(i), str(id_moderador)]
tupla_avalia = "(" + ",".join(["\'%s\'" % v for v in tupla_avalia]) + ")"
out_file.write("insert into Avalia values " + tupla_avalia + ";\n")
tupla_abre = [str(i), str(id_pessoa)]
tupla_abre = "(" + ",".join(["\'%s\'" % v for v in tupla_abre]) + ")"
out_file.write("insert into Abre values " + tupla_abre + ";\n")
out_file.close()
|
[
"gustavohstorti@gmail.com"
] |
gustavohstorti@gmail.com
|
df11eb12d02f73346e7096e6039400e85381a2bb
|
ab5ef28065b0ad3f8d86fc894be569074a4569ea
|
/mirari/CRYE/migrations/0028_auto_20190406_1344.py
|
99b9026478cf2c6aaf96926be89b77b7d4bbecdd
|
[
"MIT"
] |
permissive
|
gcastellan0s/mirariapp
|
1b30dce3ac2ee56945951f340691d39494b55e95
|
24a9db06d10f96c894d817ef7ccfeec2a25788b7
|
refs/heads/master
| 2023-01-22T22:21:30.558809
| 2020-09-25T22:37:24
| 2020-09-25T22:37:24
| 148,203,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
# Generated by Django 2.0.5 on 2019-04-06 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CRYE', '0027_auto_20190406_1331'),
]
operations = [
migrations.AlterField(
model_name='walletcredit',
name='walletcredit_tipo',
field=models.CharField(choices=[('ARRENDAMIENTO', 'ARRENDAMIENTO'), ('CREDITO', 'CREDITO')], default='CREDITO', max_length=250, verbose_name='Tipo de cartera'),
),
]
|
[
"g@gustavo-castellanos.com"
] |
g@gustavo-castellanos.com
|
38d10d76d3a920996f12ea2a2bb210288491fa42
|
06ff1c8056cf5408e6acd5396d4c2ea4fb50cd92
|
/04Prototype/Students/2019/JarzembinskiBartlomiej/prototype/computer_manager.py
|
b37ed3e44f66fb52314021e0af51f7c14cc7c2d7
|
[] |
no_license
|
kasa4565/DesignPatterns
|
b654dc8072940634ffcae52d297dc7da5d93c0b2
|
56dfac636b49266aa86f5b61cce0f9fb76a2b859
|
refs/heads/master
| 2020-08-23T08:10:09.661667
| 2019-08-15T07:54:30
| 2019-08-15T07:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
from computer_prototype import ComputerPrototype
class ComputerManager:
def __init__(self):
self._registry = {}
def get_computer(self, name):
return self._registry[name]
def set_computer(self, name, computer):
self._registry.update({name: computer})
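# A hypothetical usage sketch; it assumes ComputerPrototype can be subclassed
# with a no-argument constructor and given a clone() method, as is conventional
# for the Prototype pattern.
if __name__ == '__main__':
    import copy
    class OfficeComputer(ComputerPrototype):
        def clone(self):
            return copy.deepcopy(self)
    manager = ComputerManager()
    manager.set_computer('office', OfficeComputer())
    office_clone = manager.get_computer('office').clone()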
|
[
"bjarzembinski@yahoo.pl"
] |
bjarzembinski@yahoo.pl
|
6c54a871cb05593fcccc602e98fba4042fe01291
|
8473f9f8b9de41af9ee17ac769e85cbd7b4013c2
|
/feature-extractor/color.py
|
e515cf0ffdfe7f834d938f5d22cefab49b6326c8
|
[
"MIT"
] |
permissive
|
rzumer/CICAV
|
9529623c9d57c093dc451cdd70aafef5ebe6fdb8
|
c1b84c1e14d5c84e841a2bc61089b70d0a2863bf
|
refs/heads/master
| 2020-04-08T20:50:42.471523
| 2019-06-03T12:20:35
| 2019-06-03T12:20:35
| 159,717,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,846
|
py
|
# coding=utf-8
'''
Created on Jun 19, 2017
@author: Raphaël Zumer
'''
import os
import cv2
import numpy as np
import peakutils
from skimage.feature.texture import local_binary_pattern
from scipy.stats import itemfreq
from sklearn.preprocessing import normalize
import heapq
import random
def getHistogramPeaks(path):
if not os.path.exists(path):
print(path, "does not exist")
return None
print "Handling", os.path.basename(path)
vidcap = cv2.VideoCapture(path)
framenum = 0
peaks = 0
peaks16 = 0
peaks8 = 0
lbppeaks = 0
peakfreq = [0 for _ in range(34)]
peakfreq16 = [0 for _ in range(18)]
peakfreq8 = [0 for _ in range(10)]
lbppeakfreq = [0 for _ in range(28)]
lasthist = [0 for _ in range(32)]
lasthist16 = [0 for _ in range(16)]
lasthist8 = [0 for _ in range(8)]
histdiffs = []
histdiffs16 = []
histdiffs8 = []
success, image = vidcap.read()
while success:
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.medianBlur(grey, 3)
histogram = cv2.calcHist([blurred], [0], None, [32], [0, 256])
cv2.normalize(histogram, histogram)
hist16 = cv2.calcHist([blurred], [0], None, [16], [0, 256])
cv2.normalize(hist16, hist16)
hist8 = cv2.calcHist([blurred], [0], None, [8], [0, 256])
cv2.normalize(hist8, hist8)
histarray = np.asarray(histogram).ravel()
hist16 = np.asarray(hist16).ravel()
hist8 = np.asarray(hist8).ravel()
if framenum % 5 == 0:
radius = 3
no_points = 8 * radius
lbp = local_binary_pattern(grey, no_points, radius, method='uniform')
cv2.normalize(lbp, lbp)
lbpfreq = itemfreq(lbp.ravel())
lbphist = lbpfreq[:, 1]/sum(lbpfreq[:, 1])
lbphist = np.asarray(lbphist).ravel()
lbphist = np.insert(lbphist, 0, 0)
lbphist = np.append(lbphist, 0)
currentlbppeaks = peakutils.indexes(lbphist, 0.3 * (28 / float(26)), 1) # change dist back to 2 if it worsens
for val in currentlbppeaks:
lbppeakfreq[val] += 1
lbppeaks += len(currentlbppeaks)
minbins = [0 for _ in range(32)]
for idx, hbin in enumerate(histarray):
minbins[idx] = min(hbin, lasthist[idx])
minbins16 = [0 for _ in range(16)]
for idx, hbin in enumerate(hist16):
minbins16[idx] = min(hbin, lasthist16[idx])
minbins8 = [0 for _ in range(8)]
for idx, hbin in enumerate(hist8):
minbins8[idx] = min(hbin, lasthist8[idx])
histarray = np.insert(histarray, 0, 0)
histarray = np.append(histarray, 0)
hist16 = np.insert(hist16, 0, 0)
hist16 = np.append(hist16, 0)
hist8 = np.insert(hist8, 0, 0)
hist8 = np.append(hist8, 0)
currentpeaks = peakutils.indexes(histarray, 0.3 * (34 / float(32)), 1)
curpeaks16 = peakutils.indexes(hist16, 0.3 * (18 / float(16)), 1)
curpeaks8 = peakutils.indexes(hist8, 0.3 * (10 / float(8)), 1)
for val in currentpeaks:
peakfreq[val] += 1
for val in curpeaks16:
peakfreq16[val] += 1
for val in curpeaks8:
peakfreq8[val] += 1
histdiffs.append(1 - (sum(minbins) / float(sum(histarray))))
histdiffs16.append(1 - (sum(minbins16) / float(sum(hist16))))
histdiffs8.append(1 - (sum(minbins8) / float(sum(hist8))))
peaks += len(currentpeaks)
peaks16 += len(curpeaks16)
peaks8 += len(curpeaks8)
lasthist = histarray
lasthist16 = hist16
lasthist8 = hist8
framenum += 1
success, image = vidcap.read()
bias = 0
bias16 = 0
bias8 = 0
lbpbias = 0
if sum(peakfreq) != 0:
#bias = (sum(heapq.nlargest(2, peakfreq)) / float(sum(peakfreq)))
bias = np.var(peakfreq)
bias16 = np.var(peakfreq16)
bias8 = np.var(peakfreq8)
if sum(lbppeakfreq) != 0:
#lbpbias = max(lbppeakfreq) / float(sum(lbppeakfreq))
lbpbias = np.var(lbppeakfreq)
meandiff = np.mean(histdiffs)
meandiff16 = np.mean(histdiffs16)
meandiff8 = np.mean(histdiffs8)
lbpmax = np.argmax(lbppeakfreq)
print(peaks, "histogram peaks (32 bins),", lbppeaks, "lbp peaks")
print(meandiff, "mean diff")
return peaks, peaks16, peaks8, lbppeaks, lbpmax, meandiff, meandiff16, meandiff8, bias, bias16, bias8, lbpbias
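# A minimal call sketch; the video path below is a placeholder.
if __name__ == '__main__':
    feats = getHistogramPeaks("sample_video.mp4")
    if feats is not None:
        print("32/16/8-bin peak counts:", feats[0], feats[1], feats[2])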
|
[
"rzumer@tebako.net"
] |
rzumer@tebako.net
|
26a7ff500f1586c705fe6a6fcaa0c1c82506c37b
|
f095d8c53e96d6b1f15350c4d4c2c84dac680cab
|
/area_100x100_grid/100x100_grid 100_nodes/random_point100.py
|
afb1d4f8dfebfe95cb218e8244c42f93ec747d86
|
[] |
no_license
|
rubiruchi/wsn_protocol-BEE-MAC
|
593e3862681d10c76f28909a1843ab1174ebf684
|
e9f9f58e5a0ea264cb9674ea43e548f76a2bd521
|
refs/heads/master
| 2020-06-04T13:58:19.113633
| 2018-12-28T16:50:05
| 2018-12-28T16:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import random
factor=4
X=[]
Y=[]
number_of_points = 100
for i in range(number_of_points):
X.append(-50*factor+100*factor*random.random())
Y.append(-50*factor+100*factor*random.random())
file_name='random_points100.txt'
with open(file_name,'w') as f:
for i in X:
f.write(str(i)+' ')
f.write('\n')
for i in Y:
f.write(str(i)+' ')
|
[
"mr.rajatgupta12345@gmail.com"
] |
mr.rajatgupta12345@gmail.com
|
1299cea4d3943a561d6398304b9ae9e60db0a7a6
|
9eadbd115634a6f447af4657440fffafbf5648b1
|
/com/python/learn/crawlertest/dpjsongeneration.py
|
36c04b988f9c0fe7220fef24cd208011a8f1943a
|
[] |
no_license
|
awsbigdata/learnpython
|
910657fe7d178c209992df682bf2a985689f7edb
|
7d4d4782b07653b47190935eaa269ed6b2ba1cb2
|
refs/heads/master
| 2021-10-13T14:52:17.585320
| 2021-10-12T08:23:18
| 2021-10-12T08:23:18
| 161,269,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
import boto3
import json
datapipeline_client = boto3.client('datapipeline',region_name='us-east-1')
pipelineId='df-07575631T1Z9SV9TEGOA'
output_file="/tmp/"+pipelineId+".json"
output = open(output_file, "w")
json_dict=datapipeline_client.get_pipeline_definition(pipelineId=pipelineId)
json.dump(json_dict, output)
print("find the json here")
print(output_file)
|
[
"srramas@amazon.com"
] |
srramas@amazon.com
|
78f993272d394858581bb3c7ad0eba0f8d460217
|
c7a34a925df7e95ea296a37c40a93a3984810501
|
/manage/admin.py
|
7031490a56cd7ac9a39434147f3e96e1b34b4ab9
|
[] |
no_license
|
croolicjah/next-to-202-project
|
28d77e30b1310be7ce32ec8d914bc93f9dc1bc07
|
f0f5ddba5f84f2f7f3ed913e060cacf610eac864
|
refs/heads/master
| 2022-12-13T21:55:21.805640
| 2018-11-19T21:34:51
| 2018-11-19T21:34:51
| 153,174,330
| 1
| 0
| null | 2022-12-08T02:59:33
| 2018-10-15T20:04:22
|
CSS
|
UTF-8
|
Python
| false
| false
| 568
|
py
|
from django.contrib import admin
# Register your models here.
from .models import ArtList
from publishing.admin import ArticleAdmin
from publishing.models import Category, Article
@admin.register(ArtList)
class ArtListAdmin(admin.ModelAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "positions":
kwargs["queryset"] = Category.objects.order_by('number')
return super(ArtListAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
list_display = ('artlist', 'positions',)
|
[
"j.krawczewski@gmail.com"
] |
j.krawczewski@gmail.com
|
4137f2cdd8e343b7025f755ff5cdeef1f2386bd5
|
0294c53a4322440110b2ca34752988949b91f2f6
|
/code/Experiments-CelebA/label_ablation/main_label_ablation.py
|
c4474fcd978f91ab4086d1ff1da81ed9475fbb72
|
[] |
no_license
|
lokhande-vishnu/FairALM
|
31ee3cbf3505d2b3d15e03d7cd7437685b2cc524
|
4f188a3fe0839693e3b7380b82d9dda155d9057e
|
refs/heads/master
| 2020-09-28T10:20:40.931178
| 2020-09-25T04:15:02
| 2020-09-25T04:15:02
| 226,757,715
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,631
|
py
|
import os
import time
import numpy as np
import pandas as pd
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from PIL import Image
from datasets_celebA import get_loaders
from fcn import fcn, MyHingeLoss
from utils import *
import pdb
def run_all(config):
# Architecture
NUM_FEATURES = config['IMAGE_SIZE']*config['IMAGE_SIZE']*3 # Use 128*128 for resnet18 and vgg
NUM_CLASSES = 2
GRAYSCALE = False
# Check these hyper manually
if config['LR'] == 0.001:
config['OPTIMIZER_'] = 'Adam'
else:
config['OPTIMIZER_'] = 'SGD'
config['MODEL_'] = 'fcn'
config['SHUFFLE_'] = True
config['file_name'] = '/tmp/' + config['RESPONSE'] + '_' \
+ config['PROTECTED'] + '_' \
+ config['ALGORITHM'] + '_' \
+ config['OPTIMIZER_'] + '_' \
+ str(config['ETA_INIT'])
def save_everything(epoch, net, optimizer, train_acc, val_acc):
# Save checkpoint.
state = {
'epoch': epoch,
'net': net.state_dict(),
'optimizer': optimizer.state_dict(),
'train_acc': train_acc,
'val_acc': val_acc
}
if not os.path.isdir(config['file_name']):
os.mkdir(config['file_name'])
torch.save(state, config['file_name'] + '/ckpt_' + str(epoch) + '.t7')
train_loader, valid_loader, test_loader = get_loaders(IMAGE_SIZE = config['IMAGE_SIZE'],
BATCH_SIZE = config['BATCH_SIZE'],
label_attr = config['RESPONSE'],
protected_attr = config['PROTECTED'])
model = fcn(NUM_FEATURES)
model = model.cuda()
criterion = MyHingeLoss()
if config['LR'] == 0.001:
optimizer = torch.optim.Adam(model.parameters(), lr=config['LR'])
else:
optimizer = torch.optim.SGD(model.parameters(), lr=config['LR'])
lam0 = config['LAM0_PRIOR']
lam1 = config['LAM1_PRIOR']
lam2 = config['LAM2_PRIOR']
eta = config['ETA_INIT']
print(config)
start_time = time.time()
try:
for epoch in range(config['NUM_EPOCHS']):
eta = eta * config['ETA_BETA']
model.train()
for batch_idx, (features, targets, protected) in enumerate(train_loader):
if config['DEBUG'] and batch_idx > 1:
break
features = features.cuda()
targets = targets.cuda()
protected = protected.cuda()
if config['ALGORITHM'] == 'FAIR_ALM':
for _ in range(config['NUM_INNER']):
### FORWARD AND BACK PROP
logits, _ = model(features)
loss_all = criterion(logits, targets)
loss_t0_s0 = loss_all[(targets == 0) & (protected==0)].mean()
loss_t0_s1 = loss_all[(targets == 0) & (protected==1)].mean()
loss_t1_s0 = loss_all[(targets == 1) & (protected==0)].mean()
loss_t1_s1 = loss_all[(targets == 1) & (protected==1)].mean()
train_loss = loss_all.mean()
penalty_loss = loss_t1_s0 - loss_t1_s1
# Primal Update
optimizer.zero_grad()
loss = loss_all.mean() \
+ (eta/4 + (lam0 - lam1)/2) * loss_t1_s0 \
+ (eta/4 + (lam1 - lam0)/2) * loss_t1_s1
loss.backward()
optimizer.step()
# Dual Update
lam0 = 0.5*(lam0 - lam1) + 0.5 * eta * (loss_t1_s0.item() - loss_t1_s1.item())
lam1 = 0.5*(lam1 - lam0) + 0.5 * eta * (loss_t1_s1.item() - loss_t1_s0.item())
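                        # NOTE: lam1 is computed from the freshly updated lam0
                        # above, so this is a sequential (Gauss-Seidel style)
                        # dual update rather than a simultaneous one.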
else:
### FORWARD AND BACK PROP
logits, _ = model(features)
loss_all = criterion(logits, targets)
loss_t0_s0 = loss_all[(targets == 0) & (protected==0)].mean()
loss_t0_s1 = loss_all[(targets == 0) & (protected==1)].mean()
loss_t1_s0 = loss_all[(targets == 1) & (protected==0)].mean()
loss_t1_s1 = loss_all[(targets == 1) & (protected==1)].mean()
train_loss = loss_all.mean()
penalty_loss = loss_t0_s0 - loss_t0_s1
optimizer.zero_grad()
loss = loss_all.mean()
loss.backward()
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | train_loss: %.4f | penalty_loss: %.4f'%(epoch+1, config['NUM_EPOCHS'], batch_idx,len(train_loader), train_loss, penalty_loss))
print('eta: %.3f | lam0: %.3f | lam1: %.3f | lam2: %.3f' % (eta, lam0, lam1, lam2))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
train_stats = compute_accuracy(config, model, train_loader)
print_stats(config, epoch, train_stats, stat_type='Train')
valid_stats = compute_accuracy(config, model, valid_loader)
print_stats(config, epoch, valid_stats, stat_type='Valid')
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
if epoch == 4 and config['SAVE_CKPT']:
save_everything(epoch, model, optimizer, train_stats, valid_stats)
    except Exception:
        # best-effort sweep: errors in one attribute are swallowed so the
        # remaining responses still run; KeyboardInterrupt now propagates
        pass
if __name__ == '__main__':
### SETTINGS
config = {}
config['ALGORITHM'] = 'FAIR_ALM' # 'FAIR_ALM' or 'L2_PENALTY' or 'NO_CONSTRAINTS'
config['CONSTRAINT'] = 'DEO' # DEO, DDP, PPV
config['LAM0_PRIOR'] = 0.
config['LAM1_PRIOR'] = 0.
config['LAM2_PRIOR'] = 0.
config['ETA_INIT'] = 0.01
config['ETA_BETA'] = 1.01
config['SAVE_CKPT'] = True
config['DEBUG'] = False
config['RESPONSE'] = '5_o_Clock_Shadow'
config['PROTECTED'] = 'Young'
# Hyperparameters
config['LR'] = 0.01
config['NUM_EPOCHS'] = 5
config['NUM_INNER'] = 1
config['BATCH_SIZE'] = 1024
config['IMAGE_SIZE'] = 28
list_response = ['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes',
'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair',
'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin',
'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',
'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard',
'Oval_Face', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline',
'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',
'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace',
'Wearing_Necktie']
for response in list_response:
config['RESPONSE'] = response
run_all(config)
|
[
"lokhande@cs.wisc.edu"
] |
lokhande@cs.wisc.edu
|
46c5e2b2ed08ba91155f44d266097399816d6ca5
|
dcce56815dca2b18039e392053376636505ce672
|
/dumpscripts/atexit_simple.py
|
cadf7e713067bfdb1023e1a1054adc7c45bab915
|
[] |
no_license
|
robertopauletto/PyMOTW-it_3.0
|
28ff05d8aeccd61ade7d4107a971d9d2576fb579
|
c725df4a2aa2e799a969e90c64898f08b7eaad7d
|
refs/heads/master
| 2021-01-20T18:51:30.512327
| 2020-01-09T19:30:14
| 2020-01-09T19:30:14
| 63,536,756
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# atexit_simple.py
import atexit
def all_done():
print('all_done()')
print('Registering')
atexit.register(all_done)
print('Registered')
|
[
"roberto.pauletto@gmail.com"
] |
roberto.pauletto@gmail.com
|
8df4144788164a6ec89107cc0ade23a41752bfe4
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_scheduled.py
|
8a085638d8ee5bfb1423f7fbfc4217347f5939be
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.verbs._schedule import _SCHEDULE
# class header
class _SCHEDULED(_SCHEDULE, ):
def __init__(self,):
_SCHEDULE.__init__(self)
self.name = "SCHEDULED"
self.specie = 'verbs'
self.basic = "schedule"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4f46ea8ebc196efd8adfb4e87ab6901f2405e443
|
a961aa2542979965bc4a8511e544218b26b34aad
|
/deleteByTag.py
|
b0400c2b1224e300be12decac1079b6bffbe11ee
|
[] |
no_license
|
alexluckerman/pocket-deleteByTag
|
39c5c2c4cc38ee55600844e5ab19571c0e8b49f6
|
6f80243d561585fb77f74db8a82b1e8508b43d05
|
refs/heads/master
| 2020-03-07T19:26:42.256699
| 2018-04-01T20:56:39
| 2018-04-01T20:56:39
| 127,670,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
# Alex Luckerman, 4/1/18
# Deletes Pocket articles by a specific tag
from pocket import Pocket
import webbrowser
# Consumer key for the desktop application
consumer_key = ""  # key redacted in the source; supply your own Pocket consumer key here
#Check to see if authorization has already been completed
token_file = open("access_token.txt", 'a')
token_file.close()
token_file = open("access_token.txt", 'r')
if token_file.mode == "r":
print("Access token file opened successfully")
access_token = token_file.read()
print(access_token)
#Authorize the app and obtain the access token
if access_token == "":
request_token = Pocket.get_request_token(consumer_key=consumer_key, redirect_uri='https://alexlucky.me/')
auth_url = Pocket.get_auth_url(code=request_token, redirect_uri='https://alexlucky.me/')
print("Opening your browser to continue authorization...")
webbrowser.open(auth_url, new=2, autoraise=True)
print("If that failed, you can open the URL manually:")
print(auth_url)
done = raw_input("Once you're finished with that, come back and press enter\n")
user_credentials = Pocket.get_credentials(consumer_key=consumer_key, code=request_token)
access_token = user_credentials['access_token']
print(access_token)
token_file.close()
token_file = open("access_token.txt", "a")
token_file.write(access_token)
token_file.close()
else:
print('Found existing authorization code')
# Prompt user about the tag whose articles must be deleted
tag = raw_input('What tag do you want to delete everything for?\n')
pocket_instance = Pocket(consumer_key, access_token)
get_return = pocket_instance.get(tag=tag)  # the keys are already bound to the instance; get() takes only query kwargs
print("This is as far as I got, here's what Pocket sent back for that tag")
items = get_return[0][u'list'].keys()
print(items)
for item_id in items:
pocket_instance.delete(item_id)
pocket_instance.commit()
print("Done. All items for that tag should now be deleted.")
|
[
"noreply@github.com"
] |
noreply@github.com
|
2b3eae0338ae6d36f2b0c1814e6da04e9f902df0
|
ec288bf2f3c9d70cfe9f7e9cb307f151b64addec
|
/src/F121UdpData/packets/packetData/PacketCarTelemetryData.py
|
e946014231137d425c6ff174061fdb89533d621d
|
[
"MIT"
] |
permissive
|
MaximDecherf/F1-21_udp_data
|
3764a7aab07810dad213ceee67d8d91a534d43a0
|
e15761d26c9ce9b2b47b04e683f08553ed4568dd
|
refs/heads/main
| 2023-07-01T12:52:20.873324
| 2021-08-08T15:43:14
| 2021-08-08T15:43:14
| 390,027,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
import struct
from .classes.CarTelemetryData import CarTelemetryData
class PacketCarTelemetryData:
BYTES_SPLITS = {'car_telemetry_data' : [False, 60, CarTelemetryData ,1320], 'mfd_panel_index' : [True, int, 1321], 'mfd_panel_index_secondary_player' : [True, int, 1322], 'suggested_gear' : [True, int, 1323]}
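    # Layout sketch: 22 car blocks of 60 bytes each fill offsets 0..1319,
    # followed by three single-byte fields at offsets 1320..1322.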
NUMBER_DECODER = {'mfd_panel_index' : ['car setup', 'pits', 'damage', 'engine', 'temperatures']}
def __init__(self, body_data, decode_numbers=True):
self.body_data = body_data
end_prev = 0
for key, value in self.BYTES_SPLITS.items():
if value[0]:
if value[1] == int:
setattr(self, key, int.from_bytes(body_data[end_prev:value[2]], byteorder='little'))
end_prev = value[2]
elif value[1] == float:
setattr(self, key, struct.unpack('<f', body_data[end_prev:value[2]]))
end_prev = value[2]
else:
data_list = []
while value[3] > end_prev:
data_list.append(value[2](body_data[end_prev:end_prev+value[1]]))
end_prev = end_prev+value[1]
setattr(self, key, data_list)
end_prev = value[3]
        self.data_length = len(body_data)
if decode_numbers:
self.decode_numbers()
def decode_numbers(self):
for key, decoder in self.NUMBER_DECODER.items():
int_value = getattr(self, key)
if int_value < 0:
setattr(self, key, 'invalid')
else:
if int_value == 255:
setattr(self, key, 'MFD closed')
else:
setattr(self, key, decoder[int_value])
def __repr__(self):
return str(self.__dict__)
def __str__(self):
full_dict = self.__dict__
del full_dict['body_data']
return str(self.__class__) + " : " + str(full_dict)
|
[
"52461982+MaximDecherf@users.noreply.github.com"
] |
52461982+MaximDecherf@users.noreply.github.com
|
c37f7438c8b40540589d7cc283ef83d37f1d00e4
|
91718445561138814ea70c2ffc5f98b8b826a822
|
/Trackpad/TestLogTool/trunk/lib/testlogformat.py
|
34bc7708cb69beb72e8857bea8b0e2f1206efb18
|
[] |
no_license
|
fanmuzhi/Cypress
|
5e332236308f1e430f88f0f3f08b1e2252fb9d70
|
1cca98bafb8607b87b62cc3e97447be4290d8575
|
refs/heads/master
| 2021-05-29T07:52:05.337265
| 2015-04-17T00:58:24
| 2015-04-17T00:58:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
import re
class TestLogFormat:
def change_D_to_data(self, file_path):
log_file=open(file_path, "r+") #open file for read and write
log_string=log_file.read()
regex_m=re.compile(r"<D\d+>")
log_string=regex_m.sub('<data>', log_string)
regex_n=re.compile(r"</D\d+>")
log_string=regex_n.sub('</data>', log_string)
log_file.seek(0) #point to the head of file
log_file.write(log_string) #write the string
log_file.close()
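        # Example: "<D12>5</D12>" becomes "<data>5</data>" after the two
        # substitutions above.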
def change_serialnumber_to_string(self, file_path):
log_file=open(file_path, "r") #open file for read
log_string=log_file.read()
regex = re.compile(r"<Serial_Number>(?P<SerialNumber>\w{19})</Serial_Number>")
r = regex.search(log_string)
log_file.close()
if r is not None:
serialnumber="<Serial_Number>'"+ r.group(1)+"'</Serial_Number>" # group(1) is the second match.
log_string=regex.sub(serialnumber, log_string) #replace with the ' '
log_file=open(file_path, "w") #open file for write
log_file.truncate()
log_file.seek(0) #point to the head of file
log_file.write(log_string) #write the string
def change_time_format(self, file_path):
log_file=open(file_path, "r") # open the read of file
log_string=log_file.read()
regex=re.compile(r"<Test_Time>(?P<TestTime>.{20})</Test_Time>")
r=regex.search(log_string)
log_file.close() # close the read of file
if r is not None:
testtime="<Test_Time>" + r.group(1)[:-1] + "</Test_Time>" # cut the last Z
log_string = regex.sub(testtime, log_string)
log_file=open(file_path, "w") # open the write of file
log_file.truncate()
log_file.seek(0) #point to the head of file
log_file.write(log_string) #write the string
|
[
"pzho@cypress.com"
] |
pzho@cypress.com
|
d7ba142cccb319cecf2c13e412c4320f6675ace1
|
a60bf4ec6da1dc6226634a74532b3c0967cab4ae
|
/core/create_db.py
|
fd9a581376a0d2f3eeb31922896dc6f84341ccf2
|
[] |
no_license
|
codecyou/finance
|
0f12b4476d3438ca1890ceee4770a42030eb2014
|
5103527fa98776878145bf09e70db454a6c764db
|
refs/heads/main
| 2023-06-26T13:03:15.492252
| 2021-07-15T01:14:41
| 2021-07-15T01:14:41
| 339,940,830
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,768
|
py
|
# Creates the database, the table structure, and the views
import sqlite3
import os
try:
from conf import settings
except Exception:
print("from conf import settings 失败!")
# Payments (expenses) table
payments_table = '''CREATE TABLE if not exists payments(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
note_date TEXT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100),
money FLOAT NOT NULL,
account_id INT ,
seller_id INT ,
category_pid INT ,
category_cid INT ,
member_id INT ,
create_time TEXT NOT NULL,
modify_time TEXT,
is_delete bit default 0
);'''
# Incomes table
incomes_table = '''CREATE TABLE if not exists incomes(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
note_date TEXT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100),
money FLOAT NOT NULL,
account_id INT ,
seller_id INT ,
category_pid INT ,
category_cid INT ,
member_id INT ,
create_time TEXT NOT NULL,
modify_time TEXT,
is_delete bit default 0
);'''
# Borrows (money borrowed) table
borrows_table = '''CREATE TABLE if not exists borrows(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
note_date TEXT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100),
money FLOAT NOT NULL,
account_id INT ,
seller_id INT ,
category_pid INT ,
category_cid INT ,
member_id INT ,
create_time TEXT NOT NULL,
modify_time TEXT,
is_delete bit default 0
);'''
# Lends (money lent) table
lends_table = '''CREATE TABLE if not exists lends(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
note_date TEXT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100),
money FLOAT NOT NULL,
account_id INT ,
seller_id INT ,
category_pid INT ,
category_cid INT ,
member_id INT ,
create_time TEXT NOT NULL,
modify_time TEXT,
is_delete bit default 0
);'''
# Repayments table
repayments_table = '''CREATE TABLE if not exists repayments(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
note_date TEXT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100),
money FLOAT NOT NULL,
account_id INT ,
seller_id INT ,
category_pid INT ,
category_cid INT ,
member_id INT ,
create_time TEXT NOT NULL,
modify_time TEXT,
is_delete bit default 0
);'''
# Notes (journal) table
notes_table = '''CREATE TABLE if not exists notes(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
note_date TEXT NOT NULL,
title VARCHAR(50) NOT NULL, -- the action taken, e.g. "flushed 30 tons of water"
remark VARCHAR(100), -- the amount remaining afterwards, e.g. 40 tons of water left
remark2 VARCHAR(200), -- any other remarks
create_time TEXT NOT NULL,
modify_time TEXT,
is_delete bit default 0
);'''
# Accounts table
accounts_table = '''CREATE TABLE if not exists accounts(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100)
);'''
# Sellers / trading partners table
sellers_table = '''CREATE TABLE if not exists sellers(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100)
);'''
# Category tables
# Payment (expense) categories
pay_categorys_table = '''CREATE TABLE if not exists pay_categorys(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
title VARCHAR(50) NOT NULL,
pid INT
);'''
# Income categories
income_categorys_table = '''CREATE TABLE if not exists income_categorys(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
title VARCHAR(50) NOT NULL,
pid INT
);'''
# Create the members / consumers / users table
members_table = '''CREATE TABLE if not exists members(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
title VARCHAR(50) NOT NULL,
remark VARCHAR(100)
);'''
tables = [payments_table, incomes_table, borrows_table, lends_table, repayments_table, notes_table,
accounts_table, sellers_table, pay_categorys_table, income_categorys_table, members_table]
# Create the views
v_payments_info = """create view if not exists v_payments_info as
select p.id,p.note_date,p.title,p.remark,p.money,a.title as account,s.title as seller,
c_p.title as category_p,c_c.title as category_c,m.title as member,p.create_time,p.modify_time
from payments as p left join accounts as a on p.account_id = a.id
left join sellers as s on p.seller_id = s.id
left join pay_categorys as c_p on p.category_pid = c_p.id
left join pay_categorys as c_c on p.category_cid = c_c.id
left join members as m on p.member_id = m.id where is_delete=0;
"""
v_incomes_info = """create view if not exists v_incomes_info as
select p.id,p.note_date,p.title,p.remark,p.money,a.title as account,s.title as seller,
c_p.title as category_p,c_c.title as category_c,m.title as member,p.create_time,p.modify_time
from incomes as p left join accounts as a on p.account_id = a.id
left join sellers as s on p.seller_id = s.id
left join income_categorys as c_p on p.category_pid = c_p.id
left join income_categorys as c_c on p.category_cid = c_c.id
left join members as m on p.member_id = m.id where is_delete=0;
"""
v_borrows_info = """create view if not exists v_borrows_info as
select p.id,p.note_date,p.title,p.remark,p.money,a.title as account,s.title as seller
,p.create_time,p.modify_time
from borrows as p left join accounts as a on p.account_id = a.id
left join sellers as s on p.seller_id = s.id
where is_delete=0;
"""
v_lends_info = """create view if not exists v_lends_info as
select p.id,p.note_date,p.title,p.remark,p.money,a.title as account,s.title as seller
,p.create_time,p.modify_time
from lends as p left join accounts as a on p.account_id = a.id
left join sellers as s on p.seller_id = s.id
where is_delete=0;
"""
v_repayments_info = """create view if not exists v_repayments_info as
select p.id,p.note_date,p.title,p.remark,p.money,a.title as account,s.title as seller
,p.create_time,p.modify_time
from repayments as p left join accounts as a on p.account_id = a.id
left join sellers as s on p.seller_id = s.id
where is_delete=0;
"""
v_notes_info = """create view if not exists v_notes_info as select * from notes where is_delete=0"""
views = [v_payments_info, v_incomes_info, v_borrows_info, v_lends_info, v_repayments_info, v_notes_info]
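# A minimal usage sketch (added for illustration, not part of the original
# module): once create() has run, the views defined above can be queried
# directly, e.g.
#   conn = sqlite3.connect(settings.DB_PATH)
#   for row in conn.execute("select note_date, title, money from v_payments_info"):
#       print(row)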
def create():
    # Create the database schema
conn = sqlite3.connect(os.path.join(settings.DB_PATH))
print("Opened database successfully")
c = conn.cursor()
    # Create the tables
for item in tables:
c.execute(item)
conn.commit()
print("创建数据库表结构完成!")
# 创建视图
for item in views:
c.execute(item)
print("创建视图完成!")
# 初始化插入表数据
# 初始化账户表
accounts_table = '''insert into accounts (title) values (?);'''
# 初始化商家/交易对象表
sellers_table = '''insert into sellers (title) values (?);'''
# 初始化类别表
pay_categorys_table = '''insert into pay_categorys (title) values (?);'''
income_categorys_table = '''insert into income_categorys (title) values (?);'''
# 初始化成员/消费者/使用者表
members_table = '''insert into members (title) values (?);'''
accounts = ["微信支付", "支付宝", "小号微信支付", "京东支付", "云闪付", "现金"]
sellers = ["京东", "便利商超", "淘宝", "苏宁", "京东到家", "美团", "饿了么", "其他"]
pay_categorys = ["日常", "交通", "住房", "医疗", "通讯", "人情往来", "科技", "特殊", "娱乐", "经营"]
income_categorys = ["工资", "退款", "费用报销"]
members = ["小家", "大家", "店"]
for item in accounts:
c.execute(accounts_table, (item,))
for item in sellers:
c.execute(sellers_table, (item,))
for item in pay_categorys:
c.execute(pay_categorys_table, (item,))
for item in income_categorys:
c.execute(income_categorys_table, (item,))
for item in members:
c.execute(members_table, (item,))
    pay_categorys_cs = [("Shopping", 1), ("Dining", 1), ("Haircut", 1)]
for item in pay_categorys_cs:
c.execute("insert into pay_categorys (title, pid) values (?,?)", item)
print("数据插入完成!")
conn.commit()
c.close()
conn.close()
if __name__ == '__main__':
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(base_dir, "db")
conn = sqlite3.connect(os.path.join(db_dir, "finance.db"))
print("Opened database successfully")
c = conn.cursor()
    # Create the tables
    for item in tables:
        c.execute(item)
    conn.commit()
    print("Database schema created!")
    # Create the views
    for item in views:
        c.execute(item)
    print("Views created!")
conn.commit()
c.close()
conn.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
6052439ac95c34f6733052b9f660bf3ee5677c7d
|
c2d267ef8f2f9552499e120fc691936c708c3610
|
/main.py
|
a13d3f1749da1f5d684b84fef699c109b7b4f7ec
|
[] |
no_license
|
mattcoldwater/dreamer
|
b367969f1d95776989ff1fa0797ecc18f1115e6d
|
f5873442b30f4bf5c2225e0e53660067d40ad50b
|
refs/heads/main
| 2023-05-01T13:35:24.695539
| 2021-05-19T17:53:28
| 2021-05-19T17:53:28
| 368,954,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,586
|
py
|
import datetime
import os
import argparse
import torch
from rlpyt.runners.minibatch_rl import MinibatchRlEval, MinibatchRl
# Runner - Connects the sampler, agent, and algorithm; manages the training loop and logging of diagnostics
from rlpyt.samplers.serial.sampler import SerialSampler
from rlpyt.samplers.parallel.gpu.sampler import GpuSampler
# Sampler - Manages agent / environment interaction to collect training data, can initialize parallel workers
from rlpyt.utils.logging.context import logger_context
from rlpyt.utils.seed import set_seed
from dreamer_agent import AtariDreamerAgent
from dreamer_algo import Dreamer
from envs.atari import AtariEnv, AtariTrajInfo
from envs.wrapper import make_wapper
from envs.one_hot import OneHotAction
from envs.time_limit import TimeLimit
def build_and_train(log_dir, game="pong", run_ID=0, cuda_idx=None, eval=False, save_model='last', load_model_path=None, n_parallel=2, CumSteps=0):
device = 'cpu' if cuda_idx is None else 'cuda'
params = torch.load(load_model_path, map_location=torch.device(device)) if load_model_path else {}
agent_state_dict = params.get('agent_state_dict')
optimizer_state_dict = params.get('optimizer_state_dict')
##--- wu ---##
log_interval_steps = 5e4
prefill = 5e4
train_every = 16
batch_B = 16
n_steps = 1e4 if eval else 5e6
itr_start = max(0, CumSteps - prefill) // train_every
##--- wu ---##
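    # Worked example (assumed numbers, not from an actual run): with
    # CumSteps=100000, prefill=5e4 and train_every=16,
    # itr_start = max(0, 100000 - 50000) // 16 = 3125 iterations already done.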
action_repeat = 4 # 2
env_kwargs = dict(
name=game,
action_repeat=action_repeat,
size=(64, 64),
grayscale=True, # False
life_done=True,
sticky_actions=True,
)
factory_method = make_wapper(
AtariEnv,
[OneHotAction, TimeLimit],
[dict(), dict(duration=1000000 / action_repeat)]) # 1000
sampler = GpuSampler(
EnvCls=factory_method,
TrajInfoCls=AtariTrajInfo,
env_kwargs=env_kwargs,
eval_env_kwargs=env_kwargs,
batch_T=1,
batch_B=batch_B,
max_decorrelation_steps=0,
eval_n_envs=10,
eval_max_steps=int(10e5),
eval_max_trajectories=5,
)
algo = Dreamer(initial_optim_state_dict=optimizer_state_dict,
horizon=10,
use_pcont=True,
replay_size=int(2e6), # int(5e6)
kl_scale=0.1,
batch_size=50,
batch_length=50,
C=1, # 100,
train_every=train_every // batch_B, # 1000
pretrain=100,
world_lr=2e-4, # 6e-4,
value_lr=1e-4, # 8e-5,
actor_lr=4e-5, # 8e-5,
discount=0.999, # 0.99,
expl_amount=0.0, # 0.3,
prefill=prefill // batch_B, # 5000
discount_scale=5., # 10.
video_every=int(2e4 // 16 * 16 // batch_B), # int(10)
)
if eval:
# for eval - all versions
agent = AtariDreamerAgent(train_noise=0.0, eval_noise=0, expl_type="epsilon_greedy", itr_start=itr_start, the_expl_mode='eval',
expl_min=0.0, expl_decay=11000, initial_model_state_dict=agent_state_dict,
model_kwargs=dict(use_pcont=True))
else:
# for train - all versions
# agent = AtariDreamerAgent(train_noise=0.4, eval_noise=0, expl_type="epsilon_greedy", itr_start=itr_start, the_expl_mode='train',
# expl_min=0.1, expl_decay=11000, initial_model_state_dict=agent_state_dict,
# model_kwargs=dict(use_pcont=True))
# for train - dreamer_V2
agent = AtariDreamerAgent(train_noise=0.0, eval_noise=0, expl_type="epsilon_greedy", itr_start=itr_start, the_expl_mode='train',
expl_min=0.0, expl_decay=11000, initial_model_state_dict=agent_state_dict,
model_kwargs=dict(use_pcont=True))
    my_seed = 0  # reproducibility
set_seed(my_seed)
runner_cls = MinibatchRlEval if eval else MinibatchRl
runner = runner_cls(
algo=algo, # Uses gathered samples to train the agent (e.g. defines a loss function and performs gradient descent).
agent=agent, # Chooses control action to the environment in sampler; trained by the algorithm. Interface to model.
sampler=sampler,
n_steps=n_steps,
log_interval_steps=log_interval_steps,
affinity=dict(cuda_idx=cuda_idx, workers_cpus=list(range(n_parallel))),
seed=my_seed,
)
config = dict(game=game)
name = "dreamer_" + game
with logger_context(log_dir, run_ID, name, config, snapshot_mode=save_model, override_prefix=True,
use_summary_writer=True):
runner.train()
if __name__ == "__main__":
# python main.py --log-dir /content/debug/ --game pong --load-model-path '/content/pong_0/run_2/itr_149999.pkl' --eval
# python main.py --log-dir /content/pong_0/ --game pong --cuda-idx 0 # --load-model-path '/gdrive/MyDrive/CSE525/Project/saved/itr_68999.pkl'
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--game', help='Atari game', default='pong') # games = ["pong", "chopper_command"]
parser.add_argument('--run-ID', help='run identifier (logging)', type=int, default=0)
parser.add_argument('--cuda-idx', help='gpu to use ', type=int, default=None)
parser.add_argument('--eval', action='store_true')
parser.add_argument('--save-model', help='save model', type=str, default='last',
choices=['all', 'none', 'gap', 'last'])
parser.add_argument('--load-model-path', help='load model from path', type=str) # path to params.pkl
parser.add_argument('--n_parallel', help='number of sampler workers', type=int, default=2)
parser.add_argument('--CumSteps', help='CumSteps', type=int, default=0)
default_log_dir = os.path.join(
'/content/',
# os.path.dirname(__file__),
'data',
'local',
datetime.datetime.now().strftime("%Y%m%d"))
parser.add_argument('--log-dir', type=str, default=default_log_dir)
args = parser.parse_args()
log_dir = os.path.abspath(args.log_dir)
i = args.run_ID
while os.path.exists(os.path.join(log_dir, 'run_' + str(i))):
print(f'run {i} already exists. ')
i += 1
print(f'Using run id = {i}')
args.run_ID = i
build_and_train(
log_dir,
game=args.game,
run_ID=args.run_ID,
cuda_idx=args.cuda_idx,
eval=args.eval,
save_model=args.save_model,
load_model_path=args.load_model_path,
n_parallel=args.n_parallel,
CumSteps=args.CumSteps
)
"""
buffer fill 5e2
under small net:
C=50, every=500 128s;
C=1, every=16 80s-->padding-->35s.
under big net:
C=1, every=16 97s.
fill 480
under small net:
-- 2 cpu -gpusampler B=2 18*2 seconds
-- serial mode 34 seconds
buffer fill 5e3 in gpu serial mode
under small net:
05:56 seconds
0.00859s/step - env
0.2003s/step - Dreamer
buffer fill 5e4 in cpu core 4
00:04:40 para=1 B=1 CumSteps 50000 NewCompletedTrajs 28 StepsPerSecond 177.964
00:02:15 para=3 B=3 CumSteps 49998 NewCompletedTrajs 27 StepsPerSecond 365.73
para=4 B=4 StepsPerSecond 370.25
para=4 B=16 StepsPerSecond 697.326
para=4 B=10 T=10 StepsPerSecond 864.995
para=4 B=10 T=13 StepsPerSecond 876.363
para=4 B=10 T=16 StepsPerSecond 978.95
para=4 B=10 T=17 StepsPerSecond 947.888
para=4 B=10 T=25 StepsPerSecond 911.478
https://github.com/danijar/dreamerv2
# General
task: 'atari_pong'
steps: 2e8
eval_every: 1e5
log_every: 1e4
prefill: 50000
dataset_size: 2e6
pretrain: 0
# Environment
time_limit: 108000 # 30 minutes of game play.
grayscale: True
action_repeat: 4
eval_noise: 0.0
train_every: 16
train_steps: 1
clip_rewards: 'tanh'
# Model
grad_heads: ['image', 'reward', 'discount']
dyn_cell: 'gru_layer_norm'
pred_discount: True
cnn_depth: 48
dyn_deter: 600
dyn_hidden: 600
dyn_stoch: 32
dyn_discrete: 32
reward_layers: 4
discount_layers: 4
value_layers: 4
actor_layers: 4
# Behavior
actor_dist: 'onehot'
actor_entropy: '1e-3'
expl_amount: 0.0
discount: 0.999
imag_gradient: 'reinforce'
imag_gradient_mix: '0'
# Training
discount_scale: 5.0
reward_scale: 1
weight_decay: 1e-6
model_lr: 2e-4
kl_scale: 0.1
kl_free: 0.0
actor_lr: 4e-5
value_lr: 1e-4
oversample_ends: True
"""
|
[
"mattcoldwater@gmail.com"
] |
mattcoldwater@gmail.com
|
dc06bceff161ff58ede64f0c6360bacc5fdbeee6
|
6d7678e3d79c97ddea2e2d65f2c2ef03b17f88f6
|
/venv/lib/python3.6/site-packages/pysnmp/proto/api/__init__.py
|
d742ecc76dec1386047d3cae28b450a5edff0f52
|
[
"MIT"
] |
permissive
|
PitCoder/NetworkMonitor
|
b47d481323f26f89be120c27f614f2a17dc9c483
|
36420ae48d2b04d2cc3f13d60d82f179ae7454f3
|
refs/heads/master
| 2020-04-25T11:48:08.718862
| 2019-03-19T06:19:40
| 2019-03-19T06:19:40
| 172,757,390
| 2
| 0
|
MIT
| 2019-03-15T06:07:27
| 2019-02-26T17:26:06
|
Python
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.proto.api import v1, v2c, verdec
# Protocol versions
protoVersion1 = 0
protoVersion2c = 1
protoModules = {protoVersion1: v1, protoVersion2c: v2c}
decodeMessageVersion = verdec.decodeMessageVersion
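# A minimal usage sketch (added for illustration; `wholeMsg` is an assumed
# variable holding raw BER-encoded bytes, not part of the original file):
#   ver = decodeMessageVersion(wholeMsg)
#   pMod = protoModules[ver]  # selects the v1 or v2c API module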
|
[
"overlord.lae@gmail.com"
] |
overlord.lae@gmail.com
|
147e003c21d2a29fc5897549545ed467dfb5bd90
|
43d278d96578ec1bc59954b8d4ddbe82dde39a35
|
/scripts/ex5p.py
|
70e34a3440dd793ff5f79c3b02946bb81a7d3d46
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
Teradata/r-python-sto-orangebook-scripts
|
7a9c02baf78e1c49f9414601bae46d8cd8e378d9
|
1f54d4ac61df3485f0ddb8a13422ca8dda4d3372
|
refs/heads/master
| 2023-08-22T04:54:23.057004
| 2023-08-09T18:35:25
| 2023-08-09T18:35:25
| 233,639,793
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,964
|
py
|
################################################################################
# The contents of this file are Teradata Public Content
# and have been released to the Public Domain.
# Licensed under BSD; see "license.txt" file for more information.
# Copyright (c) 2023 by Teradata
################################################################################
#
# R And Python Analytics with SCRIPT Table Operator
# Orange Book supplementary material
# Alexander Kolovos - July 2023 - v.2.5
#
# Example 5: Linear Regression with the CALCMATRIX table operator (Python vers.)
# File : ex5p.py
#
# (Adapted from the Teradata Developer Exchange online example by Mike Watzke:
# http://developer.teradata.com/extensibility/articles/
# in-database-linear-regression-using-the-calcmatrix-table-operator)
#
# Use case:
# A simple example of linear regression with one dependent and two independent
# variables (univariate, multiple variable regression). For the regression
# computations, we need to calculate the sums of squares and cross-products
# matrix of the data. The example illustrates how to use the CALCMATRIX table
# operator for this task. The script returns the estimates of the regression
# coefficients.
#
# Script accounts for the general scenario that an AMP might have no data.
#
# Requires the numpy and pandas add-on packages.
#
# Required input:
# - ex5tbl table data from file "ex5dataTblDef.sql"
#
# Output:
# - varName: Regression coefficient name
# - B : Regression coefficient estimated value
#
################################################################################
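# For clarity, the computation below follows the ordinary-least-squares
# normal equations (a restatement of what the code does, not extra logic):
#
#   B = (X'X)^(-1) X'Y
#
# where the first row/column of X'X hold the observation count and the
# column sums, matching the ESSCP layout that CALCMATRIX emits.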
# Load dependency packages
import pandas as pd
import numpy as np
import sys
DELIMITER='\t'
# The input comes from CALCMATRIX. When in the COMBINE phase with 'COLUMNS'
# output and CALCTYPE set to 'ESSCP' (extended sums of squares and
# cross-product), then output includes following columns:
# INTEGER rownum, VARCHAR(128) rowname, BIGINT c (for count), FLOAT column s
# (for summation), and a FLOAT column for each data column of input.
tbldata = []
colNames = []
while 1:
try:
line = input()
if line == '': # Exit if user provides blank line
break
else:
allnum = line.split(DELIMITER)
colNames.append(allnum[1].strip())
allnum = [float(x.replace(" ","")) for x in allnum[2:]]
tbldata.append(allnum)
except (EOFError): # Exit if reached EOF or CTRL-D
break
nRec = len(tbldata)
# If the present AMP has no data, then exit this script instance.
if nRec==0: # not tbldata.size:
sys.exit()
del allnum
colNames.insert(0,'s') # Account for sum in current column 2
colNames.insert(0,'c') # Account for count in current column 1
xCols = colNames[1:len(colNames)-1] # Include sum and all independ var columns
df = pd.DataFrame(tbldata, columns=colNames)
df.insert(0,'rowname',colNames[2:]) # Prepend the column with row names
# Extract partial X'X
pXX = np.asarray( df.loc[ df['rowname']!='y', xCols ] )
# Extract observation count
obscount = np.asarray( df.loc[ df['rowname']=='y' , 'c'].iat[0] )
# Extract X variable summations
Xsum = np.asarray( df.loc[ df['rowname']!='y', 's' ] )
# Build first row of matrix X'X
XX = np.hstack((obscount, Xsum))
# Append partial X'X
XX = np.vstack((XX, pXX))
# Invert X'X
iXX = np.linalg.inv(XX)
# Extract Y variable summations
Ysum = np.asarray( df.loc[ df['rowname']=='y' , 's'].iat[0] )
# Extract partial X'Y
XY = np.asarray( df.loc[ df['rowname']!='y' , 'y'] ).reshape(-1, 1)
# Build X'Y of matrix
XY = np.vstack((Ysum, XY))
# Multiply inverted X'X * X'Y to obtain coefficients
B = np.dot(iXX,XY)
# Gather names of variables
varName = ['Intercept']
varName.extend(xCols[1:]) # Skip column name of sums
# Export results to the SQL Engine database through standard output
for i in range( 0, len(varName) ):
print(varName[i], DELIMITER, float(B[i]))
|
[
"alexander.kolovos@teradata.com"
] |
alexander.kolovos@teradata.com
|
06eea92ff092eef3df728b082305ff0eb792b444
|
1871ab24904127053c9d6c5ffd9b776a61b7495b
|
/Day004/day4.4_rock_paper_scissors.py
|
5deeca44e47bc8a79a125cc3b0b59f57782915aa
|
[] |
no_license
|
izark99/100DaysPython
|
d857adb08e0186802c766fe6a8bddceb56feed09
|
cf0b885c2d03e0dbf80a1c825d325e025fe30fe4
|
refs/heads/master
| 2023-03-04T02:41:02.119120
| 2021-02-16T06:02:05
| 2021-02-16T06:02:05
| 336,467,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
your_choose = int(input("What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\n"))
if your_choose > 2 or your_choose < 0:
    print("You typed a wrong number! So I'll choose randomly for you.")
    your_choose = random.randint(0, 2)
if your_choose == 0:
    print(rock)
elif your_choose == 1:
    print(paper)
else:
    print(scissors)
print("Computer choose: ")
ramdom_choose = random.randint(0,2)
if ramdom_choose == 0:
print(rock)
elif ramdom_choose == 1:
print(paper)
else:
print(scissors)
if your_choose == 0 and ramdom_choose == 2:
print("You win!")
elif ramdom_choose > your_choose:
print("You lose!")
elif ramdom_choose < your_choose:
print("You win!")
elif ramdom_choose == your_choose:
print("Draw!")
else:
print("You lose!")
|
[
"izarknguyen@gmail.com"
] |
izarknguyen@gmail.com
|
236f2f6a95d6fae44b77149fadda0d33ae893743
|
fd981b47482467291576ae4650d2925d6fa00564
|
/robot_ws/build/hector_slam/hector_imu_attitude_to_tf/catkin_generated/pkg.develspace.context.pc.py
|
76e540da7a8c070743b0ac0c7f119166f43c6959
|
[] |
no_license
|
Forrest-Z/rtcrobot
|
7337271e726db794ce08953f333ad9a0f8e70027
|
229ce1d7e77af9348eac870e00a2c4049e4562f1
|
refs/heads/master
| 2022-12-16T00:14:17.525845
| 2020-05-18T09:28:09
| 2020-05-18T09:28:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_imu_attitude_to_tf"
PROJECT_SPACE_DIR = "/home/gaara/robot_ws/devel"
PROJECT_VERSION = "0.3.5"
|
[
"mtk@mtk"
] |
mtk@mtk
|
20f345afae40b45b87b25fa17e45b95c149d222b
|
1718e5e3ffb1385860aeb48126caf9fece608ccf
|
/labs/12/objdetect.py
|
9f9034992a79ffb4ff46cf1cd8328dd80a2c7dda
|
[] |
no_license
|
kristen-foong/spja
|
267df57847e01a49354a2f932158ff5a2765857b
|
082fde2e163c0bb26ce4815d0815e4a0af4a5b83
|
refs/heads/master
| 2023-01-06T16:41:54.564429
| 2019-12-11T10:45:31
| 2019-12-11T10:45:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
import cv2
import numpy as np
import pyyolo
darknet_path = 'pyyolo/darknet'
datacfg = 'cfg/coco.data'
cfgfile = 'cfg/yolov3-tiny.cfg'
weightfile = '../yolov3-tiny.weights'
thresh = 0.4
hier_thresh = 0.4
delay = 10
frame = 0
cam = cv2.VideoCapture(-1)
# cam = cv2.VideoCapture("http://192.168.1.23:4747/video")
outputs = []
pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
while True:
frame += 1
_, img = cam.read()
if frame % 4 == 0:
height, width, channels = img.shape[:3]
transposed = img.transpose(2, 0, 1) # move channels to beginning
data = transposed.ravel() / 255.0 # linearize and normalize
data = np.ascontiguousarray(data, dtype=np.float32)
outputs = pyyolo.detect(width, height, channels, data, thresh, hier_thresh)
color = (255, 255, 255)
for output in outputs:
if output["prob"] >= 0.2:
color = list(np.random.random(size=3) * 256)
tl = (output["left"], output["top"])
cv2.rectangle(img, tl, (output["right"], output["bottom"]), color)
cv2.putText(img, "{} ({:.2f} %)".format(output["class"], output["prob"] * 100),
(tl[0] - 20, tl[1] - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.0, color)
cv2.imshow("win", img)
    key = cv2.waitKey(delay) & 0xFF
    if key == 27:  # ESC exits the loop so the cleanup below is reachable
        break
pyyolo.cleanup()
cam.release()
cv2.destroyAllWindows()
|
[
"jakub.beranek@vsb.cz"
] |
jakub.beranek@vsb.cz
|
c4398a387f767786e13fa0ebf8d104429c864d08
|
3e1aa41a30ce18816789f8d1c5cc57dfc16bab87
|
/book_qt_designer_app8.py
|
b657d34200663e35cb16362b51f4f2d25425e699
|
[] |
no_license
|
iianarmas/pyqt6
|
241d10f99debff155f028e3ff5da3cf4073f0159
|
55d91ab3cfd159188186bcc22ffa7a7bd7bf2f18
|
refs/heads/main
| 2023-08-14T21:11:23.311000
| 2021-10-09T11:33:32
| 2021-10-09T11:33:32
| 412,493,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
# ========== MAIN.PY ========== #
from PyQt6.QtCore import QSize, Qt
from PyQt6.QtSql import QSqlDatabase, QSqlTableModel, QSqlQueryModel, QSqlQuery
from PyQt6.QtWidgets import (
QApplication,
QMainWindow,
QLineEdit,
QTableView,
QVBoxLayout,
QWidget,
QLabel, QPushButton, QListView,
)
db = QSqlDatabase('QSQLITE')
db.setDatabaseName('resources/db/sample_db.sqlite')
db.open()
temp = ''
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
container = QWidget()
layout = QVBoxLayout()
label = QLabel()
label.setText('Search Name')
self.search = QLineEdit()
self.search.textChanged.connect(self.update_filter)
# self.table = QTableView()
self.table = QListView()
self.model = QSqlQueryModel()
self.table.setModel(self.model)
self.query = QSqlQuery(db=db)
        self.query.prepare(
            "SELECT Name FROM Track WHERE Name LIKE '%' || :name || '%'"
        )
layout.addWidget(self.table)
layout.addWidget(label)
layout.addWidget(self.search)
container.setLayout(layout)
self.setMinimumSize(QSize(1024, 600))
self.update_filter()
self.setCentralWidget(container)
def execute_query(self):
self.query.exec()
self.model.setQuery(self.query)
def update_filter(self, text=None):
name = text
"""if name == '':
name = None
self.query.bindValue(":name", name)
self.execute_query()
else:
self.query.bindValue(":name", name)
self.execute_query()"""
# for python3.10
match name:
case '':
name = None
self.query.bindValue(':name', name)
self.execute_query()
case _:
self.query.bindValue(':name', name)
self.execute_query()
if __name__ == "__main__":
app = QApplication([])
window = MainWindow()
window.show()
app.exec()
|
[
"noreply@github.com"
] |
noreply@github.com
|
e94f6a0ef46d77df9c8d3ece79519b0d26d16bf7
|
028d788c0fa48a8cb0cc6990a471e8cd46f6ec50
|
/Python-OOP/Exam-Preparation/16-Aug-2020/project/software/light_software.py
|
6182deaf8edbfaa898d0623ff12527b07c73dd0b
|
[] |
no_license
|
Sheko1/SoftUni
|
d6b8e79ae545116f4c0e5705ad842f12d77a9c9d
|
a9fbeec13a30231b6a97c2b22bb35257ac1481c0
|
refs/heads/main
| 2023-07-13T15:39:48.826925
| 2021-08-21T12:51:02
| 2021-08-21T12:51:02
| 317,266,200
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from .software import Software
class LightSoftware(Software):
CAPACITY_INCREASE = 0.5
MEMORY_DECREASE = 0.5
def __init__(self, name: str, capacity_consumption: int, memory_consumption: int):
super().__init__(name, type="Light", capacity_consumption=int(
capacity_consumption + (capacity_consumption * self.CAPACITY_INCREASE)),
memory_consumption=int(memory_consumption * self.MEMORY_DECREASE))
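# Worked example (numbers assumed for illustration, not from the original):
# LightSoftware("demo", capacity_consumption=100, memory_consumption=60)
# stores capacity int(100 + 100 * 0.5) = 150 and memory int(60 * 0.5) = 30.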
|
[
"martinkypar@gmail.com"
] |
martinkypar@gmail.com
|
58e80310c3825cd59f6868df5b281737506dd1ef
|
d5a4f34b6f82a167c973df44093ad5ced0d30ccd
|
/venv/Scripts/django-admin.py
|
d443d34793b15686d21799524a9faae1076a6a97
|
[] |
no_license
|
jayawd02/Lecture
|
914c8b9a0e5e001ea3c4b596310ef7dfad359498
|
5fc36b199ea3dc23979142794e3390a129626405
|
refs/heads/master
| 2022-11-26T07:29:44.556994
| 2020-08-10T12:43:25
| 2020-08-10T12:43:25
| 286,471,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
#!C:\Users\senan\PycharmProjects\Lecture\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"jayawd02@myunitec.ac.nz"
] |
jayawd02@myunitec.ac.nz
|
23d4b4f8263f3dab3233d3361d31bb2e52192e50
|
1d67e0a295b3ee9d56a84ced5fb5727852533c80
|
/Flask/demo/__init__.py
|
67bf8ab227d21a4d800a8433638bd49032628381
|
[] |
no_license
|
ab1c740/testgit
|
4e9a2da80155d62f0c09eac1bd59464c5b611076
|
09bc9222c6dc4f45f1d1d6f22c8816c38d501874
|
refs/heads/master
| 2020-03-30T00:41:29.917247
| 2018-09-28T03:39:03
| 2018-09-28T03:39:03
| 150,537,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
__author__ = 'dev11'
from flask import Flask
app = Flask(__name__)
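# A minimal route sketch (added for illustration; the route path and message
# are assumptions, not part of the original file):
@app.route('/')
def index():
    return 'Hello from the demo app'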
|
[
"1724940634@qq.com"
] |
1724940634@qq.com
|
9b026ba7f48d69b82da9bdfd8f2359d663bc872c
|
ef92d05ec7e9be461a80b8ea5e6fc3b7c15a2634
|
/RainbowFileReaders/SOBModelReader.py
|
657f781b7f1567696c9a1e72345bf57645a69eb4
|
[
"MIT"
] |
permissive
|
boristsr/RainbowSixFileConverters
|
dc6fa409f4ca98df50f9db53002b0eddb0111011
|
1f755f781ee85af068ba7bcc73d4960998363794
|
refs/heads/master
| 2022-12-05T14:22:38.024932
| 2021-06-14T03:30:14
| 2021-06-14T03:30:14
| 277,547,547
| 3
| 0
|
MIT
| 2022-11-22T07:57:23
| 2020-07-06T13:20:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
"""Provides classes that will read and parse SOB model files."""
from typing import List
from FileUtilities.BinaryConversionUtilities import BinaryFileDataStructure, FileFormatReader, SizedCString, BinaryFileReader
from RainbowFileReaders import R6Settings
from RainbowFileReaders.RSEMaterialDefinition import RSEMaterialDefinition, RSEMaterialListHeader
from RainbowFileReaders.CXPMaterialPropertiesReader import load_relevant_cxps
from RainbowFileReaders.RSEGeometryDataStructures import R6GeometryObject, RSEGeometryListHeader
class SOBModelFile(FileFormatReader):
"""Class to read full SOB files"""
def __init__(self):
super(SOBModelFile, self).__init__()
self.header: SOBHeader = None
self.materialListHeader: RSEMaterialListHeader = None
self.materials: List[RSEMaterialDefinition] = []
self.geometryListHeader: RSEGeometryListHeader = None
self.geometryObjects: List[R6GeometryObject] = []
self.footer: SOBFooterDefinition = None
def read_data(self):
super().read_data()
fileReader = self._filereader
self.header = SOBHeader()
self.header.read(fileReader)
if self.verboseOutput:
self.header.print_structure_info()
self.materialListHeader = RSEMaterialListHeader()
self.materialListHeader.read(fileReader)
if self.verboseOutput:
self.materialListHeader.print_structure_info()
_, gameDataPath, modPath = R6Settings.determine_data_paths_for_file(self.filepath)
CXPDefinitions = load_relevant_cxps(gameDataPath, modPath)
self.materials = []
for _ in range(self.materialListHeader.numMaterials):
newMaterial = RSEMaterialDefinition()
newMaterial.read(fileReader)
newMaterial.add_CXP_information(CXPDefinitions)
self.materials.append(newMaterial)
if self.verboseOutput:
newMaterial.print_structure_info()
self.geometryListHeader = RSEGeometryListHeader()
self.geometryListHeader.read(fileReader)
if self.verboseOutput:
self.geometryListHeader.print_structure_info()
self.geometryObjects = []
for _ in range(self.geometryListHeader.count):
newObj = R6GeometryObject()
newObj.read(fileReader)
self.geometryObjects.append(newObj)
if self.verboseOutput:
newObj.print_structure_info()
self.footer = SOBFooterDefinition()
self.footer.read(fileReader)
class SOBHeader(BinaryFileDataStructure):
"""Contains the information stored in the file formats header structure"""
def __init__(self):
super(SOBHeader, self).__init__()
def read(self, filereader: BinaryFileReader):
super().read(filereader)
self.header_begin_message = SizedCString(filereader)
class SOBFooterDefinition(BinaryFileDataStructure):
"""Contains the information stored in the file formats footer structure"""
def __init__(self):
super(SOBFooterDefinition, self).__init__()
def read(self, filereader: BinaryFileReader):
super().read(filereader)
self.end_model_string = SizedCString(filereader)
if __name__ == "__main__":
test = SOBModelFile()
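    # Hedged usage sketch: the FileFormatReader base class presumably provides
    # the entry point that sets self.filepath and drives read_data(); the
    # helper name and path below are assumptions, not confirmed by this file.
    # test.read_file("sample_model.sob")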
|
[
"philip@gdcorner.com"
] |
philip@gdcorner.com
|
973a8a1b14c329a5c94da274049a59f8ec211b7e
|
6c4096a4cedbf5c496494e16a521d1d325e7726b
|
/hw_set2.py
|
70590f56dad314b671c25c1c11c4b8daed0aaa7f
|
[] |
no_license
|
bradlyke/astr5490
|
124b12ac1e6c4924855afd77a3e5adb4c3ae882b
|
252cf3f82b3d0fce8c55d5c1cc5e6ad9de85ffac
|
refs/heads/master
| 2021-06-03T23:48:23.253311
| 2020-11-28T20:35:28
| 2020-11-28T20:35:28
| 147,553,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,270
|
py
|
"""
By: Brad Lyke
All of the code for homework set 2. This requires python 3 and latex (for plots).
This also requires that sciCon be in the same folder (or in your PYTHONPATH).
It is included in the utilities repository (or submitted along with this program).
Parameters
----------
None : :class:'float'
The program will prompt for user input.
For user choices, use 1/2 or y/n as asked.
For the initial position/velocity input comma-separated (no spaces)
values in floats or scientific notation (centimeters for R, km/s for V)
Returns
----------
--main--
:class:'str'
Returns the orbital parameters generated from either the test point or the
user's inputs.
--prob2_plotter--
:class:'none'
Returns 3 figures:
a) (Y v X) and (y v x)
b) X(t) and Y(t)
c) Vz(t)
--prob3_plotter--
:class:'none'
Returns 3 figures:
a) Five plots for variable values of i (e~0)
b) Five plots for variable w (e=0.5)
c) Five plots for variable O (e=0.5,w=pi/2)
Note: The initial position and velocity input by the user must match a
self-consistent set of data or the program may return garbage (which is not
this programmer's responsibility to correct for). Make sure v^2 is physically
consistent with the given r (via the vis-viva relation). Also, the program
assumes the R,V given are for periastron.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as ticker
matplotlib.rc('text',usetex=True)
import sys
from sciCon import mks #I wrote this, so you'll have to have it in the same folder.
#from mpl_toolkits.mplot3d import Axes3D
#This is part of the function for root finding from homework 1.
#It will find E_i+1 from E_i
def e_next(E_p,M,e):
denom = (1 - (e*np.cos(E_p)))**(-1.0) #derivative of g(E)
numer = E_p - (e*np.sin(E_p)) - M #g(E)
E_next = E_p - (numer * denom) #The definition of Newton-Raphson method
return E_next
#This is the second part for root finding from homework 1.
#It iterates through E until E_i+1 - E_i is less than the check_val.
#It runs through a number of steps equal to counter to keep a diverging solution
#from running forever. If it fails it returns a fail_check of 1
def newt_method(M,e,E0):
counter = 10000 #Number of steps for protection
fail_check = 0
check_val = 1e-5 #Threshhold for convergence. Set smaller for better solution
E_p = E0 #Initialize E_i for the first step
for i in range(counter):
E_n = e_next(E_p,M,e)
if np.abs(E_n - E_p) <= check_val: #Check for final solution.
E_final = E_n
break
else: #If it hasn't converged yet, go to the next iteration.
E_p = E_n
if i +1 == counter: #This is the check in case it diverged by the step limit.
print('Did not converge in {} steps'.format(counter))
fail_check = 1
E_final = E_n
#Return some value for E_final (hopefully the convergent one) and the
#fail_check condition.
return E_final,fail_check
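#Usage sketch (values assumed for illustration): solve Kepler's equation
#M = E - e*sin(E) for E, with M = 1.0 rad, e = 0.3, and starting guess E0 = M:
#   E, failed = newt_method(1.0, 0.3, 1.0)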
#This is the radial function for finding the position. It finds the radius (r) from E.
#Note, this and the root functions require e to not be 0. Later code where e=0
#will actually use e=10^-8
def r_func(E_f,e,a):
return a*(1 - (e*np.cos(E_f)))
#This is the angle function for finding the position.
#Finds the angle from pericenter based on r_current and the
#input eccentricity and semi-major axis.
#Because this function uses arccos, which has a limited range, code calling
#this function will need to correct f(t) if t > T/2.
def f_func(r,e,a):
numer = (a * (1-e**(2))) - r
denom = (e * r)**(-1.0)
f = np.arccos(numer * denom) #Uses multiplication to keep from blowing up
return f
#This function finds a vector's magnitude. I needed this enough to write a function
def vmag(in_vec):
return np.sqrt(in_vec.dot(in_vec))
#This function finds the angle from the cross product definition of
#AxB = ABsin(x). Will find the angle in both radians and degrees by default.
def cross_angle(vector1,vector2,deg=True):
cprod = np.cross(vector1,vector2)
prod_mag = vmag(cprod)
vec1_mag = vmag(vector1)
vec2_mag = vmag(vector2)
denom = vec1_mag * vec2_mag
cang = np.arcsin(prod_mag * denom**(-1.0))
cang_deg = cang * (180/np.pi)
if deg==True:
return cang,cang_deg
else:
return cang
#This does the same as above, but using the dot product definition
#A.B = ABcos(x)
def dot_angle(vector1,vector2,deg=True):
dprod = vector1.dot(vector2)
vec1_mag = vmag(vector1)
vec2_mag = vmag(vector2)
denom = vec1_mag * vec2_mag
dang = np.arccos(dprod * denom**(-1.0))
dang_deg = dang * (180/np.pi)
if deg==True:
return dang,dang_deg
else:
return dang
#This solves for the orbital parameters (a, e, Omega, i, w, Period, |h|)
#for a given initial position vector and velocity vector. Because a single point
#is not enough to solve for these, the second point is assumed to be the same
#point in the observer frame, but at periastron. If the point isn't periastron
#too bad.
def orbit_params(obs_init,vel_obs_init):
#This will find the instantaneous velocity and position
#Calculate semi-major axis first
G = mks.G
mSun = 1.98855e30 #These are in mks. Units don't really matter as long as they
mJup = 1.8982e27 #are consistent.
mT = mSun+mJup
GMT = G*mT
r_init = np.sqrt(obs_init[0]**(2) + obs_init[1]**(2)+obs_init[2]**(2))
v_initsq = vel_obs_init[0]**(2)+vel_obs_init[1]**(2)+vel_obs_init[2]**(2)
a = ((2/r_init) - (v_initsq/(GMT)))**(-1.0)
#We are assuming we are at periastron, with the initial position and velocity
pos_init = np.array([r_init,0,0])
vel_init = np.array([0,np.sqrt(v_initsq),0])
#Now find the period
T = np.sqrt((4*np.pi**(2)*a**(3))/(GMT))
#Now find the magnitude of the angular momentum (per unit mass) in the Orbital
#frame of reference.
h = np.cross(pos_init,vel_init)
hmag = vmag(h)
#Finally find the eccentricity
e_part = (T*hmag)/(2*np.pi*a**(2))
e = np.sqrt(1 - e_part**(2))
r = a*(1-e)
#To find the angles for the orbit, we need the angular momentum per unit mass
#in the observer frame as well, which is H.
H = np.cross(obs_init,vel_obs_init)
Hmag = vmag(H)
Zhat = np.array([0,0,1]) #Observer frame Zhat vector.
nvec = np.cross(Zhat,H)
#Find Omega based on H.
if nvec[1] >= 0:
Omega = (np.arccos(nvec[0]/vmag(nvec)))
elif nvec[1] < 0:
Omega = (2*np.pi) - (np.arccos(nvec[0]/vmag(nvec)))
#The inclination can be found from Hxh as the only rotation between these two
#is i, due to h having no z-component.
inc = cross_angle(H,h,deg=False)
#The distance from the star in the observer's frame.
Rmag = vmag(obs_init)
    #Finally, solve the function for Z(t) (observer frame) for womega (the last
    #unknown angle).
womega = np.arcsin(obs_init[2]/(Rmag*np.sin(inc)))
#Print out the orbital parameters we just found and return them.
print('Semi-Major Axis: {:5.3f} au'.format(a/mks.au))
print('Eccentricity: {:6.4f}'.format(e))
print('Longitude of Ascending Node (O): {:6.2f} deg'.format(Omega*180/np.pi))
print('Inclination (i): {:6.2f} deg'.format(inc*180/np.pi))
print('Argument of Periapse (w): {:6.2f} deg'.format(womega*180/np.pi))
return a,e,Omega,inc,womega,T,hmag
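#For reference, the standard two-body relations used above are:
#vis-viva: a = (2/r - v^2/(G*M_T))^(-1)
#Kepler III: T = 2*pi*sqrt(a^3/(G*M_T))
#and, from |h| = (2*pi*a^2/T)*sqrt(1 - e^2), the eccentricity
#e = sqrt(1 - (T*|h|/(2*pi*a^2))^2).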
#This function will find the instantaneous position and velocity (observer's frame)
#from the orbital parameters and t (time). To ensure the function for E converges
#we need to keep the previous E found from the last iteration and use it for our
#initial E in this iteration.
def pos_vel(a,e,Om,i,wom,t,T,bhc,hmag,Ep):
#Initialize empty vectors. orb_vec is the position in the orbital plane. Used
#for plotting purposes in problem 2 (for comparison).
orb_vec = np.zeros(3,dtype='f8') #0 - x, 1 - y, 2 - z
pos_vec = np.zeros(3,dtype='f8') #0 - X, 1 - Y, 2 - Z
vel_vec = np.zeros(3,dtype='f8') #0 - Vx, 1 - Vy, 2 - Vz
#Find the mean motion.
n = (2*np.pi) / T
#Find the mean anomaly based on the time.
M = n * t
#Solve for E, then r, then f for a given time.
E_instant,fc = newt_method(M,e,Ep)
r = r_func(E_instant,e,a)
f = f_func(r,e,a)
#This corrects the angle, f(t), based on the period due to f(t) using arccos.
if bhc == 1:
f += 2*(np.pi - f)
#This finds the orbital plane position. Note that z is always 0.
orb_vec[0] = r*np.cos(f)
orb_vec[1] = r*np.sin(f)
orb_vec[2] = 0
#Now find the position and velocity in the observer's frame. The angular
#component of the position reappears in the velocity (from the product rule)
#so I find it separately. Note that fdot is based on |h|, not |H|, so we need
#the magnitude of the angular momentum vector in the orbital plane as |h| is
#constant.
xang = (np.cos(Om)*np.cos(wom + f)) - (np.sin(Om)*np.sin(wom + f)*np.cos(i))
yang = (np.sin(Om)*np.cos(wom + f)) + (np.cos(Om)*np.sin(wom + f)*np.cos(i))
zang = np.sin(wom + f)*np.sin(i)
pos_vec[0] = r*xang
pos_vec[1] = r*yang
pos_vec[2] = r*zang
#And now we generate the velocity for the given point. Note that rdot depends
#on fdot, but fdot only relies on |h| and r (which we already found).
#I took these derivatives by hand.
fdot = hmag * r**(-2.0)
rdot = a*e*(1-e**(2))*np.sin(f)*fdot*(1 + (e*np.cos(f)))**(-2.0)
vel_vec[0] = (rdot*xang) - (r*fdot*((np.cos(Om)*np.sin(wom+f)) + (np.sin(Om)*np.cos(wom+f)*np.cos(i))))
vel_vec[1] = (rdot*yang) - (r*fdot*((np.sin(Om)*np.sin(wom+f)) - (np.cos(Om)*np.cos(wom+f)*np.cos(i))))
vel_vec[2] = (rdot*zang) + (r*fdot*np.cos(wom+f)*np.sin(i))
#Returns the position and velocity for the observer frame. Also returns the
#position for the orbital plane (for plotting) and the value we found for E.
return pos_vec,vel_vec,orb_vec,E_instant
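#For reference, the direction cosines above implement the standard
#orbital-to-observer rotation:
#   X = r*[cos(O)*cos(w+f) - sin(O)*sin(w+f)*cos(i)]
#   Y = r*[sin(O)*cos(w+f) + cos(O)*sin(w+f)*cos(i)]
#   Z = r*sin(w+f)*sin(i)
#with fdot = |h|/r^2 supplying the angular rate in the velocity terms.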
#Orbital parameters must be self-consistent. If the speed is too low, the object
#won't orbit. If the distance is too long, the speed might be too high.
#This function generates a self-consistent test point (with a given set of
#parameters) so I could test the above code and ensure it was giving back the
#right values for the angles. If you choose the first option from the program
#this is the function called.
def gen_test_point(a_test,e_test,i_test,w_test,o_test):
#Convert the values for a, e, i, w, O into the right units (centimeters and radians)
a = a_test*mks.au
e = e_test
i = i_test*np.pi/180
w = w_test*np.pi/180
O = o_test*np.pi/180
#We assume this object is at periastron. If it's not, too bad.
f = 0
r = a*(1-e)
#Find v^2, observer position, and observer velocity vector from givens.
mSun = 1.98855e30
mJup = 1.8982e27
mT = mSun+mJup
pos_orb_test = np.array([r,0,0]) #periastron orbital-frame position vector.
vsq = mks.G*mT*((2/r) - (1/a))
vel_orb_test = np.array([0,np.sqrt(vsq),0]) #Periastron orbital-frame v-vector.
#Observer-frame position.
xang = (np.cos(O)*np.cos(w))-(np.sin(O)*np.sin(w)*np.cos(i))
yang = (np.sin(O)*np.cos(w))+(np.cos(O)*np.sin(w)*np.cos(i))
zang = np.sin(w)*np.sin(i)
X0 = r*xang
Y0 = r*yang
Z0 = r*zang
#Note that the functions for V(t), don't include the rdot term Because
#we don't need an rdot to make it work for this single point, just the rotation
#into the correct frame.
VX0 = -np.sqrt(vsq)*((np.cos(O)*np.sin(w)) + (np.sin(O)*np.cos(w)*np.cos(i)))
VY0 = -np.sqrt(vsq)*((np.sin(O)*np.sin(w)) - (np.cos(O)*np.cos(w)*np.cos(i)))
VZ0 = np.sqrt(vsq)*np.cos(w)*np.sin(i)
#Turn the above information into vectors.
pos_obs_test = np.array([X0,Y0,Z0])
vel_obs_test = np.array([VX0,VY0,VZ0])
#Return the vectors found. r0, R0, v0, V0
return pos_orb_test,pos_obs_test,vel_orb_test,vel_obs_test
#This function creates the plots asked for in problem 2. The first plot is a 3D
#plot of the observer-frame elliptical orbit in X,Y,Z. This requires a non-standard
#package, so I commented it out. If you want to see it, remove the quotes and uncomment
#the line at the top importing Axes3D.
def prob2_plotter(pos_array,orb_array,vel_array,t_arr):
matplotlib.rc('font',size=15)
'''
fig1 = plt.figure()
ax1 = fig1.add_subplot(111,projection='3d')
ax1.scatter(pos_array[:,0],pos_array[:,1],pos_array[:,2])
ax1.set_xlabel('X-Coordinate (m)')
ax1.set_ylabel('Y-Coordinate (m)')
ax1.set_zlabel('Z-Coordinate (m)')
ax1.tick_params(axis='both',direction='in')
ax1.tick_params(axis='both',which='minor',direction='in')
ax1.tick_params(top=True,right=True)
ax1.tick_params(which='minor',top=True,right=True)
'''
#This plots x,y and X,Y on the same plot so you can see what the rotation
#does to the ellipse.
fig2,ax2 = plt.subplots(figsize=(10,8))
ax2.scatter(orb_array[:,0]/mks.au,orb_array[:,1]/mks.au,color='magenta',marker='.',label='Orbital Plane')
ax2.scatter(pos_array[:,0]/mks.au,pos_array[:,1]/mks.au,color='black',marker='.',label='Observer Plane')
ax2.set_xlabel('X-Coordinate (au)',fontsize=15)
ax2.set_ylabel('Y-Coordinate (au)',fontsize=15)
ax2.tick_params(axis='both',direction='in')
ax2.tick_params(axis='both',which='minor',direction='in')
ax2.tick_params(top=True,right=True)
ax2.tick_params(which='minor',top=True,right=True)
ax2.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax2.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax2.legend()
#This plots the X(t) and Y(t) just to show the cos/sin offset.
fig3,ax3 = plt.subplots(figsize=(10,8))
ax3.scatter(t_arr/86400,pos_array[:,0]/mks.au,color='black',marker='x',label='X-coord')
ax3.scatter(t_arr/86400,pos_array[:,1]/mks.au,color='blue',marker='.',label='Y-coord')
ax3.set_xlabel('Time (days)',fontsize=15)
ax3.set_ylabel('Position (au)',fontsize=15)
ax3.tick_params(axis='both',direction='in')
ax3.tick_params(axis='both',which='minor',direction='in')
ax3.tick_params(top=True,right=True)
ax3.tick_params(which='minor',top=True,right=True)
ax3.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax3.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax3.legend()
#This plots the Vz(t) to show there is a non-zero Z-component velocity
#in the observer reference frame (which would show up as a Vr for the
#observer on Earth)
fig4,ax4 = plt.subplots(figsize=(10,8))
ax4.scatter(t_arr/86400,vel_array[:,2]/1000,color='black',marker='.')
ax4.set_xlabel('Time (days)',fontsize=15)
ax4.set_ylabel(r'$\textrm{V}_{\textrm{z}}$ (km s$^{-1}$)',fontsize=15)
ax4.tick_params(axis='both',direction='in')
ax4.tick_params(axis='both',which='minor',direction='in')
ax4.tick_params(top=True,right=True)
ax4.tick_params(which='minor',top=True,right=True)
ax4.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax4.yaxis.set_minor_locator(ticker.MultipleLocator(1))
plt.show()
#This function generates the plots asked for in problem 3. I made up angles
#for i,w,O to iterate through using 3d arrays.
def prob3_plotter(R0,V0):
#Generate all of the variable values for i,w,O.
var_params = np.zeros(5,dtype=[('INC','f8'),('wom','f8'),('Om','f8')])
var_params['INC'] = np.array([20,50,100,150,300])*np.pi/180
var_params['wom'] = np.array([50,100,150,225,300])*np.pi/180
var_params['Om'] = np.array([20,100,150,225,300])*np.pi/180
#Create blank 3d arrays for X,Y position for variable i.
pos_varINC_array = np.zeros((num_steps,3,5),dtype='f8')
vel_varINC_array = np.zeros((num_steps,3,5),dtype='f8')
#Initialize all 3 figures up front.
fig1,ax1 = plt.subplots(figsize=(10,8))
fig1x,ax1x = plt.subplots(figsize=(10,8))
fig1v,ax1v = plt.subplots(figsize=(10,8))
fig2,ax2 = plt.subplots(figsize=(10,8))
fig2x,ax2x = plt.subplots(figsize=(10,8))
fig2v,ax2v = plt.subplots(figsize=(10,8))
fig3,ax3 = plt.subplots(figsize=(10,8))
fig3x,ax3x = plt.subplots(figsize=(10,8))
fig3v,ax3v = plt.subplots(figsize=(10,8))
#This iterates through all 5 values of i using the same input Parameters
#from the test_point generation function. e cannot be exactly zero as the
#root-finding functions are undefined in this case. So I made it really small
#but still not 0.
for j in range(5):
back_half_check = 0
pos_varINC_array[0,:,j] = R0
vel_varINC_array[0,:,j] = V0
a_temp = 1.25*mks.au
        e_temp = 1e-8 #The code requires a non-zero eccentricity, so use a tiny value.
O_temp = 50*np.pi/180
i_temp = var_params['INC'][j]
w_temp = 20*np.pi/180
Ep = 0
#This part iterates through the X,Y for this loops i-value.
for k in range(1,num_steps):
t = t_arr[k]
if k > (num_steps/2):
back_half_check = 1
pos_vec, vel_vec, orb_pos,E_n = pos_vel(a_temp,e_temp,O_temp,i_temp,w_temp,t,T,back_half_check,mag_h,Ep)
Ep = E_n
pos_varINC_array[k,:,j],vel_varINC_array[k,:,j] = pos_vec[:],vel_vec[:]
        #Plot this X,Y for the given i. Label them accordingly.
plab = 'i = %6.2f$^{\circ}$'%(i_temp*180/np.pi)
ax1.scatter(pos_varINC_array[:,0,j]/mks.au,pos_varINC_array[:,1,j]/mks.au,marker='.',label=plab)
ax1x.scatter(t_arr/86400,pos_varINC_array[:,0,j]/mks.au,marker='.',label=plab)
ax1v.scatter(t_arr/86400,vel_varINC_array[:,2,j]/1000,marker='.',label=plab)
#Plot the X/Y plot for the variable inclinations
ax1.set_xlabel('X-Coordinate (au)',fontsize=15)
ax1.set_ylabel('Y-Coordinate (au)',fontsize=15)
ax1.tick_params(axis='both',direction='in')
ax1.tick_params(axis='both',which='minor',direction='in')
ax1.tick_params(top=True,right=True)
ax1.tick_params(which='minor',top=True,right=True)
ax1.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax1.legend()
#Plot the X vs. t plot for the variable inclinations
ax1x.set_xlabel('Time (days)',fontsize=15)
ax1x.set_ylabel('X-Coordinate (au)',fontsize=15)
ax1x.tick_params(axis='both',direction='in')
ax1x.tick_params(axis='both',which='minor',direction='in')
ax1x.tick_params(top=True,right=True)
ax1x.tick_params(which='minor',top=True,right=True)
ax1x.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax1x.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax1x.legend()
#Plot the Z-component velocity vs. t for variable inclinations.
ax1v.set_xlabel('Time (days)',fontsize=15)
ax1v.set_ylabel(r'$\textrm{V}_{\textrm{z}}$ (km s$^{-1}$)',fontsize=15)
ax1v.tick_params(axis='both',direction='in')
ax1v.tick_params(axis='both',which='minor',direction='in')
ax1v.tick_params(top=True,right=True)
ax1v.tick_params(which='minor',top=True,right=True)
ax1v.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax1v.yaxis.set_minor_locator(ticker.MultipleLocator(1))
ax1v.legend()
#This bit will iterate through the values of w.
pos_varWOM_array = np.zeros((num_steps,3,5),dtype='f8')
vel_varWOM_array = np.zeros((num_steps,3,5),dtype='f8')
for j in range(5):
back_half_check = 0
pos_varWOM_array[0,:,j] = R0
vel_varWOM_array[0,:,j] = V0
a_temp = 1.25*mks.au
e_temp = 0.5
O_temp = 50*np.pi/180
i_temp = 60*np.pi/180
w_temp = var_params['wom'][j]
Ep = 0
#Generate all X,Y for the given w in this iteration.
for k in range(1,num_steps):
t = t_arr[k]
if k > (num_steps/2):
back_half_check = 1
pos_vecw, vel_vecw,orb_posw,E_n = pos_vel(a_temp,e_temp,O_temp,i_temp,w_temp,t,T,back_half_check,mag_h,Ep)
Ep = E_n
pos_varWOM_array[k,:,j],vel_varWOM_array[k,:,j] = pos_vecw[:],vel_vecw[:]
#And plot each with the right labels.
plabWOM = r'$\omega$ = %6.2f$^{\circ}$'%(w_temp*180/np.pi)
ax2.scatter(pos_varWOM_array[:,0,j]/mks.au,pos_varWOM_array[:,1,j]/mks.au,marker='.',label=plabWOM)
        ax2x.scatter(t_arr/86400,pos_varWOM_array[:,0,j]/mks.au,marker='.',label=plabWOM)
        ax2v.scatter(t_arr/86400,vel_varWOM_array[:,2,j]/1000,marker='.',label=plabWOM)
#Plot Y vs. X for variable w values (given e=0.5)
ax2.set_xlabel('X-Coordinate (au)',fontsize=15)
ax2.set_ylabel('Y-Coordinate (au)',fontsize=15)
ax2.tick_params(axis='both',direction='in')
ax2.tick_params(axis='both',which='minor',direction='in')
ax2.tick_params(top=True,right=True)
ax2.tick_params(which='minor',top=True,right=True)
ax2.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax2.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax2.legend()
#Plot X vs. t for variable w values (e=0.5)
ax2x.set_xlabel('Time (days)',fontsize=15)
ax2x.set_ylabel('X-Coordinate (au)',fontsize=15)
ax2x.tick_params(axis='both',direction='in')
ax2x.tick_params(axis='both',which='minor',direction='in')
ax2x.tick_params(top=True,right=True)
ax2x.tick_params(which='minor',top=True,right=True)
ax2x.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax2x.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax2x.legend()
#Plot the Z-component of velocity vs. t for variable w values (e=0.5)
ax2v.set_xlabel('Time (days)',fontsize=15)
ax2v.set_ylabel(r'$\textrm{V}_{\textrm{z}}$ (km s$^{-1}$)',fontsize=15)
ax2v.tick_params(axis='both',direction='in')
ax2v.tick_params(axis='both',which='minor',direction='in')
ax2v.tick_params(top=True,right=True)
ax2v.tick_params(which='minor',top=True,right=True)
ax2v.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax2v.yaxis.set_minor_locator(ticker.MultipleLocator(1))
ax2v.legend()
#Finally generate the plot for variable Omega, given a w = pi/2
pos_varOM_array = np.zeros((num_steps,3,5),dtype='f8')
vel_varOM_array = np.zeros((num_steps,3,5),dtype='f8')
for j in range(5):
back_half_check = 0
pos_varOM_array[0,:,j] = R0
vel_varOM_array[0,:,j] = V0
a_temp = 1.25*mks.au
e_temp = 0.5
O_temp = var_params['Om'][j]
i_temp = 60*np.pi/180
w_temp = np.pi/2
Ep = 0
#This generates all X,Y for a given O.
for k in range(1,num_steps):
t = t_arr[k]
if k > (num_steps/2):
back_half_check = 1
pos_vecO, vel_vecO,orb_posO,E_n = pos_vel(a_temp,e_temp,O_temp,i_temp,w_temp,t,T,back_half_check,mag_h,Ep)
Ep = E_n
pos_varOM_array[k,:,j],vel_varOM_array[k,:,j] = pos_vecO[:],vel_vecO[:]
#And plot with the right labels.
plabOM = r'$\Omega$ = %6.2f$^{\circ}$'%(O_temp*180/np.pi)
ax3.scatter(pos_varOM_array[:,0,j]/mks.au,pos_varOM_array[:,1,j]/mks.au,marker='.',label=plabOM)
        ax3x.scatter(t_arr/86400,pos_varOM_array[:,0,j]/mks.au,marker='.',label=plabOM)
        ax3v.scatter(t_arr/86400,vel_varOM_array[:,2,j]/1000,marker='.',label=plabOM)
#Plot Y vs. X for the variable Om values (w=pi/2,e=0.5)
ax3.set_xlabel('X-Coordinate (m)',fontsize=15)
ax3.set_ylabel('Y-Coordinate (m)',fontsize=15)
ax3.tick_params(axis='both',direction='in')
ax3.tick_params(axis='both',which='minor',direction='in')
ax3.tick_params(top=True,right=True)
ax3.tick_params(which='minor',top=True,right=True)
ax3.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax3.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax3.legend(loc='upper right')
#Plot X vs. t for the variable Om values (w=pi/2,e=0.5)
ax3x.set_xlabel('Time (days)',fontsize=15)
ax3x.set_ylabel('X-Coordinate (au)',fontsize=15)
ax3x.tick_params(axis='both',direction='in')
ax3x.tick_params(axis='both',which='minor',direction='in')
ax3x.tick_params(top=True,right=True)
ax3x.tick_params(which='minor',top=True,right=True)
ax3x.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax3x.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax3x.legend()
#Plot the Z-component of velocity vs. t for the variable Om values (w=pi/2,e=0.5)
#Note that since Vz doesn't depend on Omega, these will all be the same.
ax3v.set_xlabel('Time (days)',fontsize=15)
ax3v.set_ylabel(r'$\textrm{V}_{\textrm{z}}$ (km s$^{-1}$)',fontsize=15)
ax3v.tick_params(axis='both',direction='in')
ax3v.tick_params(axis='both',which='minor',direction='in')
ax3v.tick_params(top=True,right=True)
ax3v.tick_params(which='minor',top=True,right=True)
ax3v.xaxis.set_minor_locator(ticker.MultipleLocator(10))
ax3v.yaxis.set_minor_locator(ticker.MultipleLocator(1))
ax3v.legend()
plt.show()
#Here's the main function for calling all of this. If you want a self-consistent
#test point, select option 1 in the first question. THERE IS NO ERROR-CORRECTION
#IF YOU SELECT OPTION 2 AND GIVE IT GARBAGE. R0,V0 need to be self-consistent
#in option 2 or everything may break and I'm not responsible.
if __name__=='__main__':
    print('Would you like to generate a test point? Or use your own Pos/Vel vectors?')
print('[1] Test Point\n[2] Own Parameters')
tpoint_select = input('Selection: ')
#If you want the test_point I selected, here.
if tpoint_select == '1':
r0,R0,v0,V0 = gen_test_point(1.25,0.02,60,20,50)
#This will do everything from a user-input position and velocity, but it better
#be accurate and consistent. Garbage output means input was garbage, not my
#fault.
elif tpoint_select == '2':
Xus,Yus,Zus = input('Please input X,Y,Z (in cm): ').split(',')
Vxs,Vys,Vzs = input('Please input Vx,Vy,Vz (in km/s): ').split(',')
        R0 = np.array([float(Xus)/100,float(Yus)/100,float(Zus)/100])
        V0 = np.array([float(Vxs)*1000,float(Vys)*1000,float(Vzs)*1000])
Rmag0 = vmag(R0)
r0 = np.array([Rmag0,0,0])
#This will print the orbital parameters used, whether back-solved from the
#test point (to make sure the code functions correctly), or generated from
#the user input.
    print('\nGenerating Orbital Parameters (a,e,i,Omega,w,T,h)')
print('--------------------------------------------------')
a,e,O,i,w,T,mag_h = orbit_params(R0,V0)
#Now we iterate through time to build the array for problem 2.
num_steps = 1000
step_size = T / num_steps
#Initialize full XYZ, VxVyVz, and xyz arrays.
pos_array = np.zeros((num_steps,3),dtype='f8')
vel_array = np.zeros((num_steps,3),dtype='f8')
orb_array = np.zeros((num_steps,3),dtype='f8')
#Set the starting points.
pos_array[0,:] = R0
vel_array[0,:] = V0
orb_array[0,:] = r0
back_half_check = 0
#time as an array so I know that XYZ steps use the same time values that the
#plots will use.
t_arr = np.zeros(num_steps,dtype='f8')
t_arr = np.array([j*step_size for j in range(num_steps)])
Ep = 0 #First angle guess, since it starts at 0.
for j in range(1,num_steps):
t = t_arr[j]
if j > (num_steps/2):
back_half_check = 1
pos_vec, vel_vec,orb_pos,E_n = pos_vel(a,e,O,i,w,t,T,back_half_check,mag_h,Ep)
pos_array[j,:],vel_array[j,:],orb_array[j,:] = pos_vec,vel_vec,orb_pos
Ep = E_n #this allows the next iteration to have a better starting point
        #The solver may fail to converge for some angles if 0 is always used as the initial guess.
#Ask the user if they want to plot stuff. Problem 2 generates 3 plots.
#Problem 3 generates 15 plots on 3 figures.
p2_choice = input('\nWould you like to generate the plots for problem 2? [y/n]: ')
if ((p2_choice == 'y')|(p2_choice=='Y')|(p2_choice=='yes')|(p2_choice=='YES')):
prob2_plotter(pos_array,orb_array,vel_array,t_arr)
p3_choice = input('\nWould you like to generate the plots for problem 3? [y/n]: ')
if ((p3_choice == 'y')|(p3_choice=='Y')|(p3_choice=='yes')|(p3_choice=='YES')):
prob3_plotter(R0,V0)
#And we're done. No active menu this time.
print('\nGoodbye!')
|
[
"bradlyke@gmail.com"
] |
bradlyke@gmail.com
|
283e67dbe46fa60a76f7eeb5e28f4464351f743c
|
23d2a7cbb710ef6a9458f3aa6818d25afdd1e4a2
|
/1-Python/PreClass/9-OOP/1-CLASS/4-uygulama-instance.py
|
5de3119403d717110c4edd7d5f8d097a3bf4bb5c
|
[] |
no_license
|
m-aykurt/ITF-008-CLARUSWAY
|
ee8febad952e60ffd1f27211dac9c736766f5110
|
58d863b205d5b445e472ea3d8611f1f1fc1812ee
|
refs/heads/main
| 2023-07-08T21:01:58.542414
| 2021-08-05T05:30:08
| 2021-08-05T05:30:08
| 366,754,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# -*- coding: utf-8 -*-
class BankAccount:
def __init__(self,name,balance = 0.0):
self.owner = name
self.balance = balance
def get_balance(self):
return self.balance
def deposit(self,amount):
self.balance += amount
return self.balance
def withdraw(self,amount):
self.balance -= amount
return self.balance
hesap = BankAccount("Murat Aykurt",6000)
print(hesap.get_balance())
hesap.deposit(1500)
print(hesap.get_balance())
hesap.deposit(500)
print(hesap.get_balance())
hesap.withdraw(1250)
print(hesap.get_balance())
|
[
"maykurt92@gmail.com"
] |
maykurt92@gmail.com
|
fb7eb4c9d6fd755f2f34eed4a16bf49710fd62da
|
4f0b6bcde72cbc7d90ec666657c8bab0c051ad9b
|
/tests/postprocessing_test.py
|
155ac5e0d042185ce6f36eb5a74afd05fa2c639b
|
[] |
no_license
|
arkhn/jpyltime
|
acc4d10b64a70eb26282f53fdb4f913fb1e5731e
|
2fcf017bceeb9a0c17ceb31d16443f17eb374188
|
refs/heads/main
| 2023-04-02T06:41:52.237342
| 2021-04-12T12:25:26
| 2021-04-12T12:25:26
| 350,370,341
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,125
|
py
|
import json
import pandas as pd
import pytest
from jpyltime.postprocessing_fhir2ds import FHIR2DS_Postprocessing
from jpyltime.utils import Attribute
@pytest.fixture
def map_attributes():
attribute_file = "jpyltime/documents/attributes_mapping.json"
with open(attribute_file, "r") as f:
map_attributes = json.loads(f.read())
return map_attributes
@pytest.fixture
def example_dataframe():
data = [
["8392", "tom", 10, "ICD", 22, "mg", 50, "kg", "2019-20-20"],
["8392", "tom", 1, "ICD", 8493, "L", 50, "kg", "2019-20-20"],
["2038", "nick", 10, "ICD", 22, "mg", 90, "kg", "2019-03-23"],
["9382", "julie", 1, "ICD", 38, "L", 92, "kg", "1300-05-17"],
["3728", "john", 10, "ICD", 22, "mg", 20, "kg", "2839-11-20"],
]
# Create the pandas DataFrame
df = pd.DataFrame(
data,
columns=[
"Patient:from_id",
"Patient.name.given",
"MedicationRequest:from_id",
"MedicationRequest.medicationCodeableConcept.coding.display",
"MedicationRequest.dosageInstruction.doseAndRate.doseQuantity.value",
"MedicationRequest.dosageInstruction.doseAndRate.doseQuantity.unit",
"Weight.valueQuantity.value",
"Weight.valueQuantity.unit",
"Patient.birthDate",
],
)
return df
def test_postprocessing(map_attributes, example_dataframe, attributes):
anonymization_symbol = "*"
postprocessing = FHIR2DS_Postprocessing(
map_attributes, anonymization_symbol=anonymization_symbol
)
display_df = postprocessing.postprocess(example_dataframe, attributes)
expected_columns = ["Prénom", "Anniversaire", "Poids", "Médicaments"]
expected = pd.DataFrame(
[
["*", "2839-11-20", ["20 kg"], ["ICD 22 mg"]],
["*", "1300-05-17", ["92 kg"], ["ICD 38 L"]],
["*", "2019-03-23", ["90 kg"], ["ICD 22 mg"]],
["*", "2019-20-20", ["50 kg", "50 kg"], ["ICD 22 mg", "ICD 8493 L"]],
],
columns=expected_columns,
)
assert expected.equals(display_df)
|
[
"naudin.louise@gmail.com"
] |
naudin.louise@gmail.com
|
92305e9ebbee824bea7741033cf2a6b1cda3f9e9
|
9d65e8c566992b53ed164d70f6e07a2618725871
|
/cmsapp/migrations/0040_auto_20190201_1030.py
|
cccc2ce84dfa36d1e9ab3fe81e791ccc59c8607c
|
[] |
no_license
|
akdeveloper0791/green_content
|
0141b0784a4a58a84429cac5d326f65edbf11921
|
c04c8d863b90cd2ff4d2e26b4e814f4352251191
|
refs/heads/master
| 2022-05-18T09:37:51.475697
| 2020-10-26T05:27:10
| 2020-10-26T05:27:10
| 164,104,618
| 0
| 0
| null | 2022-04-22T21:05:12
| 2019-01-04T12:25:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
# Generated by Django 2.0.9 on 2019-02-01 05:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsapp', '0039_auto_20190105_1121'),
]
operations = [
migrations.AddField(
model_name='multiple_campaign_upload',
name='camp_type',
field=models.SmallIntegerField(default=-1),
),
migrations.AddField(
model_name='multiple_campaign_upload',
name='campaign_size',
field=models.CharField(default=0, max_length=50),
),
migrations.AddField(
model_name='multiple_campaign_upload',
name='is_skip',
field=models.SmallIntegerField(default=0),
),
migrations.AddField(
model_name='multiple_campaign_upload',
name='stor_location',
field=models.SmallIntegerField(default=1),
),
migrations.AlterField(
model_name='multiple_campaign_upload',
name='campaign_uploaded_by',
field=models.BigIntegerField(),
),
migrations.AlterUniqueTogether(
name='multiple_campaign_upload',
unique_together={('campaign_uploaded_by', 'campaign_name')},
),
]
|
[
"vineethkumar0791@gmail.com"
] |
vineethkumar0791@gmail.com
|
2f9e354a1784f5eaf979f8599925fe45d9fbdaa5
|
72a91a27a88cc6e41ab3afe89736261bd8ba87c6
|
/code/testevo.py
|
a2558dab66054601941becec223d4b2daa7596df
|
[] |
no_license
|
youcefhd/magisterka
|
8d1f2c252f7634963db5c144f940e38d55ad7670
|
13e2c21e989ffed1ba3901affad957d2a6d1bede
|
refs/heads/master
| 2020-06-04T21:48:33.150546
| 2011-05-18T21:09:32
| 2011-05-18T21:09:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
#! /usr/bin/env python2
# coding: utf-8
from pyfis.struct import *
from pyfis.anfis import *
from pyfis.evofis import *
from numpy import *
# FIS with generalized bell membership functions
fis = Fis(defuzzmethod="sum")
for j in range(2):
inp = Input()
for i in range(4):
inp.mem_func.append(BellMemFunc([3.3, 4, -10+i*6.6]))
fis.inputs.append(inp)
for i in range(4):
for j in range(4):
rule = Rule([0, 0, 0])
rule.inputs.append((0, i))
rule.inputs.append((1, j))
fis.rules.append(rule)
print fis_to_vector(fis)
|
[
"swian@post.pl"
] |
swian@post.pl
|
613efd4b6814f097159ee130eeefac65fad03562
|
5fc0dfa0703a830b58a29ad9526a43f812886988
|
/wingman.py
|
dc4f16cd80e7c6d3564f31efe79f930d68e8775a
|
[] |
no_license
|
liangz678/arduplane_formation_flying
|
ca652e097167349253705fc734329ebd8f32bf98
|
11b09a2b046c36a56c58aca014321581abf22aef
|
refs/heads/master
| 2022-12-10T11:51:21.930795
| 2020-09-05T16:16:02
| 2020-09-05T16:16:02
| 292,177,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,511
|
py
|
from dronekit import Vehicle, Command, VehicleMode
from pymavlink import mavutil
class Wingman(Vehicle):
def __init__(self, *args):
super(Wingman, self).__init__(*args)
self.max_speed = 35
self.min_speed = 12
self.aspd2thr = [
{"thr": 10, "aspd": 10},
{"thr": 20, "aspd": 15},
{"thr": 33, "aspd": 22},
{"thr": 50, "aspd": 27},
{"thr": 80, "aspd": 33},
{"thr": 100, "aspd": 36},
]
@property
def thr_aspd(self):
return self.airspeed
@thr_aspd.setter
def set_thr_aspd(self, aspd):
aspd = max(aspd, self.min_speed)
aspd = min(aspd, self.max_speed)
thr = -1
for item in self.aspd2thr:
if item["aspd"] >= aspd:
thr = item["thr"]
break
if thr == -1:
thr = 100
if self.mode.name == "AUTO":
speed_type = 0 # air speed
msg = self.message_factory.command_long_encode(
0, 0, # target system, target component
mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED, # command
0, # confirmation
speed_type, # param 1
aspd, # speed in metres/second
thr, 0, 0, 0, 0 # param 3 - 7
)
self.send_mavlink(msg)
return True
if self.mode.name == 'GUIDED':
speed_type = 0 # air speed
msg = self.message_factory.command_int_encode(
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
mavutil.mavlink.MAV_CMD_GUIDED_CHANGE_SPEED, # command
1,#current
False,#autocontinue
speed_type, # param 1
aspd, # speed in metres/second
0, 0, 0, 0, 0 # param 3 - 7
)
print('sd',aspd)
self.send_mavlink(msg)
return True
return False
def follow(self,pos):
self.commands.clear()
self.commands.wait_ready()
self.commands.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, pos.lat, pos.lon, pos.alt+10))
self.commands.upload()
self.commands.wait_ready()
self.mode = VehicleMode("AUTO")
self.commands.next = 1
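# Hedged usage sketch (connection string illustrative): dronekit's connect()
# accepts a custom Vehicle subclass through its vehicle_class parameter, so a
# wingman can be brought up along these lines:
#
#   from dronekit import connect
#   wingman = connect('udp:127.0.0.1:14551', wait_ready=True,
#                     vehicle_class=Wingman)
#   wingman.thr_aspd = 20  # request roughly 20 m/s through the property setter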
|
[
"liangz1991@126.com"
] |
liangz1991@126.com
|
a9a58fa611d25c519139aa2728e61ea88edcabea
|
99c64332ccadbbf62c55920964af41b7ae337194
|
/Python-sys/working_with_files.py
|
68f14af7ea0ea694a29536835ee7620c119d0cba
|
[] |
no_license
|
ChastityAM/Python
|
871c979a29f77d68f17ac6f9bed25485e445679c
|
ab1c0793c36d7bb07926985d167a6b99fa2acea9
|
refs/heads/main
| 2023-04-12T19:37:02.591871
| 2021-05-06T15:03:16
| 2021-05-06T15:03:16
| 341,973,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,879
|
py
|
#This opens a file and then closes file
#with open('genericTxtfile.txt', 'r') as f:
# print(f.read())
#print(f.read(2)) reads 2 characters in file
#print(f.readlines()) reads all lines
#print(f.readline().strip()) reads first line removing whitespaces
#print(f.readline().strip('\n')) reads second line removing newlines
#for line in f:
#print(line) #prints lines with spaces between them
#f.seek(0) # to go back to the line specified
#with open('genericTxtfile.txt', 'a') as f:
# f.write("\n fifth line")
import os
def prepend_multiple_lines(file_name, list_of_lines):
"""Insert given list of strings as a new lines at the beginning of a file"""
# define name of temporary dummy file
dummy_file = file_name + '.bak'
# open given original file in read mode and dummy file in write mode
with open(file_name , 'r') as read_obj, open(dummy_file, 'w') as write_obj:
# Iterate over the given list of strings and write them to dummy file as lines
for line in list_of_lines:
write_obj.write(line + '\n')
# Read lines from original file one by one and append them to the dummy file
for line in read_obj:
write_obj.write(line)
# remove original file
os.remove(file_name)
# Rename dummy file as the original file
os.rename(dummy_file, file_name)
def main():
print('*** Insert a line at the top of a file ***')
# Insert a line before the first line of a file 'sample.txt'
a_list.insert("sample.txt", "This is a first line")
print('*** Insert multiple lines at the beginning of a file ***')
list_of_lines = ['New Line 1', 'New Line 2']
# Insert strings in a list as new lines at the top of file 'sample.txt'
prepend_multiple_lines("sample.txt", list_of_lines)
if __name__ == '__main__':
main()
|
[
"chastity.mascarenas.e@gmail.com"
] |
chastity.mascarenas.e@gmail.com
|
890412cebe36b9b61ea39dc9f9193816c6d51130
|
7efa2a5179b6152e7e413657d90dc929956d5ecd
|
/fpre/tests/user_a.py
|
32b15859d479a55e928b91324693db1fe9daa2fb
|
[
"BSD-2-Clause"
] |
permissive
|
fabian-hk/Secure-Two-Party-Computation
|
7b8cfc08c4b155ccc7e8fe31df7089f00696a482
|
f7e10a0a5c1b0361dd700391d81cdcc75612666d
|
refs/heads/master
| 2020-05-24T21:48:04.861586
| 2019-06-01T20:56:34
| 2019-06-01T20:56:34
| 187,481,351
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
from fpre.fpre import Fpre
import fpre.f_la_and as flaand
from protobuf import FunctionDependentPreprocessing_pb2
from conf import conf
certificate = "certificate-alice"
partner = "bob.mpc"
com = Fpre(conf.test_server_ip, conf.test_server_port, certificate, partner)
com.init_fpre()
auth_bits = flaand.get_authbits(com.person, com, 2)
and_triple = FunctionDependentPreprocessing_pb2.ANDTriple()
and_triple.id = 0
auth_bits = iter(auth_bits.bits)
auth_bit = next(auth_bits)
and_triple.r1 = auth_bit.r
and_triple.M1 = auth_bit.M
and_triple.K1 = auth_bit.K
auth_bit = next(auth_bits)
and_triple.r2 = auth_bit.r
and_triple.M2 = auth_bit.M
and_triple.K2 = auth_bit.K
flaand.f_la_and(com, com.person, and_triple)
com.close_session()
|
[
"hauckfabian@gmail.com"
] |
hauckfabian@gmail.com
|
1317977d88f7dfc8ca7eec39a944aa1dc13d38fd
|
a279aa7058dd2f8b9a2f7c2600ae803740007374
|
/backtracking.py
|
9e95b89aff71e8345f3963771c2e2420ccf80f88
|
[] |
no_license
|
priteshmehta/leetcode
|
ca0e45c16ccf6c081944724e142f1028a4d878b4
|
c7bc1ca02541a5715eacb023bc87a29981aee9b2
|
refs/heads/master
| 2022-07-28T12:59:36.167283
| 2022-07-14T16:55:05
| 2022-07-14T16:55:05
| 247,047,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
def permute(n, s):
    # Builds all length-n tuples over s by nesting recursive calls
    # (n renamed from "list" to avoid shadowing the builtin).
    if n == 1:
        return s
    else:
        return [(y, x)
            for y in permute(1, s)
            for x in permute(n - 1, s)
            ]
#print(permute(1, ["a","b","c"]))
#print(permute(2, ["a","b","c"]))
def my_permute(n, s):
    if n == 1:
        return s
    else:
#a = [ y for y in permute(1, s)]
a = [ (x,y,z)
for x in ["x1","x2","x3"]
for y in ["y1","y2","y3"]
for z in ["z1","z2","z3"]
]
return a
def permute3(list_size, s):
a = []
if list_size == 1:
return s
else:
for y in permute(1, s):
for x in permute(1, s[1:]):
for z in permute(1, s[2:]):
if x + y + z == 0:
if (x,y,z) not in a:
a.append((x, y, z))
return list(set(a))
ans = permute3(3, [-1, 0, 1, 2, -1, -4])
print(ans)
#[-1, 0, 1, 2, -1, -4]
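# Hedged cross-check (not part of the original solution): permute(2, s)
# builds the same ordered pairs as the standard library's Cartesian product.
from itertools import product
assert permute(2, ["a", "b", "c"]) == list(product(["a", "b", "c"], repeat=2))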
|
[
"mehta.pritesh@gmail.com"
] |
mehta.pritesh@gmail.com
|
a2f6d57ad69321fff1bb9c7d283efe37623dfdd0
|
b0bfbe345426ed89a6c1d576bd6a8533a523ed66
|
/email_list.py
|
b71358113fd6ba1201b6cd51f7dec93bf1c08ca3
|
[
"MIT"
] |
permissive
|
danielhcai/facebook-group-scanner
|
63266cba717593459abfcb9cdd28f004fb0dd947
|
453b90a33a247b27963fc74d5b120ab221330736
|
refs/heads/master
| 2023-06-05T07:42:39.335288
| 2021-06-11T19:31:27
| 2021-06-11T19:31:27
| 325,150,144
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import os
email_path = "./emails/"
if __name__ == "__main__":
email_set = set()
for filename in os.listdir(email_path):
file = open(email_path + filename, "r")
for email in file:
email_set.add(email)
file.close()
file = open(email_path + "final_list.txt", "w")
for email in email_set:
file.write(email)
file.close()
|
[
"danielhcai@gmail.com"
] |
danielhcai@gmail.com
|
abe426da3a658e528aaf319d1ee2ba20c3ff66c5
|
fcbd0d7ef2222b08263ba70f2158edad30cc351b
|
/user/models.py
|
a9ba8d2b9e2ff9b5ddb2134e4f59f539ffbb0e91
|
[] |
no_license
|
zlcbs/DjangoEmailVerify
|
3b0f0c679425bcbcaba018ff05d4162a02d2924c
|
80d164ac7457238a3b9dba47911bb7ed94a819fa
|
refs/heads/master
| 2020-06-22T10:42:11.807381
| 2019-07-19T04:26:49
| 2019-07-19T04:26:49
| 197,700,857
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
from django.conf import settings
from django.db import models
# Django password hashing helper
from django.contrib.auth.hashers import make_password
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
# Create your models here.
class Users(models.Model):
nickname = models.CharField(max_length=16, null=False, blank=False, unique=True)
email = models.EmailField(max_length=32, null=False, blank=False, unique=True)
password = models.CharField(max_length=64, null=False, blank=False)
head = models.ImageField(default="decault.png")
age = models.CharField(max_length=3, blank=True, null=True)
sex = models.CharField(max_length=2, blank=True, null=True)
isactivate = models.BooleanField(default=False)
def save(self):
if not self.password.startswith('pbkdf2_'):
self.password = make_password(self.password)
super().save()
    # Generate an activation token
def generate_activate_token(self, expires_in=360):
s = Serializer(settings.SECRET_KEY, expires_in)
return s.dumps({'id': self.id})
    # Validate an activation token
@staticmethod
def check_activate_token(token):
s = Serializer(settings.SECRET_KEY)
try:
data = s.loads(token)
        except BadSignature:
            return 'Invalid activation code'
        except SignatureExpired:
            return 'Activation code has expired'
        user = Users.objects.filter(id=data.get('id')).first()
        if not user:
            return 'Account to activate does not exist'
        if not user.isactivate:
            user.isactivate = True
            user.save()
        return 'Activation successful'
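# Hedged usage sketch (assumes migrations applied; nickname/email illustrative):
def _activation_flow_example():
    user = Users(nickname='demo', email='demo@example.com', password='secret')
    user.save()  # hashes the raw password before storing
    token = user.generate_activate_token()
    return Users.check_activate_token(token)  # 'Activation successful' on success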
|
[
"zlcbs@outlook.com"
] |
zlcbs@outlook.com
|
0df44d191cee41e9c549ca85a17895bbc695134f
|
7abf6549410dd9aa0fc07097e768e3c68e963e30
|
/sudoku_solver.py
|
68167d775712c353c32c8b48cfda437d08290a4e
|
[] |
no_license
|
Reyansh14/Sudoku-Solver
|
1be598639d907fe3c6e9a8f512d8204349eb4bdc
|
45ffdccac651d8ffc4f166cc813ca5f20369ec36
|
refs/heads/main
| 2023-03-20T23:08:46.465885
| 2021-03-13T06:33:36
| 2021-03-13T06:33:36
| 328,460,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,779
|
py
|
# * This is a Sudoku Solver implemented with a backtracking algorithm. Rather than using brute force, the backtracking algorithm takes a step back
# * each time it runs into an incorrect solution and tries another value.
# Algorithm:
# 1: Find an empty space, denoted by 0
# 2: Try entering digits 1-9. If it works, move on to the next spot. Else, try the next digit.
# 3: If the entered number is invalid, go to the previous step and try a new number.
# 4: Perform steps 1-3 until the board is filled.
# TODO: Add some more boards ranging in difficulty from easy to hard.
# Defining sample sudoku boards:
board1 = [
[7, 8, 0, 4, 0, 0, 1, 2, 0],
[6, 0, 0, 0, 7, 5, 0, 0, 9],
[0, 0, 0, 6, 0, 1, 0, 7, 8],
[0, 0, 7, 0, 4, 0, 2, 6, 0],
[0, 0, 1, 0, 5, 0, 9, 3, 0],
[9, 0, 4, 0, 6, 0, 0, 0, 5],
[0, 7, 0, 3, 0, 0, 0, 1, 2],
[1, 2, 0, 0, 0, 7, 4, 0, 0],
[0, 4, 9, 2, 0, 6, 0, 0, 7]
]
board2 = [
[5, 6, 0, 0, 1, 0, 0, 0, 0],
[3, 0, 0, 7, 0, 0, 9, 1, 0],
[1, 0, 0, 0, 5, 2, 0, 0, 3],
[8, 5, 0, 6, 0, 0, 0, 0, 0],
[9, 0, 0, 0, 0, 0, 0, 7, 1],
[4, 0, 0, 0, 0, 0, 2, 0, 0],
[6, 0, 0, 9, 0, 4, 0, 3, 2],
[7, 0, 0, 0, 0, 1, 5, 0, 0],
[2, 0, 0, 0, 3, 0, 0, 6, 4]
]
# make_board prints out the board in terminal with appropriate row and column divisions.
def make_board(board):
for i in range(len(board)):
if i % 3 == 0 and i != 0:
print("- - - - - - - - - - - -")
for j in range(len(board[0])):
if j % 3 == 0 and j != 0:
print(" | ", end="")
if j == 8:
print(board[i][j])
else:
print(str(board[i][j]) + " ", end="")
# get_empty_space finds empty spaces on the board denoted by 0 by looping through each element in the given board.
def get_empty_space(board):
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == 0:
return (i, j) # Returns row,column of an empty spot
return None
# is_valid checks whether the given solution is valid or not
def is_valid(board, num, pos):
# Check row
for i in range(len(board[0])):
if board[pos[0]][i] == num and pos[1] != i:
return False
# Check column
for i in range(len(board)):
if board[i][pos[1]] == num and pos[0] != i:
return False
# Check cube
cube_x = pos[1] // 3
cube_y = pos[0] // 3
for i in range(cube_y * 3, (cube_y * 3) + 3):
for j in range(cube_x * 3, (cube_x * 3) + 3):
if board[i][j] == num and (i, j) != pos:
return False
return True
# solve_board utilizes the helper functions above to solve the board recursively.
def solve_board(board):
get_empty = get_empty_space(board)
if not get_empty:
return True
else:
row, column = get_empty
for i in range(1, 10):
if is_valid(board, i, (row, column)):
board[row][column] = i
if solve_board(board):
return True
board[row][column] = 0
return False
print("Welcome to my Sudoku Solver. The program will solve the sudoku board by using a backtracking algorithm.")
selection = input("Enter '1' to pick board 1 or '2' to pick board 2: ")
if selection == '1':
    print("ORIGINAL BOARD 1:")
    make_board(board1)
    solve_board(board1)
    print("SOLVED BOARD:")
    make_board(board1)
else:
    print("ORIGINAL BOARD 2:")
    make_board(board2)
    solve_board(board2)
    print("SOLVED BOARD:")
    make_board(board2)
|
[
"noreply@github.com"
] |
noreply@github.com
|
4f0a6cf506689d8331fef6df1a76b147b1ff06ad
|
82b495a208ebdeb71314961021fbfe767de57820
|
/chapter-13/sample02.py
|
5d7d05833306dc085a1573bee83e46cd05ba6b89
|
[
"MIT"
] |
permissive
|
krastin/pp-cs3.0
|
7c860794332e598aa74278972d5daa16853094f6
|
502be9aac2d84215db176864e443c219e5e26591
|
refs/heads/master
| 2020-05-28T02:23:58.131428
| 2019-11-13T13:06:08
| 2019-11-13T13:06:08
| 188,853,205
| 0
| 0
|
MIT
| 2019-11-13T13:06:09
| 2019-05-27T13:56:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
import time
from sample01 import linear_search_while
from sample01 import linear_search_for
from sample01 import linear_search_sentinel
from typing import Callable, Any
def time_it(search: Callable[[list, Any], Any], L: list, v: Any) -> float:
"""Time how long it takes to run function search to find
value v in list L.
"""
t1 = time.perf_counter()
search(L, v)
t2 = time.perf_counter()
return (t2 - t1) * 1000.0
def print_times(v: Any, L: list) -> None:
"""Print the number of milliseconds it takes for linear_search(v, L)
to run for list.index, the while loop linear search, the for loop
linear search, and sentinel search.
"""
# Get list.index's running time.
t1 = time.perf_counter()
L.index(v)
t2 = time.perf_counter()
index_time = (t2 - t1) * 1000.0
# Get the other three running times.
while_time = time_it(linear_search_while, L, v)
for_time = time_it(linear_search_for, L, v)
sentinel_time = time_it(linear_search_sentinel, L, v)
print("{0}\t\t{1:.2f}\t{2:.2f}\t{3:.2f}\t{4:.2f}".format(
v, while_time, for_time, sentinel_time, index_time))
L = list(range(10000001)) # A list with just over ten million values
print_times(10, L) # How fast is it to search near the beginning?
print_times(5000000, L) # How fast is it to search near the middle?
print_times(10000000, L) # How fast is it to search near the end?
|
[
"krastin@hashicorp.com"
] |
krastin@hashicorp.com
|
be7eb1e17935db081cc7d27e08ce40acff95b537
|
37c360b5446af7d37a1cfdc3fc784faefd2712a1
|
/listchallenge.py
|
ede0a79592cef6b2e667f4b97fc54c99a9b33ca9
|
[] |
no_license
|
slatecas/girlswhocode
|
1ebe906f17e41f6c700e13b0343916aeaadcefb4
|
890086d04122379c37492006e98d62bb35eea8bd
|
refs/heads/master
| 2020-06-20T05:24:31.301862
| 2019-08-22T15:52:03
| 2019-08-22T15:52:03
| 197,008,873
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
#imports the ability to get a random number (we will learn more about this later!)
from random import *
#Create the list of words you want to choose from.
firstnames = ["Sally", "Nick", "Kirsten", "Lily", "Miguel", "Juliet", "Kim", "Brad"]
lastnames = ["Smith", "Rock", "Gomez", "Walker", "Deer", "Romeo", "Bello", "Pitt"]
#Generates a random integer.
randomfirst = randint(0, len(firstnames)-1)
randomlast = randint(0, len(lastnames)-1)
print(firstnames[randomfirst] +" "+ lastnames[randomlast])
|
[
"slatecas@gmail.com"
] |
slatecas@gmail.com
|
674034c2952b3b96cb70d437374061aae365c2bc
|
995616eeafc92fb80c651c2265cd6bb65126d343
|
/HEURISTICS/Created_Linked_list.py
|
04223c8db9b572ebd50fe63bc648d7a5058c4886
|
[] |
no_license
|
MaestroPravaler/Algoritmos
|
3eab67b0eefb6e47f93992c8ebbab689c2596f54
|
60286ada71ede497ab253652dd13ac8099f61924
|
refs/heads/master
| 2023-02-23T22:06:10.549598
| 2021-01-29T00:13:05
| 2021-01-29T00:13:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
# =============================== CHALLENGE ================================================
# In this challenge you must create a linked-list structure with the following functions.
# Use the class already declared alongside to solve this challenge.
#
# insert_node_to_tail(node) => Inserts a new element after the last node of the list.
#
# insert_node_to_head(node) => Inserts a new element as the first node of the list.
#
# is_empty() => Checks whether the list is empty or not.
#
# head() => Returns the first element of the list.
#
# tail() => Returns the last element of the list.
# ==========================================================================================
class Node:
def __init__(self, value):
self.next = None
self.value = value
class LinkedList:
def __init__(self):
self._head = Node(None)
def insert_node_to_tail(self, node):
self.tail().next = node
    def insert_node_to_head(self, node):
        # Point the new node at the current first element (None if empty),
        # then make it the new first element.
        node.next = self._head.next
        self._head.next = node
def is_empty(self):
return self._head.next is None
def head(self):
return self._head.next
def tail(self):
current = self._head
while current.next:
current = current.next
return current
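# Minimal usage sketch of the required operations (illustrative, not part of
# the original challenge solution):
def _usage_example():
    lst = LinkedList()
    assert lst.is_empty()
    lst.insert_node_to_head(Node(2))
    lst.insert_node_to_head(Node(1))  # 1 becomes the new head
    lst.insert_node_to_tail(Node(3))  # 3 becomes the new tail
    assert lst.head().value == 1
    assert lst.tail().value == 3
    assert not lst.is_empty()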
|
[
"robson_maestro@alumni.usp.br"
] |
robson_maestro@alumni.usp.br
|
aac4db2e2f613a796ff33628461587fd26159cfb
|
db4d56e63c63cd577c3871349ffa2a7c39c80edc
|
/3.WEB/cxr_project/cxr_project/wsgi.py
|
b576c8906ada1a87940826b1a379206b6c76b16d
|
[] |
no_license
|
Lagom92/CXR_AI
|
33014b7471775e776ed51bfeb88128fd7ca4ce6f
|
bb4bbaf3fc984938f153bf6b58ed99324f779070
|
refs/heads/master
| 2023-06-09T11:20:57.613207
| 2021-06-20T11:34:21
| 2021-06-20T11:34:21
| 293,966,064
| 0
| 0
| null | 2021-06-18T00:09:48
| 2020-09-09T01:08:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for cxr_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cxr_project.settings')
application = get_wsgi_application()
|
[
"jyyt0147@gmail.com"
] |
jyyt0147@gmail.com
|
f937222fd7771c8323cceb1c43e840a9ebbda87b
|
86b0256a5da9859d91a9bc96ac3bd0c6143e866a
|
/Clustering and Dimension Reduction/D.py
|
16d1d20d2b8cb2681a31f06c4adce5f1e09ff987
|
[] |
no_license
|
abhishekkgp/Machine-learning
|
69698eb683e7fc713d20f7f267e52793916736e5
|
63b813ebcff73360d130ec461276c8a5c2ab0fbb
|
refs/heads/master
| 2021-12-30T21:15:49.834130
| 2021-10-08T06:24:52
| 2021-10-08T06:24:52
| 254,072,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,969
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# imports
import numpy as np
import matplotlib.pyplot as plt
from heapq import heappush, heappop
import pandas as pd
from pprint import pprint
import math
import time
import sys
import random
import copy
data1=pd.read_csv('AllBooks_baseline_DTM_Labelled.csv') #importing the data
n=len(data1.axes[0]) #to find number of rows in each attribute
data1.drop([13], inplace = True) # removing "Buddhism_Ch14" from Dataframe
data1.reset_index(drop=True, inplace=True) # to adjust indices accordingly
data1["Unnamed: 0"]=data1["Unnamed: 0"].str.replace(r'_Ch', '') # to remove "_Ch"
data1["Unnamed: 0"] = data1["Unnamed: 0"].str.replace('\d+', '') # to remove number after book name
#print(data1.iloc[0:20,0:])
data_f=data1.drop(data1.columns[0],axis='columns')
#data=data2.iloc[:,:].values
'''data=np.array(data) # transforming into numpy array
data=np.float64(data)
num_rows=data.shape[0]
num_columns=data.shape[1]
for j in range(0,num_columns):
count=0
for i in range(0,num_rows):
count+=data[i][j]
for i in range(0,num_rows):
data[i][j]=1.0*data[i][j]*math.log(1.0*(1+num_rows)/(1+count))
for i in range(0,num_rows): #Normalizing each datapoint by dividing by the magnitude
magnitude=0
for j in range(0,num_columns):
magnitude+=data[i][j]*data[i][j]
magnitude=math.sqrt(magnitude)
    if(magnitude==0): # skip any data point with magnitude zero (i.e., an empty row)
continue
for j in range(0,num_columns):
data[i][j]/=magnitude
# Data can be used for further calculations
print("\n \n print final value of data: ")
print(data[0:15,0:15])'''
print(data_f.head())
# In[2]:
# Importing standardscalar module
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
# fitting
scalar.fit(data_f)
scaled_data = scalar.transform(data_f)
# Importing PCA
from sklearn.decomposition import PCA
# Let's say, components = 2
pca = PCA(n_components = 100)
pca.fit(scaled_data)
x_pca = pca.transform(scaled_data)
#x_pca.shape
df=x_pca # for k- means clustering
data_agglo=x_pca # for agglomorative clustering
# giving a larger plot
plt.figure(figsize =(8, 6))
plt.scatter(df[:, 0], df[:, 1])
# In[3]:
num_rows=df.shape[0]
num_columns=df.shape[1]
print(num_columns)
# In[4]:
# components
print(pca.components_ )
# In[5]:
print(x_pca[0:15,:])
#df=x_pca #now df is final array to reduced features
# In[6]:
#Applying K-mean used method on this to find 8 clusters
data=np.array(df) # transforming into numpy array
data=np.float64(data)
num_rows=data.shape[0]
num_columns=data.shape[1]
for j in range(0,num_columns):
count=0
for i in range(0,num_rows):
count+=data[i][j]
for i in range(0,num_rows):
data[i][j]=1.0*data[i][j]*math.log(1.0*(1+num_rows)/(1+count))
for i in range(0,num_rows): #Normalizing each datapoint by dividing by the magnitude
magnitude=0
for j in range(0,num_columns):
magnitude+=data[i][j]*data[i][j]
magnitude=math.sqrt(magnitude)
    if(magnitude==0): # skip any data point with magnitude zero (i.e., an empty row)
continue
for j in range(0,num_columns):
data[i][j]/=magnitude
# Data can be used for further calculations
print("\n \n print final value of data: ")
print(data[0:15,0:15])
# In[2]:
def distance(centroids,data,K):
distance1=np.ones(K)
for i in range(K):
        distance1[i]=np.dot(centroids[i],data) # the centroid was not normalised, so we normalise it here.
        denominator=np.sqrt(np.dot(centroids[i],centroids[i])*np.dot(data,data)) # data is already normalised, so np.dot(data,data)=1.
        distance1[i]=distance1[i]/denominator
return(np.exp(-distance1))
# In[3]:
K=8 # to obtain K=8 clusters of documents
iterations=300
num_rows1=data.shape[0]
num_columns1=data.shape[1]
centroids=np.random.rand(K,num_columns1)
# In[12]:
belongs_to=np.ones(len(data))
SSE=np.zeros(iterations)
for itr in range(iterations):
for i in range(len(data)):
distances=distance(centroids,data[i],K)
SSE[itr]+=(distances.sum())
min_dist_index=0
min_dist=distances[0]
for j in range(1,K):
if(distances[j]<min_dist):
min_dist=distances[j]
min_dist_index=j
belongs_to[i]=min_dist_index
centroids[:]=0
count=np.zeros(K)
for i in range(len(belongs_to)):
centroids[int(belongs_to[i])]+=data[i]
count[int(belongs_to[i])]+=1
for i in range(K):
if(count[i]!=0):
centroids[i]/=count[i]
cluster=[[],[],[],[],[],[],[],[]]
for i in range(len(belongs_to)):
cluster[int(belongs_to[i])].append(i)
sorted_cluster=sorted(cluster)
#return(sorted_cluster,centroids,SSE)
# In[13]:
Output=sorted_cluster
print(Output)
# In[6]:
#import matplotlib.pyplot as plt
plt.figure(figsize=[15,8])
plt.plot(range(iterations),SSE,c='g')
plt.xlabel('Iteration')
plt.ylabel('SSE')
plt.show()
# In[7]:
f=open("E:/SEMESTER 6/ML-CS60050/Assgnment/Third/kmeans_reduced.txt",'w')
for i in range(8):
for j in sorted_cluster[i]:
f.write(str(j))
f.write(",")
f.write("\n")
f.close()
# In[8]:
plt.scatter(data[:,0],data[:,1],c='blue',label='unclustered data')
#plt.xlabel('Rows')
#plt.ylabel('columns')
plt.legend()
plt.title('Plot of data points')
plt.show()
# In[9]:
# Applying heirarical method on data_agglo array
def cosine_similarity_distance(point_1, point_2): # to find distance
distance=0.0
distance=np.dot(point_1, point_2)
    denominator=np.sqrt(np.dot(point_1,point_1)*np.dot(point_2,point_2))
    distance=distance/denominator
return(np.exp(-distance))
def add_points(point_1, point_2):
for i in range(0, len(point_1)):
point_1[i] = float(point_1[i]) + float(point_2[i])
return point_1
# In[3]:
data_a=data_agglo
k_value=8 # no. of cluster =8
#distances=[]
distances=[]
for i in range(len(data_a)):
row=[]
for j in range(i):
row.append(cosine_similarity_distance(data_a[i],data_a[j]))
distances.append(row)
del(row)
cluster=[]
for i in range(len(data)):
cluster.append([i])
while(len(cluster)>k_value):
min1=10000
combine=[0,1]
for i in range(len(cluster)):
for j in range(i+1,len(cluster)):
#temp=single_linkage(cluster,distances,i,j)
min_value=1000
for m in cluster[i]:
for n in cluster[j]:
if(m>n):
if(min_value>distances[m][n]):
min_value=distances[m][n]
else:
min_value=distances[n][m]
temp=min_value
if(min1>temp):
min1=temp
combine[0]=i
combine[1]=j
cluster[combine[0]]=cluster[combine[0]]+cluster[combine[1]]
del(cluster[combine[1]])
sorted_cluster=sorted(cluster)
# In[4]:
#k_value=8 # 8 cluster we taking here
#agglomerative_local(data2, k_value)
#cluster=centroid_points
#for i in range(len(cluster)):
# cluster[i]=sorted(cluster[i]) # sorting each row of final cluster or say centroid point
#sorted_cluster=sorted(cluster) # sorting the final value
print('cluster size: ',len(sorted_cluster))
j=1
for i in sorted_cluster: # total 8 clusters
    print("#")
    print("cluster-",j,"=",i)
    print("\n \n")
    j=j+1
# In[5]:
f=open("E:/SEMESTER 6/ML-CS60050/Assgnment/Third/agglomerative_reduced.txt",'w')
for i in range(len(sorted_cluster)):
for j in sorted_cluster[i]:
f.write(str(j))
f.write(",")
f.write("\n")
f.close()
# Actual code ends here
# In[8]:
len(data_a[0])
# In[ ]:
|
[
"noreply@github.com"
] |
noreply@github.com
|
8016bbcceb350db726511f5b1f6141a2dbd37250
|
2d8fad815205a008114d9e9848da8eaf477c4d16
|
/server/server.py
|
4eb245cf94767dbdbb8829c3b32aae4212cd8a37
|
[
"MIT"
] |
permissive
|
thesecretlab/burgerthing
|
558ec884cc1cce91cf1b4dcb81ff51b6cad139d6
|
d368ec45ff1109eb1265f583dc8a64e0ceb47d64
|
refs/heads/master
| 2021-04-12T04:38:07.970485
| 2014-10-16T22:19:56
| 2014-10-16T22:19:56
| 25,182,408
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
import os
from flask import Flask, render_template, session, redirect, url_for, request
from flask.ext.script import Manager, Shell
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask.ext.sqlalchemy import SQLAlchemy
import json
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = '8tqwe7dsgicga796rg23bouqywf'
app.config['SQLALCHEMY_DATABASE_URI'] =\
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['DEBUG'] = True
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
class Ingredient(db.Model):
__tablename__ = 'ingredients'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
def __repr__(self):
return '<Ingredient %r>' % self.name
class Order(db.Model):
__tablename__ = 'orders'
id = db.Column(db.Integer, primary_key=True)
ingredients = db.Column(db.String(256), unique=False)
def __repr__(self):
return '<Order %r (%r)>' % (self.id, self.ingredients)
@app.route('/')
def list_orders():
orders = Order.query.all()
return render_template('orders.html', orders=orders)
@app.route('/about')
def show_about():
return render_template('about.html')
@app.route('/orders/new', methods=['POST'])
def create_orders():
if 'ingredients' not in request.form or len(request.form['ingredients']) == 0:
msg = "Invalid order: No ingredients provided"
return msg, 400
try:
order = Order(ingredients = request.form['ingredients'])
db.session.add(order)
db.session.commit()
except Exception:
msg = "Invalid order!"
return msg, 400
return str(order.id)
@app.route('/orders/delete/<order_id>')
def remove_order(order_id):
order = Order.query.filter_by(id=order_id).first()
if order is None:
msg = "Order not found"
return json.dumps({"error": msg}), 404
db.session.delete(order)
db.session.commit()
return redirect(url_for('list_orders'))
@app.route('/orders.json')
def list_orders_json():
orders = Order.query.all()
json_orders = []
for order in orders:
d = {
"id": order.id,
"ingredients": order.ingredients
}
json_orders.append(d)
return json.dumps({"orders":json_orders})
@app.route('/ingredients')
def list_ingredients():
ingredients = Ingredient.query.all()
return render_template('ingredients.html', ingredients=ingredients)
@app.route('/ingredients.json')
def list_ingredients_json():
ingredients = Ingredient.query.all()
json_ingredients = []
for ingredient in ingredients:
d = {
"name": ingredient.name
}
json_ingredients.append(d)
return json.dumps({"ingredients":json_ingredients})
if __name__ == '__main__':
db.create_all()
manager.run()
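# Hedged usage note: with Flask-Script, the dev server starts with
#   python server.py runserver
# and an order can be created with, e.g.,
#   curl -X POST -d 'ingredients=bun,patty,cheese' http://localhost:5000/orders/new
# (ingredient names are illustrative).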
|
[
"jon@secretlab.com.au"
] |
jon@secretlab.com.au
|
5e50c90e36940a756c0066a4f1a0415e5c585153
|
bc2a96e8b529b0c750f6bc1d0424300af9743904
|
/acapy_client/models/v20_pres_ex_record_list.py
|
637f5f3594379b6fb12a0376417eca62ccdfbc8b
|
[
"Apache-2.0"
] |
permissive
|
TimoGlastra/acapy-client
|
d091fd67c97a57f2b3462353459780281de51281
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
refs/heads/main
| 2023-06-29T22:45:07.541728
| 2021-08-03T15:54:48
| 2021-08-03T15:54:48
| 396,015,854
| 1
| 0
|
Apache-2.0
| 2021-08-14T13:22:28
| 2021-08-14T13:22:27
| null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.v20_pres_ex_record import V20PresExRecord
from ..types import UNSET, Unset
T = TypeVar("T", bound="V20PresExRecordList")
@attr.s(auto_attribs=True)
class V20PresExRecordList:
""" """
results: Union[Unset, List[V20PresExRecord]] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
results: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.results, Unset):
results = []
for results_item_data in self.results:
results_item = results_item_data.to_dict()
results.append(results_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if results is not UNSET:
field_dict["results"] = results
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
results = []
_results = d.pop("results", UNSET)
for results_item_data in _results or []:
results_item = V20PresExRecord.from_dict(results_item_data)
results.append(results_item)
v20_pres_ex_record_list = cls(
results=results,
)
v20_pres_ex_record_list.additional_properties = d
return v20_pres_ex_record_list
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
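# Hedged round-trip sketch (empty record list; payload is illustrative):
def _roundtrip_example():
    rec_list = V20PresExRecordList.from_dict({"results": []})
    return rec_list.to_dict()  # -> {"results": []}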
|
[
"dbluhm@pm.me"
] |
dbluhm@pm.me
|
06e44c3cb8063b94be445a2a1321f3db2f6243a2
|
e5606704ee22c5ac79064472557640824e983101
|
/manage.py
|
13fe56687a6da9fbdc62731fd558e8718b3ce9e6
|
[] |
no_license
|
anandzzz1973/notes_portal
|
6ad86789e7124ce56d0fe488814bd5c37efa63b8
|
05d7266b7cbad731cc9b3bee91c670b4a9753543
|
refs/heads/master
| 2022-04-23T20:07:54.283871
| 2020-04-20T09:24:53
| 2020-04-20T09:24:53
| 257,230,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'notesportal.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"anandzzz1973@gmail.com"
] |
anandzzz1973@gmail.com
|
cbf6623877f12fe14c7d0749337d94361c3e3834
|
03a3cf7c9495612d233eaa362e3c355fb3de445f
|
/ir.py
|
f9d4a2d8a30ffe8aefb3f6ec92e5d7232ce789bb
|
[] |
no_license
|
ijahongirmirzo/raspberry_car
|
b268169190c138753e6318cd1199bde6e90285a9
|
b8c155fcc9f4b4666d2c6528a8ad9064c94b39a1
|
refs/heads/master
| 2023-05-08T22:43:34.801285
| 2021-05-30T09:15:24
| 2021-05-30T09:15:24
| 349,775,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
import time
from car_details import get_obstacle
while True:
get_obstacle()
time.sleep(0.5)
print('---------')
|
[
"ijahongirmirzo@gmail.com"
] |
ijahongirmirzo@gmail.com
|
944a8ae56aaf3a8a42991d685ebf10a9b08a1acc
|
b0a1b7eb6e755fc2f57ea881ab9e662b67e186be
|
/BS Spider/tutorial/spiders/quotes_spider.py
|
2c85814a6f2bcbf0250ac14456c5b6afe6fd9e65
|
[] |
no_license
|
swgleave/BS-Robot
|
3cbc2b56aeecabb7a5bcb9be909b4f8f5cd1dce3
|
d3c390e53f550f02cbcb57f092e5b5c4f10e3c1d
|
refs/heads/master
| 2020-03-27T09:53:27.215065
| 2019-03-12T16:41:25
| 2019-03-12T16:41:25
| 146,380,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import scrapy
class QuotesSpider(scrapy.Spider):
name = "quotes"
start_urls = [
'http://quotes.toscrape.com/page/1/',
'http://quotes.toscrape.com/page/2/',
]
def parse(self, response):
for quote in response.css('div.quote'):
yield {
'text': quote.css('span.text::text').extract_first(),
'author': quote.css('small.author::text').extract_first(),
'tags': quote.css('div.tags a.tag::text').extract(),
}
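# Hedged usage note (standard Scrapy CLI, not part of the tutorial code):
# run from the project root with `scrapy crawl quotes -o quotes.json`,
# or standalone with `scrapy runspider quotes_spider.py -o quotes.json`.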
|
[
"scottgleave@scotts-mbp.lan"
] |
scottgleave@scotts-mbp.lan
|
9ff464e41f5ae195d170b9eb2eb24289ba97d2f2
|
cff529b50ab0eddc0df5a5f929170ebb823b6a87
|
/BinaryTree/bst_min.py
|
921a7cf2550b86b18ef4ffec44c23f4b5bedcc4c
|
[] |
no_license
|
kprashant94/Codes
|
4a4ee26a5f127db94f3473459e3843c9245b2846
|
ba239475a2d77ca8247e5c9685714372b5311a6c
|
refs/heads/master
| 2021-01-11T07:27:23.480847
| 2017-08-14T11:02:02
| 2017-08-14T11:02:02
| 69,662,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
########################################################################
# Title: Binary Search Tree
# Algorithm: Minimum
# Project: codewarm.in
#
# Description:
# 1. Start with root.
# 2. Follow the left child until a node with no left child is reached.
# 3. Return the key stored at that node, which is the
#    minimum element in the tree.
# Time complexity : O(h), i.e. O(log n) for a balanced tree
#
########################################################################
class Node:
def __init__(self,key,left=None,right=None,parent=None):
self.key = key
self.left = left
self.right = right
self.parent = parent
class BinarySearchTree:
def __init__(self):
self.root = None
def min(self):
temp = self._min(self.root)
if temp:
return temp.key
else:
return None
def _min(self,node):
if node:
if node.left:
return self._min(node.left)
else:
return node
else:
return None
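# Hedged usage sketch: the class exposes no insert method, so a small tree is
# wired up manually here just to exercise min().
def _demo_min():
    tree = BinarySearchTree()
    root = Node(8)
    root.left = Node(3, parent=root)
    root.left.left = Node(1, parent=root.left)
    tree.root = root
    assert tree.min() == 1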
|
[
"pkgjnv@gmail.com"
] |
pkgjnv@gmail.com
|
97f4a6b8d5f5b27b834790d8aae3f6e3e017f377
|
65b8e025b81ff70660f9b2ea9396a2c839af290c
|
/tests/test_humidity_ratio.py
|
6c863db899fbab264c7ebb12a3a8d9877264dd3d
|
[
"MIT"
] |
permissive
|
aeieng/python-measurement
|
190d7444a415ceaea662ed1e872d71d4dda34479
|
5a61711fbc728546489bd50bf18eb915eff231d3
|
refs/heads/master
| 2021-05-03T12:13:51.006737
| 2018-02-13T00:09:21
| 2018-02-13T00:09:21
| 120,496,630
| 0
| 0
| null | 2018-02-07T01:00:02
| 2018-02-06T17:29:49
|
Python
|
UTF-8
|
Python
| false
| false
| 374
|
py
|
from .base import MeasurementTestBase
from measurement.measures import HumidityRatio
class HumidityRatioTest(MeasurementTestBase):
def test_humidity_ratio_kwarg(self):
val1 = HumidityRatio(kg__kg=1.0)
val2 = HumidityRatio(grain__lb=7000.0)
self.assertEqual(
round(val1.standard, 6),
round(val2.standard, 6),
)
|
[
"jmcneill@aeieng.com"
] |
jmcneill@aeieng.com
|
21c172b953044d5360e00ceb641b12757fdb8c14
|
8974cb98b1525f425a095f386f9ed4f43ab166af
|
/linearregression.py
|
68cf47b4e0fc9a088978795a552276a6ccbe6b3b
|
[] |
no_license
|
yogeshnimangre007/python-for-web-access
|
52dd6c1bb14feb06aad200ab4eab56d07c2f9bf2
|
7935eee9062f799dea833d3a94cb44d525e8729f
|
refs/heads/master
| 2020-08-08T00:25:48.196516
| 2019-10-11T08:21:21
| 2019-10-11T08:21:21
| 213,640,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
'''
#creating linear regression using scikit-learn
from sklearn.linear_model import LinearRegression
lm=LinearRegression()
X = df[['highway-mpg']]
Y = df['price']
lm.fit(X, Y)
Yhat=lm.predict(X)
'''
'''
#creating a regression plot; it draws the fitted line
import seaborn as sns
sns.regplot(x='highway-mpg',y='price',data=df)
plt.ylim(0,)
#creating a residual plot (difference between predicted and actual value) w.r.t. the x-axis values; a uniform distribution means our regression is good
sns.residplot(df['highway-mpg'],df['price'])
'''
'''
#creating distribution plot (gives direct picture of actuall value and pridiction value in graphical way)
import seaborn as sns
axl=sns.distplot(df['price'],hist=False,color="r",label="actual values")
sns.distplot(Yhat,hist=False,color="b",label="fited value",ax=axl)
'''
'''
#creating polynomial regression
import numpy as np
f=np.polyfit(x,y,3)
n=np.poly1d(f)
print(n)
#this cannot do multivariate polynomial regression; for that, use the following code
from sklearn.preprocessing import PolynomialFeatures
pr=PolynomialFeatures(degree=2, include_bias=False)
x_polly=pr.fit_transform(x[['horse-power','curb-weight']])
'''
'''
#creating a pipeline reduces both the normalization and polynomialization steps; right after it we can do linear regression
from sklearn.pipeline import Pipeline
input=[('scale',StandardScaler()),('polynomial',PolynomialFeatures())] #also add other steps you need, ending with the model.
pipe=Pipeline(input)
#most important is that it eases the code
'''
'''
#a model is mostly evaluated by MSE (mean squared error); the second measure is R-squared
#R-squared implementation:
X = df[['highway-mpg']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
out = lm.score(X, Y)
'''
|
[
"yogeshnimangre@YOGESHs-MacBook-Pro.local"
] |
yogeshnimangre@YOGESHs-MacBook-Pro.local
|
f2515b3ea9d81b413d7f16c3fd76965b099723a9
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_5/ar_12/test_artificial_1024_Fisher_Lag1Trend_5_12_0.py
|
cdbf8396fc2e08ebfbdd54ac8c3f8c8a7b230896
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 262
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
1a41bd25d395783d808bbe7baa3ab53534669a7e
|
f5a82f7b2695ed08c9f7432013889590ed9cd1d0
|
/healthpoint/decorators.py
|
17bb337812dcdbf86156385ff894f6a57f2c31fe
|
[
"MIT"
] |
permissive
|
lordoftheflies/django-healthpoint
|
bb717f3a4f9a96b9d81f10fbb45e6982c020e93b
|
aaf8c77150b2ae5bf7d3f9050841b885e8cda17a
|
refs/heads/master
| 2020-08-03T02:55:15.244656
| 2019-09-18T16:13:10
| 2019-09-18T16:13:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
from functools import wraps
from healthpoint.registry import register_health_check
def health_check(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
result = f(*args, **kwargs)
if isinstance(result, bool):
success, detail = result, 'OK' if result else 'ERROR'
elif isinstance(result, tuple) and len(result) == 2:
success, detail = result
else:
raise ValueError(
'Your @health_check must return'
' a `bool`, or a tuple of (`bool`, `detail`)')
except Exception as e:
success, detail = False, str(e)
return success, detail
register_health_check(wrapper)
return wrapper
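# Hedged usage sketch: the decorated function may return a bare bool or a
# (bool, detail) tuple; both are normalized and the check is registered at
# decoration time. The check name below is illustrative.
def _example():
    @health_check
    def cache_reachable():
        return True, 'cache responded'
    return cache_reachable()  # -> (True, 'cache responded')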
|
[
"raymond.penners@intenct.nl"
] |
raymond.penners@intenct.nl
|
30b9875c668ac718286b46df876fdd9aedeea679
|
6a24fbbc8f5a9ee7b08080e0c81cdbc85caca935
|
/bEncode.py
|
eaf1e7b86ba7447bd2a6a0d118d8ac23cb29e430
|
[] |
no_license
|
Technochrome/pyTorrent
|
dcbbbd7d59af38d2d3b8ae63186afef3f0e0f991
|
fd79ed25fd1be32acbfd898d486cd315cb9a9d44
|
refs/heads/master
| 2020-06-01T16:06:59.822153
| 2013-11-13T23:26:16
| 2013-11-13T23:26:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
import sys
import StringIO
import string
def peek(io):
p = io.tell()
c = io.read(1)
io.seek(p)
return c
def bytesToHex(bytes):
itoch = lambda i: "0123456789ABCDEF"[i]
return string.join([itoch((c>>4)&0xf) + itoch(c&0xf) for c in bytes],'')
def printBencode(d,tab='',listLen=10,byteLen=40):
if isinstance(d,dict):
print tab+'{'
tab += '\t'
for key in d:
if isinstance(d[key],(dict,list,tuple)):
print tab,key,'='
printBencode(d[key],tab+'\t',listLen,byteLen)
else:
if not key.startswith('__raw_'):
printBencode(d[key],tab+key+' =',listLen,byteLen)
print tab[:-1],'}'
elif isinstance(d,(list,tuple)):
print tab,'['
for (i,e) in enumerate(d):
if i > listLen:
print tab+'\t...'
print tab,'\tand %d more' %(len(d)-listLen)
break
printBencode(e,tab+'\t')
print tab,']'
elif isinstance(d,bytearray):
bytes = bytesToHex(d)
if len(bytes)>byteLen:
print tab,bytes[0:byteLen],'...'
else:
print tab,bytes
elif isinstance(d,basestring):
print tab,repr(d)
else:
print tab,d
def bDecodeFile(io):
def _readInt(io):
i = 0
while peek(io).isdigit():
i = i*10 + ord(io.read(1)) - ord('0')
return i
c = peek(io)
if c == 'd': # dict
io.read(1)
ret = {}
while peek(io) != 'e':
key = bDecodeFile(io)
s = io.tell()
value = bDecodeFile(io)
ret[key] = value
e = io.tell()
#save raw version of entry, necessary for info_hash key
io.seek(s)
ret['__raw_'+key] = io.read(e-s)
io.read(1)
return ret
elif c == 'i': # int
io.read(1)
i = _readInt(io)
io.read(1) # e
return i
elif c == 'l': # list
io.read(1)
ret = []
while peek(io) != 'e':
ret.append(bDecodeFile(io))
io.read(1) # e
return ret
else: # raw data
dLen = _readInt(io)
io.read(1) # :
content = io.read(dLen)
return content
def bDecode(string):
return bDecodeFile(StringIO.StringIO(string))
if __name__ == "__main__":
if len(sys.argv) == 2:
with open(sys.argv[1], "r") as myfile:
d = bDecode(myfile.read())
for key in d:
print key
for key in d['info']:
print '-',key
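# Hedged usage sketch: decoding a tiny bencoded dictionary. Wrapped in a
# function so importing this module stays side-effect free; the payload is
# illustrative.
def _demo_decode():
    d = bDecode('d3:cow3:moo4:spam4:eggse')
    assert d['cow'] == 'moo' and d['spam'] == 'eggs'
    assert d['__raw_cow'] == '3:moo'  # raw slice kept for info_hash-style uses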
|
[
"flamenbird@gmail.com"
] |
flamenbird@gmail.com
|
f4d2d7900928d741f21df8df26d01ac16b7b4688
|
03eabd423e813efe497ee7c02889b1a14c12e5a1
|
/aoc2020/day18/__init__.py
|
3b9ea28467a9a1c460e451f35716c52f9780b979
|
[] |
no_license
|
sodle/advent-of-code-2020
|
fd6b68b2fe9a994588840a9afde837249132a91a
|
26f5704b063272f8a6128b64d7a60172b0bfaa39
|
refs/heads/main
| 2023-02-02T22:05:24.959634
| 2020-12-20T20:09:49
| 2020-12-20T20:09:49
| 317,423,823
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,011
|
py
|
def evaluate_expression(expr: str) -> int:
while "(" in expr:
l_parens = 1
r_parens = 0
open_paren_idx = expr.index("(")
idx = open_paren_idx + 1
while l_parens != r_parens:
if expr[idx] == "(":
l_parens += 1
elif expr[idx] == ")":
r_parens += 1
idx += 1
paren_value = evaluate_expression(str(expr[open_paren_idx + 1:idx - 1]))
expr = f"{expr[:open_paren_idx]}{paren_value}{expr[idx:]}"
expr = expr.split(" ")
while len(expr) > 1:
a, operator, b = expr[:3]
if operator == "+":
value = int(a) + int(b)
else:
value = int(a) * int(b)
expr = [value, *expr[3:]]
return expr[0]
def evaluate_expression_part2(expr: str) -> int:
while "(" in expr:
l_parens = 1
r_parens = 0
open_paren_idx = expr.index("(")
idx = open_paren_idx + 1
while l_parens != r_parens:
if expr[idx] == "(":
l_parens += 1
elif expr[idx] == ")":
r_parens += 1
idx += 1
paren_value = evaluate_expression_part2(str(expr[open_paren_idx + 1:idx - 1]))
expr = f"{expr[:open_paren_idx]}{paren_value}{expr[idx:]}"
expr = expr.split(" ")
while "+" in expr:
oper_idx = expr.index("+")
a = int(expr[oper_idx - 1])
b = int(expr[oper_idx + 1])
result = a + b
expr = [*expr[:oper_idx - 1], result, *expr[oper_idx + 2:]]
while "*" in expr:
oper_idx = expr.index("*")
a = int(expr[oper_idx - 1])
b = int(expr[oper_idx + 1])
result = a * b
expr = [*expr[:oper_idx - 1], result, *expr[oper_idx + 2:]]
return expr[0]
def part1(expressions: [str]) -> int:
return sum(evaluate_expression(expr) for expr in expressions)
def part2(expressions: [str]) -> int:
return sum(evaluate_expression_part2(expr) for expr in expressions)
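# Hedged sanity check against the puzzle's published sample answers
# (71/26 for part 1 and 231/46 for part 2):
if __name__ == "__main__":
    samples = ["1 + 2 * 3 + 4 * 5 + 6", "2 * 3 + (4 * 5)"]
    assert part1(samples) == 71 + 26
    assert part2(samples) == 231 + 46
    print("sample expressions evaluate as expected")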
|
[
"scott@sjodle.com"
] |
scott@sjodle.com
|
27cf085e94189d07b34a451a64d689a0e82d923d
|
cb3b8f86dc9b2bf2b5dd614dda7bbedeea9105d9
|
/nyural/optimizers.py
|
0c294c54d068d78cc8718ee5d37a0cb2448a7421
|
[] |
no_license
|
Sandy4321/nyural
|
34e85aa4ffe1264725834ef5261885ce9ddbbef9
|
acd423c17c7e68c773024f12a32b53efda634c42
|
refs/heads/master
| 2021-01-14T10:18:26.073554
| 2015-10-26T02:20:54
| 2015-10-26T02:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,010
|
py
|
from collections import defaultdict
import numpy as np
class Optimizer(object):
# note: optimizers don't actually need to inherit from this class, this
# just demonstrates the minimum structure that is expected
def __init__(self):
"""Initialize the optimizer with whatever parameters are appropriate.
"""
# the network will be set when the optimizer is added to a network
self.net = None
def compute_update(self, printing=False):
"""Compute a weight update for the current batch.
It can be assumed that the batch has already been stored in net.inputs
and net.targets, and the nonlinearity activations/derivatives for
the batch are cached in net.activations and net.d_activations.
:param printing: if True, print out data about the optimization
"""
raise NotImplementedError()
class HessianFree(Optimizer):
def __init__(self, CG_iter=250, init_damping=1,
struc_damping=None, plotting=True):
"""Use Hessian-free optimization to compute the weight update.
Based on
Martens, J. (2010). Deep learning via Hessian-free optimization. In
Proceedings of the 27th International Conference on Machine Learning.
:param CG_iter: the maximum number of CG iterations to run per epoch
:param init_damping: the initial value of the Tikhonov damping
:param struc_damping: scale on structural damping, relative to
Tikhonov damping (only used in recurrent nets)
:param plotting: if True then collect data for plotting (actual
plotting handled in parent network)
"""
super(HessianFree, self).__init__()
self.CG_iter = CG_iter
self.init_delta = None
self.damping = init_damping
self._struc_damping = struc_damping
self.plotting = plotting
self.plots = defaultdict(list)
def compute_update(self, printing=False):
err = self.net.error() # note: don't reuse previous error (diff batch)
# compute gradient
grad = self.net.calc_grad()
if printing:
print "initial err", err
print "grad norm", np.linalg.norm(grad)
# run CG
if self.init_delta is None:
self.init_delta = np.zeros_like(self.net.W)
deltas = self.conjugate_gradient(self.init_delta * 0.95, grad,
iters=self.CG_iter)
if printing:
print "CG steps", deltas[-1][0]
self.init_delta = deltas[-1][1] # note: don't backtrack this
# CG backtracking
new_err = np.inf
for j in range(len(deltas) - 1, -1, -1):
prev_err = self.net.error(self.net.W + deltas[j][1])
# note: we keep using the cached inputs, not rerunning the plant
# (if there is one). that is, we are evaluating whether the input
# improves on those inputs, not whether it improves the overall
# objective. we could do the latter instead, but it makes things
# more prone to instability.
if prev_err > new_err:
break
delta = deltas[j][1]
new_err = prev_err
else:
j -= 1
if printing:
print "using iteration", deltas[j + 1][0]
print "backtracked err", new_err
# update damping parameter (compare improvement predicted by
# quadratic model to the actual improvement in the error)
denom = (0.5 * np.dot(delta,
self.net.calc_G(delta, damping=self.damping)) +
np.dot(grad, delta))
improvement_ratio = (new_err - err) / denom if denom != 0 else 1
if improvement_ratio < 0.25:
self.damping *= 1.5
elif improvement_ratio > 0.75:
self.damping *= 0.66
if printing:
print "improvement_ratio", improvement_ratio
print "damping", self.damping
# line search to find learning rate
l_rate = 1.0
min_improv = min(1e-2 * np.dot(grad, delta), 0)
for _ in range(60):
# check if the improvement is greater than the minimum
# improvement we would expect based on the starting gradient
if new_err <= err + l_rate * min_improv:
break
l_rate *= 0.8
new_err = self.net.error(self.net.W + l_rate * delta)
else:
# no good update, so skip this iteration
l_rate = 0.0
new_err = err
if printing:
print "min_improv", min_improv
print "l_rate", l_rate
print "l_rate err", new_err
print "improvement", new_err - err
if self.plotting:
self.plots["training error"] += [new_err]
self.plots["learning rate"] += [l_rate]
self.plots["damping"] += [self.damping]
self.plots["CG iterations"] += [deltas[-1][0]]
return l_rate * delta
def conjugate_gradient(self, init_delta, grad, iters=250):
"""Find minimum of quadratic approximation using conjugate gradient
algorithm."""
store_iter = 5
store_mult = 1.3
deltas = []
vals = np.zeros(iters, dtype=self.net.dtype)
base_grad = -grad
delta = init_delta
residual = base_grad - self.net.calc_G(init_delta, damping=self.damping)
res_norm = np.dot(residual, residual)
direction = residual.copy()
if self.net.debug:
self.net.check_grad(grad)
for i in range(iters):
if self.net.debug:
print "-" * 20
print "CG iteration", i
print "delta norm", np.linalg.norm(delta)
print "direction norm", np.linalg.norm(direction)
G_dir = self.net.calc_G(direction, damping=self.damping)
# calculate step size
step = res_norm / np.dot(direction, G_dir)
if self.net.debug:
print "step", step
self.net.check_G(G_dir, direction, self.damping)
assert np.isfinite(step)
assert step >= 0
assert (np.linalg.norm(np.dot(direction, G_dir)) >=
np.linalg.norm(np.dot(direction,
self.net.calc_G(direction,
damping=0))))
# update weight delta
delta += step * direction
# update residual
residual -= step * G_dir
new_res_norm = np.dot(residual, residual)
if new_res_norm < 1e-20:
# early termination (mainly to prevent numerical errors);
# if this ever triggers, it's probably because the minimum
# gap in the normal termination condition (below) is too low.
# this only occurs on really simple problems
break
# update direction
beta = new_res_norm / res_norm
direction *= beta
direction += residual
res_norm = new_res_norm
if i == store_iter:
deltas += [(i, np.copy(delta))]
store_iter = int(store_iter * store_mult)
# martens termination conditions
vals[i] = -0.5 * np.dot(residual + base_grad, delta)
gap = max(int(0.1 * i), 10)
if self.net.debug:
print "termination val", vals[i]
if (i > gap and vals[i - gap] < 0 and
(vals[i] - vals[i - gap]) / vals[i] < 5e-6 * gap):
break
deltas += [(i, np.copy(delta))]
return deltas
@property
def struc_damping(self):
if self._struc_damping is None:
return None
return self.damping * self._struc_damping
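# --- Illustration (not part of the original file) ----------------------------
# conjugate_gradient above approximately solves G * delta = -grad for a
# positive semi-definite curvature matrix G. The same idea on a plain numpy
# matrix, as a minimal self-contained sketch:
#
#     A = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive definite
#     b = np.array([1.0, 2.0])
#     x = np.zeros(2)
#     r = b - A.dot(x)                         # residual
#     d = r.copy()                             # search direction
#     for _ in range(2):                       # n steps solve an n-d system
#         step = r.dot(r) / d.dot(A.dot(d))
#         x += step * d
#         r_new = r - step * A.dot(d)
#         d = r_new + (r_new.dot(r_new) / r.dot(r)) * d
#         r = r_new
#     # x is now (numerically) np.linalg.solve(A, b)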
class SGD(Optimizer):
def __init__(self, l_rate=1, plotting=False):
"""Compute weight update using first-order gradient descent.
:param l_rate: learning rate to apply to weight updates
:param plotting: if True then collect data for plotting (actual
plotting handled in parent network)
"""
super(SGD, self).__init__()
self.l_rate = l_rate
self.plotting = plotting
self.plots = defaultdict(list)
def compute_update(self, printing=False):
grad = self.net.calc_grad()
if self.net.debug:
self.net.check_grad(grad)
if printing:
train_err = self.net.error()
print "training error", train_err
# note: for SGD we'll just do the plotting when we print (since
# we're going to be doing a lot more, and smaller, updates)
if self.plotting:
self.plots["training error"] += [train_err]
return -self.l_rate * grad
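# --- Minimal usage sketch (illustration only; not part of the original file).
# Assumes a toy quadratic "network" exposing the attributes and methods the
# optimizers above rely on (W, dtype, debug, error(), calc_grad()); in real
# use the parent network wires up opt.net itself.
if __name__ == "__main__":
    class _QuadraticNet(object):
        def __init__(self, n=5):
            self.W = np.ones(n)
            self.dtype = np.float64
            self.debug = False
        def error(self, W=None):
            W = self.W if W is None else W
            return 0.5 * np.dot(W, W)
        def calc_grad(self):
            return self.W.copy()

    net = _QuadraticNet()
    opt = SGD(l_rate=0.1)
    opt.net = net  # assumption: normally set by the parent network
    for _ in range(100):
        net.W += opt.compute_update()
    print "final error", net.error()  # decays toward zero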
|
[
"nyalta21@gmail.com"
] |
nyalta21@gmail.com
|
e758aae3a0cdf6b7aa2f94ab6feb482aa02fcc0d
|
9288d0af61fb8fdaef6834472cd0085a6e5db61a
|
/demos/staticmethodsDemo.py
|
2b5cb6d014496e660580ed6c0d467d921188f5d2
|
[] |
no_license
|
Xerrex/OOP_python
|
3ed17dc695a809a2c3e091df8a11b154f51e5a88
|
1610ec74eac5dac4f07e4a85a3ea2acde9ff5d36
|
refs/heads/master
| 2021-09-02T05:17:30.128711
| 2017-12-30T18:17:30
| 2017-12-30T18:17:30
| 115,800,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
#! /usr/bin/python
from juice.Employee import Employee
import datetime
myday = datetime.date(2017,12,31)
print(myday)
print(Employee.is_workingday(myday))
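# A plausible sketch of the imported class (hypothetical -- the real
# juice/Employee.py is not shown). It illustrates the @staticmethod pattern
# this demo exercises: the check needs neither instance nor class state.
#
#     class Employee:
#         @staticmethod
#         def is_workingday(day):
#             # datetime.date.weekday(): Monday == 0 ... Sunday == 6
#             return day.weekday() < 5
#
# With such a definition, 2017-12-31 (a Sunday) would print False above.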
|
[
"alexkagai@outlook.com"
] |
alexkagai@outlook.com
|
2c1fc8d25010246935865616a7f2d77dbf36a205
|
ff739149fb1091fcd090b5e68ab4b98d9fec9262
|
/tests/unit/test_sitemap.py
|
7f58445883b0626a64a1c800b55009991b5a7c33
|
[
"MIT"
] |
permissive
|
zhuoranmusic/dash-docs
|
dcdab8a5543f6f3f10cb20d196148969bfe01943
|
3518869b195a7827fe661a90f9a2054c31680d44
|
refs/heads/master
| 2022-04-18T17:37:44.647847
| 2020-04-20T18:13:14
| 2020-04-20T18:13:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
import pytest
import sys
from generate_sitemap import create_sitemap
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_sitemap_is_updated():
with open('dash_docs/assets/sitemap.xml', 'r') as f:
saved_sitemap = f.read()
assert create_sitemap() == saved_sitemap
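# For orientation, a minimal sketch of what generate_sitemap.create_sitemap
# could look like (hypothetical -- the project's generate_sitemap module is
# not shown here); the test simply pins its output against the checked-in XML:
#
#     def create_sitemap(urls=("https://dash.plotly.com/",)):
#         entries = "\n".join(
#             "  <url><loc>{}</loc></url>".format(u) for u in urls)
#         return ('<?xml version="1.0" encoding="UTF-8"?>\n'
#                 '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n'
#                 + entries + '\n</urlset>\n')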
|
[
"chris@plot.ly"
] |
chris@plot.ly
|
4d052fe88307855300ccd985d3126d99eccec8f2
|
54c7aee57c82b0d449e07bb28f595fea440c7e6e
|
/lacusClient_p2pTest/app_infrastructure/networkManagement/restClient.py
|
fe56d9fd9a4370b2bb3453ff0f64103faa24d6f8
|
[
"MIT"
] |
permissive
|
tavog96/distribuidosProyecto
|
2a905fa9410221ad8fc316746080795738de4f90
|
8aee06ca580389412809353ac312c417aa1163fa
|
refs/heads/master
| 2020-07-30T11:49:09.353180
| 2019-11-12T21:32:09
| 2019-11-12T21:32:09
| 210,223,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
from ...app_core.interfaces.restClientControllerInterface import restClientControllerInterface
import requests
import json
class restClientController (restClientControllerInterface):
    # class-level defaults; __init__ overrides them per instance
    defaultPort = 50505
    remoteHostIP = ''
    def __init__(self, defaultPort, remoteHostIP):
        super().__init__()
        self.defaultPort = defaultPort
        self.remoteHostIP = remoteHostIP
    def getRemoteResourceList (self):
        # GET /resource -> JSON list of resources offered by the remote peer
        url = "http://"+self.remoteHostIP+':'+str(self.defaultPort)+'/resource'
        response = requests.get(url)
        if response.ok:
            responseContent = json.loads(response.content)
            return responseContent
        return False
    def getRemoteResourceCacheInfo (self, uidParam):
        # GET /resource/<uid> -> JSON cache metadata for a single resource
        url = "http://"+self.remoteHostIP+':'+str(self.defaultPort)+'/resource/'+uidParam
        response = requests.get(url)
        if response.ok:
            responseContent = json.loads(response.content)
            return responseContent
        return False
    def getRemoteChunkFile (self, uidParam, chunkNumber):
        # GET /chunk/<uid>/<n> -> raw bytes of chunk n; False on any HTTP error
        url = "http://"+self.remoteHostIP+':'+str(self.defaultPort)+'/chunk/'+uidParam+'/'+str(chunkNumber)
        response = requests.get(url)
        if response.ok:
            return response.content
        return False
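# Usage sketch (illustrative; the host, port, and the 'uid' field in the
# response are assumptions, not part of the original file):
#
#     controller = restClientController(defaultPort=50505,
#                                       remoteHostIP='192.168.0.10')
#     resources = controller.getRemoteResourceList()
#     if resources:
#         info = controller.getRemoteResourceCacheInfo(resources[0]['uid'])
#         chunk = controller.getRemoteChunkFile(resources[0]['uid'], 0)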
|
[
"tavog96@gmail.com"
] |
tavog96@gmail.com
|
0a5aa2fd854e65b4f7d5edabd1f8eb9ac9b50a6f
|
6d3279de4d94795fd015e3d5ebbb68eef5253720
|
/labs/python_basics/xls_to_csv.py
|
985083153c1c1d7dfd80f63fb06b5af3e6f55c74
|
[] |
no_license
|
GerganaTancheva123/ML_Python
|
a3fedd4d47d2cb41be06fe5049f4b240785958a0
|
379419e1d27d1d33622e69363fcc69b9a8975311
|
refs/heads/master
| 2022-03-09T04:28:29.574624
| 2022-02-18T16:42:51
| 2022-02-18T16:42:51
| 249,216,462
| 0
| 1
| null | 2020-09-26T19:11:43
| 2020-03-22T15:47:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
import pandas as pd
# note: read_excel takes neither 'encoding' nor 'index' keyword arguments
# (those belong to to_csv); passing them raises a TypeError in recent pandas
df = pd.read_excel('../../project/datasets/ames_datapreprocessing_knime.xlsx')
df.to_csv('../../project/datasets/ames_datapreprocessing_knime.csv', encoding='utf-8', index=False)
|
[
"gerganatancheva1@gmail.com"
] |
gerganatancheva1@gmail.com
|
aa9facefd2669ed057397d86449409e25ed9d148
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/_farm_beats_client.py
|
e987fc646fb78c5fa674aa650dfafae923c7d7bb
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 9,228
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import FarmBeatsClientConfiguration
from .operations import ApplicationDataOperations
from .operations import AttachmentsOperations
from .operations import BoundariesOperations
from .operations import CropsOperations
from .operations import CropVarietiesOperations
from .operations import FarmersOperations
from .operations import FarmOperationsOperations
from .operations import FarmsOperations
from .operations import FieldsOperations
from .operations import HarvestDataOperations
from .operations import ImageProcessingOperations
from .operations import OAuthProvidersOperations
from .operations import OAuthTokensOperations
from .operations import PlantingDataOperations
from .operations import ScenesOperations
from .operations import SeasonalFieldsOperations
from .operations import SeasonsOperations
from .operations import TillageDataOperations
from .operations import WeatherOperations
from . import models
class FarmBeatsClient(object):
"""APIs documentation for Azure AgPlatform DataPlane Service.
:ivar application_data: ApplicationDataOperations operations
:vartype application_data: azure.agrifood.farming.operations.ApplicationDataOperations
:ivar attachments: AttachmentsOperations operations
:vartype attachments: azure.agrifood.farming.operations.AttachmentsOperations
:ivar boundaries: BoundariesOperations operations
:vartype boundaries: azure.agrifood.farming.operations.BoundariesOperations
:ivar crops: CropsOperations operations
:vartype crops: azure.agrifood.farming.operations.CropsOperations
:ivar crop_varieties: CropVarietiesOperations operations
:vartype crop_varieties: azure.agrifood.farming.operations.CropVarietiesOperations
:ivar farmers: FarmersOperations operations
:vartype farmers: azure.agrifood.farming.operations.FarmersOperations
:ivar farm_operations: FarmOperationsOperations operations
:vartype farm_operations: azure.agrifood.farming.operations.FarmOperationsOperations
:ivar farms: FarmsOperations operations
:vartype farms: azure.agrifood.farming.operations.FarmsOperations
:ivar fields: FieldsOperations operations
:vartype fields: azure.agrifood.farming.operations.FieldsOperations
:ivar harvest_data: HarvestDataOperations operations
:vartype harvest_data: azure.agrifood.farming.operations.HarvestDataOperations
:ivar image_processing: ImageProcessingOperations operations
:vartype image_processing: azure.agrifood.farming.operations.ImageProcessingOperations
:ivar oauth_providers: OAuthProvidersOperations operations
:vartype oauth_providers: azure.agrifood.farming.operations.OAuthProvidersOperations
:ivar oauth_tokens: OAuthTokensOperations operations
:vartype oauth_tokens: azure.agrifood.farming.operations.OAuthTokensOperations
:ivar planting_data: PlantingDataOperations operations
:vartype planting_data: azure.agrifood.farming.operations.PlantingDataOperations
:ivar scenes: ScenesOperations operations
:vartype scenes: azure.agrifood.farming.operations.ScenesOperations
:ivar seasonal_fields: SeasonalFieldsOperations operations
:vartype seasonal_fields: azure.agrifood.farming.operations.SeasonalFieldsOperations
:ivar seasons: SeasonsOperations operations
:vartype seasons: azure.agrifood.farming.operations.SeasonsOperations
:ivar tillage_data: TillageDataOperations operations
:vartype tillage_data: azure.agrifood.farming.operations.TillageDataOperations
:ivar weather: WeatherOperations operations
:vartype weather: azure.agrifood.farming.operations.WeatherOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param endpoint: The endpoint of your FarmBeats resource (protocol and hostname, for example: https://{resourceName}.farmbeats.azure.net).
:type endpoint: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
endpoint, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{Endpoint}'
self._config = FarmBeatsClientConfiguration(credential, endpoint, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_data = ApplicationDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.attachments = AttachmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.boundaries = BoundariesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.crops = CropsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.crop_varieties = CropVarietiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farmers = FarmersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farm_operations = FarmOperationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farms = FarmsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.fields = FieldsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.harvest_data = HarvestDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.image_processing = ImageProcessingOperations(
self._client, self._config, self._serialize, self._deserialize)
self.oauth_providers = OAuthProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.oauth_tokens = OAuthTokensOperations(
self._client, self._config, self._serialize, self._deserialize)
self.planting_data = PlantingDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scenes = ScenesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.seasonal_fields = SeasonalFieldsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.seasons = SeasonsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tillage_data = TillageDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.weather = WeatherOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> FarmBeatsClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
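# --- Usage sketch (illustration; not part of the generated file) -------------
# Assumes the azure-identity package for the TokenCredential; the
# context-manager form relies on the __enter__/__exit__ methods defined above,
# and farmers.list() is the assumed listing operation.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.agrifood.farming import FarmBeatsClient
#
#     credential = DefaultAzureCredential()
#     with FarmBeatsClient(
#             credential=credential,
#             endpoint="https://<resourceName>.farmbeats.azure.net") as client:
#         for farmer in client.farmers.list():
#             print(farmer.id)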
|
[
"noreply@github.com"
] |
noreply@github.com
|