blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f724e169f0e72fb09bce266d3710febb7de1132d | 31eca08712e704566fb924093d7dde2e94348a79 | /examples/generate_from_image.py | fb955999ad646ac5d71ad769fdceaf31f6ea99fe | [
"MIT"
] | permissive | ZhengWenSEC2023/SF_temp | dae2b3282af8867549817d0865d1e2a1a235b90e | 48c31d94bd93bb18c05bfc19a6dab2d2206d6fba | refs/heads/main | 2023-01-08T16:59:28.642007 | 2020-11-04T09:12:25 | 2020-11-04T09:12:25 | 306,948,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | """
9. 19. 2020
by Zheng Wen
This file is used to generate 3D models from input 4-channel images
Run from anaconda console
"""
import torch
import torch.nn.parallel
import examples.recon.datasets as datasets
from examples.recon.utils import AverageMeter, img_cvt
import soft_renderer as sr
import soft_renderer.functional as srf
import examples.recon.models as models
import time
import os
import imageio
import numpy as np
import PIL
BATCH_SIZE = 100
IMAGE_SIZE = 64
CLASS_IDS_ALL = (
'02691156,02828884,02933112,02958343,03001627,03211117,03636649,' +
'03691459,04090263,04256520,04379243,04401088,04530566')
PRINT_FREQ = 100
SAVE_FREQ = 100
MODEL_DIRECTORY = '/mnt/zhengwen/model_synthesis/SoftRas/data/results/models/checkpoint_0210000.pth.tar'
DATASET_DIRECTORY = '/mnt/zhengwen/model_synthesis/SoftRas/data/datasets'
SIGMA_VAL = 0.01
IMAGE_PATH = ''
# arguments
class Args:
experiment_id = 'Sept_18_2020'
model_directory = MODEL_DIRECTORY
dataset_directory = DATASET_DIRECTORY
class_ids = CLASS_IDS_ALL
image_size = IMAGE_SIZE
batch_size = BATCH_SIZE
image_path = IMAGE_PATH
sigma_val = SIGMA_VAL
print_freq = PRINT_FREQ
save_freq = SAVE_FREQ
args = Args()
# setup model & optimizer
model = models.Model('/mnt/zhengwen/model_synthesis/SoftRas/data/obj/sphere/sphere_642.obj', args=args)
model = model.cuda()
state_dicts = torch.load(args.model_directory)
model.load_state_dict(state_dicts['model'], strict=False)
model.eval()
directory_output = '/mnt/zhengwen/model_synthesis/photo_from_life/123'
os.makedirs(directory_output, exist_ok=True)
directory_mesh = os.path.join(directory_output, args.experiment_id)
os.makedirs(directory_mesh, exist_ok=True)
IMG_PATH = '/mnt/zhengwen/model_synthesis/photo_from_life/texture'
end = time.time()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses1 = AverageMeter()
iou_all = []
images = []
img_list = sorted(os.listdir(IMG_PATH))
for img_name in img_list:
img = PIL.Image.open(os.path.join(IMG_PATH, img_name))
img = np.asanyarray(img)
images.append(img)
images = np.array(images)
images = images.transpose((0, 3, 1, 2))
images = np.ascontiguousarray(images)
images = torch.from_numpy(images.astype('float32') / 255.)
images = torch.autograd.Variable(images).cuda()
vertices, faces = model.reconstruct(images)
for k in range(len(img_list)):
print(k)
mesh_path = os.path.join(directory_output, img_list[k][:-4] + ".obj")
srf.save_obj(mesh_path, vertices[k], faces[k])
| [
"buaawenz@gmail.com"
] | buaawenz@gmail.com |
ed2c75fad5d5e39f4840aa3b8b02b3ca34f25f33 | abccdbf9b0849b47960c3c352870793405debfed | /0x06-python-classes/5-square.py | 110324fa78283dc13f9f68d46839961d8eb701db | [] | no_license | hunterxx0/holbertonschool-higher_level_programming | 88b1b0f31b536c6940f2e64a6924a06ba9cbf193 | 44064cf0722cd20d93f58b64ab185d2898770d73 | refs/heads/master | 2022-12-20T12:14:15.877147 | 2020-09-24T21:25:54 | 2020-09-24T21:25:54 | 259,276,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | #!/usr/bin/python3
"""a class Square that manages: size
"""
class Square:
"""a class Square with size management
"""
def __init__(self, size=0):
if isinstance(size, int):
if size < 0:
raise TypeError('size must be >= 0')
else:
self.__size = size
else:
raise TypeError('size must be an integer')
def area(self):
return self.__size ** 2
def my_print(self):
if self.size == 0:
print()
for x in range(self.size):
for y in range(self.size):
print('#', end='')
print()
@property
def size(self):
return self.__size
@size.setter
def size(self, size):
if isinstance(size, int):
if size < 0:
raise TypeError('size must be >= 0')
else:
self.__size = size
else:
raise TypeError('size must be an integer')
| [
"azouzimhamed@gmail.com"
] | azouzimhamed@gmail.com |
757629a5694442b320003a93821932d673584ac7 | a5e5f074d1c3ac90ec01114e9efd9e32a4014679 | /config/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfall_io.py | 6617410b1aaefa2f6d0a96bc80676011a7092aa3 | [
"MIT"
] | permissive | mhaack/home-assistant-config | a46a28842d9c71f670bd599c06911bcf4ccffdb5 | 3f15dce23b297c9f080f78171907fc7bde92d4ec | refs/heads/master | 2022-11-04T12:25:38.119829 | 2022-10-28T15:11:56 | 2022-10-28T15:11:56 | 186,481,992 | 33 | 5 | MIT | 2021-05-13T12:15:35 | 2019-05-13T19:24:20 | Python | UTF-8 | Python | false | false | 4,208 | py | import datetime
from html.parser import HTMLParser
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
TITLE = "AbfallPlus"
DESCRIPTION = (
"Source for AbfallPlus.de waste collection. Service is hosted on abfall.io."
)
URL = "https://www.abfallplus.de"
TEST_CASES = {
"Waldenbuch": {
"key": "8215c62763967916979e0e8566b6172e",
"f_id_kommune": 2999,
"f_id_strasse": 1087,
# "f_abfallarten": [50, 53, 31, 299, 328, 325]
},
"Landshut": {
"key": "bd0c2d0177a0849a905cded5cb734a6f",
"f_id_kommune": 2655,
"f_id_bezirk": 2655,
"f_id_strasse": 763,
# "f_abfallarten": [31, 17, 19, 218]
},
"Schoenmackers": {
"key": "e5543a3e190cb8d91c645660ad60965f",
"f_id_kommune": 3682,
"f_id_strasse": "3682adenauerplatz",
"f_id_strasse_hnr": "20417",
# "f_abfallarten": [691,692,696,695,694,701,700,693,703,704,697,699],
},
"Freudenstadt": {
"key": "595f903540a36fe8610ec39aa3a06f6a",
"f_id_kommune": 3447,
"f_id_bezirk": 22017,
"f_id_strasse": 22155,
},
"Ludwigshafen am Rhein": {
"key": "6efba91e69a5b454ac0ae3497978fe1d",
"f_id_kommune": "5916",
"f_id_strasse": "5916abteistrasse",
"f_id_strasse_hnr": 33,
},
"Traunstein": {
"key": "279cc5db4db838d1cfbf42f6f0176a90",
"f_id_kommune": "2911",
"f_id_strasse": "2374",
},
}
MODUS_KEY = "d6c5855a62cf32a4dadbc2831f0f295f"
HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
# Parser for HTML input (hidden) text
class HiddenInputParser(HTMLParser):
def __init__(self):
super().__init__()
self._args = {}
@property
def args(self):
return self._args
def handle_starttag(self, tag, attrs):
if tag == "input":
d = dict(attrs)
if d["type"] == "hidden":
self._args[d["name"]] = d["value"]
class Source:
def __init__(
self,
key,
f_id_kommune,
f_id_strasse,
f_id_bezirk=None,
f_id_strasse_hnr=None,
f_abfallarten=[],
):
self._key = key
self._kommune = f_id_kommune
self._bezirk = f_id_bezirk
self._strasse = f_id_strasse
self._strasse_hnr = f_id_strasse_hnr
self._abfallarten = f_abfallarten # list of integers
self._ics = ICS()
def fetch(self):
# get token
params = {"key": self._key, "modus": MODUS_KEY, "waction": "init"}
r = requests.post("https://api.abfall.io", params=params, headers=HEADERS)
# add all hidden input fields to form data
# There is one hidden field which acts as a token:
# It consists of a UUID key and a UUID value.
p = HiddenInputParser()
p.feed(r.text)
args = p.args
args["f_id_kommune"] = self._kommune
args["f_id_strasse"] = self._strasse
if self._bezirk is not None:
args["f_id_bezirk"] = self._bezirk
if self._strasse_hnr is not None:
args["f_id_strasse_hnr"] = self._strasse_hnr
for i in range(len(self._abfallarten)):
args[f"f_id_abfalltyp_{i}"] = self._abfallarten[i]
args["f_abfallarten_index_max"] = len(self._abfallarten)
args["f_abfallarten"] = ",".join(map(lambda x: str(x), self._abfallarten))
now = datetime.datetime.now()
date2 = now.replace(year=now.year + 1)
args["f_zeitraum"] = f"{now.strftime('%Y%m%d')}-{date2.strftime('%Y%m%d')}"
params = {"key": self._key, "modus": MODUS_KEY, "waction": "export_ics"}
# get csv file
r = requests.post(
"https://api.abfall.io", params=params, data=args, headers=HEADERS
)
# parse ics file
r.encoding = "utf-8" # requests doesn't guess the encoding correctly
ics_file = r.text
dates = self._ics.convert(ics_file)
entries = []
for d in dates:
entries.append(Collection(d[0], d[1]))
return entries
| [
"markus.haack@gmail.com"
] | markus.haack@gmail.com |
3ec345329b50b046e56932d8114be7f29c685799 | 8f6a9ff4c63fd24d145088077d5da1c3e4caaa3a | /notebook/investment/gold/goldDatamaintain.py | cba86364408209959a14a2327fce2bd61213f313 | [] | no_license | liaofuwei/pythoncoding | 6fd2afba0d27c4a4bbb4b2d321b3fa402a60d6fe | 966bd99459be933cf48287412a40e0c7a3d0b8e5 | refs/heads/master | 2021-07-15T10:34:57.701528 | 2017-10-10T05:27:13 | 2017-10-10T05:27:13 | 107,651,470 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,303 | py | __author__ = 'Administrator'
import quandl
import datetime
import pandas as pd
from bs4 import BeautifulSoup
import requests
import time
import urllib
import numpy as np
'''
从新浪行情获取comex 黄金实时数据
并以添加模式写入到csv中
'''
COMEXgold_Url ="http://hq.sinajs.cn/list=hf_GC"
COMEXgold_web_data = requests.get(COMEXgold_Url)
COMEXgold_soup = BeautifulSoup(COMEXgold_web_data.text,"lxml")
rawdata=COMEXgold_soup.get_text()
rawdataList=rawdata.split("\"")
data = rawdataList[1].split(",")
if len(data)==14:
comex_gold={
"real_price":data[0],
"price_change":data[1],
"buy_price":data[2],
"sell_price":data[3],
"high":data[4],
"low":data[5],
"time":data[6],
"close_price":data[7],
"open_price":data[8],
"pisition":data[9],
"date":data[12]
}
#comexData=pd.Series(comex_gold,index=['date','time','real_price','price_change','buy_price','sell_price','high','low','close_price',\
# 'open_price','pisition'])
comexData=pd.DataFrame(comex_gold,columns=['date','time','real_price','price_change','buy_price','sell_price','high','low','close_price',\
'open_price','pisition'],index=[0])
dt=str(datetime.datetime.now().date())
#comexData.to_csv("E:\\pythoncoding\\data\\gold\\COMEX_gold_"+dt+".csv")
#以添加模式加入到csv文件中
comexData.to_csv("E:\\pythoncoding\\data\\gold\\COMEX_gold_"+dt+".csv",mode='a',header=False)
'''
从新浪行情获取london 黄金实时数据
并以添加模式写入到csv中
'''
Londongold_Url ="http://hq.sinajs.cn/list=hf_XAU"
Londongold_web_data = requests.get(Londongold_Url)
Londongold_soup = BeautifulSoup(Londongold_web_data.text,"lxml")
rawdata=Londongold_soup.get_text()
rawdataList=rawdata.split("\"")
data = rawdataList[1].split(",")
if len(data)==14:
London_gold={
"real_price":data[0],
"price_change":data[1],
"buy_price":data[2],
"sell_price":data[3],
"high":data[4],
"low":data[5],
"time":data[6],
"close_price":data[7],
"open_price":data[8],
"pisition":data[9],
"date":data[12]
}
#comexData=pd.Series(comex_gold,index=['date','time','real_price','price_change','buy_price','sell_price','high','low','close_price',\
# 'open_price','pisition'])
LondonData=pd.DataFrame(London_gold,columns=['date','time','real_price','price_change','buy_price','sell_price','high','low','close_price',\
'open_price','pisition'],index=[0])
dt=str(datetime.datetime.now().date())
#LondonData.to_csv("E:\\pythoncoding\\data\\gold\\London_gold_"+dt+".csv")
#以添加模式加入到csv文件中
LondonData.to_csv("E:\\pythoncoding\\data\\gold\\London_gold_"+dt+".csv",mode='a',header=False)
'''
从新浪行情获取comex 白银实时数据
并以添加模式写入到csv中
'''
COMEXsilver_Url ="http://hq.sinajs.cn/list=hf_SI"
COMEXsilver_web_data = requests.get(COMEXsilver_Url)
COMEXsilver_soup = BeautifulSoup(COMEXsilver_web_data.text,"lxml")
rawdata=COMEXsilver_soup.get_text()
rawdataList=rawdata.split("\"")
data = rawdataList[1].split(",")
if len(data)==14:
comex_silver={
"real_price":data[0],
"price_change":data[1],
"buy_price":data[2],
"sell_price":data[3],
"high":data[4],
"low":data[5],
"time":data[6],
"close_price":data[7],
"open_price":data[8],
"pisition":data[9],
"date":data[12]
}
#comexData=pd.Series(comex_gold,index=['date','time','real_price','price_change','buy_price','sell_price','high','low','close_price',\
# 'open_price','pisition'])
comexData=pd.DataFrame(comex_silver,columns=['date','time','real_price','price_change','buy_price','sell_price','high','low','close_price',\
'open_price','pisition'],index=[0])
dt=str(datetime.datetime.now().date())
#comexData.to_csv("E:\\pythoncoding\\data\\gold\\COMEX_silver_"+dt+".csv")
#以添加模式加入到csv文件中
comexData.to_csv("E:\\pythoncoding\\data\\gold\\COMEX_silver_"+dt+".csv",mode='a',header=False)
'''
从新浪行情获取上海金交所 黄金T+D 实时数据
并以添加模式写入到csv中
'''
shanghaigold_TD_Url ="http://hq.sinajs.cn/list=SGE_AUTD"
shanghaigoldTD_web_data = requests.get(shanghaigold_TD_Url)
shanghaigoldTD_soup = BeautifulSoup(shanghaigoldTD_web_data.text,"lxml")
rawdata=shanghaigoldTD_soup.get_text()
rawdataList=rawdata.split("\"")
data = rawdataList[1].split(",")
if len(data)==18:
shanghai_goldTD={
"real_price":data[3],
"price_change":data[4],
"buy_price":data[10],
"sell_price":data[11],
"high":data[7],
"low":data[8],
"close_price":data[9],
"open_price":data[6],
"pisition":data[14],
"date":data[16]
}
#shanghaiData=pd.Series(shanghai_goldTD,index=['date','real_price','price_change','buy_price','sell_price','high','low','close_price',\
# 'open_price','pisition'])
shanghaiData=pd.DataFrame(shanghai_goldTD,columns=['date','real_price','price_change','buy_price','sell_price','high','low','close_price',\
'open_price','pisition'],index=[0])
dt=str(datetime.datetime.now().date())
#shanghaiData.to_csv("E:\\pythoncoding\\data\\gold\\shanghai_goldTD_"+dt+".csv")
#以添加模式加入到csv文件中
shanghaiData.to_csv("E:\\pythoncoding\\data\\gold\\shanghai_goldTD_"+dt+".csv",mode='a',header=False)
'''
从新浪行情获取上海金交所 白银T+D 实时数据
并以添加模式写入到csv中
'''
shanghaiSilver_TD_Url ="http://hq.sinajs.cn/list=SGE_AGTD"
shanghaiSilverTD_web_data = requests.get(shanghaiSilver_TD_Url)
shanghaiSilverTD_soup = BeautifulSoup(shanghaiSilverTD_web_data.text,"lxml")
rawdata=shanghaiSilverTD_soup.get_text()
rawdataList=rawdata.split("\"")
data = rawdataList[1].split(",")
if len(data)==18:
shanghai_silverTD={
"real_price":data[3],
"price_change":data[4],
"buy_price":data[10],
"sell_price":data[11],
"high":data[7],
"low":data[8],
"close_price":data[9],
"open_price":data[6],
"pisition":data[14],
"date":data[16]
}
#shanghaiData=pd.Series(shanghai_silverTD,index=['date','real_price','price_change','buy_price','sell_price','high','low','close_price',\
# 'open_price','pisition'])
shanghaiData=pd.DataFrame(shanghai_silverTD,columns=['date','real_price','price_change','buy_price','sell_price','high','low','close_price',\
'open_price','pisition'],index=[0])
dt=str(datetime.datetime.now().date())
#shanghaiData.to_csv("E:\\pythoncoding\\data\\gold\\shanghai_silverTD_"+dt+".csv")
#以添加模式加入到csv文件中
shanghaiData.to_csv("E:\\pythoncoding\\data\\gold\\shanghai_silverTD_"+dt+".csv",mode='a',header=False)
| [
"459193023@qq.com"
] | 459193023@qq.com |
7b2f0d7f8f0495df550f83cadc62f3dae6e2b2d2 | 000637e87e6483528549fdfb640681296e9d339b | /plugins/hyperlinks.py | d84ad008d65567593129370e3cc5caa17e63d12b | [
"Apache-2.0"
] | permissive | tarunjindl/cosr-back | c448666faaa101a94b68ad80e11c0728f7f9e603 | 45f87e80dff6d1d3d04ceeb2e3d4e430c6995a2b | refs/heads/master | 2021-01-14T14:32:49.031752 | 2016-07-30T06:08:50 | 2016-07-30T06:08:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | from pyspark.sql import types as SparkTypes
from cosrlib.plugins import Plugin
from cosrlib.spark import sql
class MostExternallyLinkedPages(Plugin):
""" Saves a list of most externally linked pages on a domain """
hooks = frozenset(["document_post_index", "spark_pipeline_action", "spark_pipeline_init"])
def spark_pipeline_init(self, sc, sqlc, schema, indexer):
schema.append(SparkTypes.StructField("external_links", SparkTypes.ArrayType(SparkTypes.StructType([
SparkTypes.StructField("href", SparkTypes.StringType(), nullable=False)
# TODO: link text
])), nullable=True))
def document_post_index(self, document, metadata):
""" Filters a document post-indexing """
metadata["external_links"] = [
{"href": row["href"].url} for row in document.get_external_hyperlinks()
]
def spark_pipeline_action(self, sc, sqlc, df, indexer):
domain = self.args["domain"]
if self.args.get("shuffle_partitions"):
sqlc.setConf("spark.sql.shuffle.partitions", self.args["shuffle_partitions"])
lines_df = sql(sqlc, """
SELECT
CONCAT(
regexp_replace(url_to, "^http(s?)://", ""),
" ",
COUNT(*),
" ",
CONCAT_WS(" ", COLLECT_LIST(url_from))
) r
FROM (
SELECT url url_from, EXPLODE(external_links.href) url_to
FROM df
WHERE size(external_links) > 0
) links
WHERE SUBSTRING(
PARSE_URL(links.url_to, "HOST"),
LENGTH(PARSE_URL(links.url_to, "HOST")) - %s,
%s
) == "%s"
GROUP BY regexp_replace(url_to, "^http(s?)://", "")
ORDER BY COUNT(*) DESC
""" % (len(domain), len(domain), domain), {"df": df})
if self.args.get("limit"):
lines_df = lines_df.limit(int(self.args["limit"]))
if self.args.get("partitions"):
lines_df = lines_df.coalesce(int(self.args["partitions"]))
lines_df.persist()
print "Number of destination URLs: %s" % lines_df.count()
if self.args.get("coalesce"):
lines_df = lines_df.coalesce(int(self.args["coalesce"]))
lines_df.write.text(
self.args["path"],
compression="gzip" if self.args.get("gzip") else "none"
)
return True
| [
"sylvain@sylvainzimmer.com"
] | sylvain@sylvainzimmer.com |
be5bddd74acf5188ae2561466ed2e19a33e6b684 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_094/ch74_2019_04_04_17_13_33_711735.py | d30f9fcfe2f3f9495b90198d21862f75a8a91b1d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | palavra = list(input("me fale uma palavra: "))
def conta_bigramas(palavra):
contagem = {}
i = 0
while i<len(palavra):
bi = [''.join(palavra[i:i+2])]
for e in bi:
if e in contagem:
contagem[e]+=1
else:
contagem[e]=1
i+=1
return contagem | [
"you@example.com"
] | you@example.com |
e518acd1066a2edf76fba8199bed5203dc37361c | a0b78bdb544958e454f331dc9f33f740b6dbb652 | /rosshow/src/librosshow/plotters.py | 87f0c0533df3ef73e0f44f529159c89ec32161c9 | [
"BSD-3-Clause"
] | permissive | AcutronicRobotics/rosshow | e870b45b2631a696ac1dcbe36e47527627147a36 | df6dafa64d87b5ac542fab7902a83c57d17de909 | refs/heads/master | 2020-05-04T09:10:58.498006 | 2019-04-01T18:31:00 | 2019-04-01T18:31:00 | 179,062,920 | 6 | 3 | null | 2019-04-02T11:26:39 | 2019-04-02T11:26:39 | null | UTF-8 | Python | false | false | 3,524 | py | import math
import numpy as np
import librosshow.termgraphics as termgraphics
class AnglePlotter(object):
def __init__(self, g, left = 0, right = 1, top = 0, bottom = 1):
self.g = g
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.angle = 0
def update(self, angle):
self.angle = angle
def plot(self):
width, height = self.g.shape
self.g.rect(
(int(self.left), int(self.top)),
(int(self.right), int(self.bottom)),
)
self.g.line(
(int(1 + self.left + (self.right - self.left)/2.0 - (self.right - self.left)/2.0*math.cos(self.angle)),
int(1 + self.top + (self.bottom - self.top)/2.0 + (self.bottom - self.top)/2.0*math.sin(self.angle))),
(int(1 + self.left + (self.right - self.left)/2.0 + (self.right - self.left)/2.0*math.cos(self.angle)),
int(1 + self.top + (self.bottom - self.top)/2.0 - (self.bottom - self.top)/2.0*math.sin(self.angle))),
)
class ScopePlotter(object):
def __init__(self, g, left = 0, right = 1, top = 0, bottom = 1, ymin = None, ymax = None, n = 128, title = None):
self.g = g
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.ymax = ymax
self.ymin = ymin
self.data = np.array([ np.nan ] * n, dtype = np.float32)
self.data[0] = 0.0
self.pointer = 0
self.title = title
def get_nice_scale_bound(self, value):
if value < 1e-6:
return 1.0
absvalue = np.abs(value)
abslogscale = np.ceil(np.log(absvalue) / np.log(10) * 3)
if abslogscale % 3 == 0:
return np.sign(value) * (1 * 10 ** (abslogscale / 3))
if abslogscale % 3 == 1:
return np.sign(value) * (2 * 10 ** ((abslogscale - 1) / 3))
if abslogscale % 3 == 2:
return np.sign(value) * (5 * 10 ** ((abslogscale - 2) / 3))
def update(self, value):
self.data[self.pointer] = value
self.pointer = (self.pointer + 1) % len(self.data)
def plot(self):
points = []
ymin = self.ymin
ymax = self.ymax
if ymin is None or ymax is None:
# Autoscale
ymax = self.get_nice_scale_bound(np.nanmax(self.data))
if np.nanmin(self.data) < 0:
ymin = -ymax
else:
ymin = 0.0
for i in range(len(self.data)):
if not np.isnan(self.data[i]):
points.append(
(int(float(i)/len(self.data)*(self.right - self.left) + self.left),
int((1.0 - (self.data[i] - ymin) / (ymax - ymin)) * (self.bottom - self.top) + self.top))
)
self.g.set_color(termgraphics.COLOR_WHITE)
for i in range(len(points) - 1):
self.g.line(points[i], points[i+1])
if self.title:
self.g.set_color((127, 127, 127))
self.g.text(self.title, (int((self.left + self.right) / 2 - 2 * len(self.title) / 2), int(self.top)))
self.g.set_color((63, 63, 63))
self.g.text("{:2.4f}".format(ymax).rstrip("0").rstrip("."), (int(self.left), int(self.top)))
self.g.text("{:2.4f}".format((ymax + ymin)/2).rstrip("0").rstrip("."), (int(self.left), int(self.top + (self.bottom - self.top) / 2 )))
self.g.text("{:2.4f}".format(ymin).rstrip("0").rstrip("."), (int(self.left), int(self.bottom)))
| [
"dheera@dheera.net"
] | dheera@dheera.net |
1d8207c5e65b0909fd892165306a025151dbdb7b | 8629f82f971f4e036c2b6358fe353a2c88bfd098 | /scripts/annotation/assign_synonyms_to_annotations_from_augustus_gff.py | 0cd6ecd78123b323d158c0fb3e1509c3ad021683 | [
"MIT"
] | permissive | mahajrod/MAVR | 92828fa1c191b5f8ed08f1ba33f1684df09742cd | 8c57ff5519f130357e36e6f12868bc997e52a8a7 | refs/heads/master | 2023-08-25T01:02:24.738724 | 2023-08-22T15:13:39 | 2023-08-22T15:13:39 | 21,181,911 | 11 | 6 | null | 2017-09-18T20:25:16 | 2014-06-24T21:45:57 | Python | UTF-8 | Python | false | false | 1,060 | py | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Tools.Annotation import AUGUSTUS
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_gff", action="store", dest="input_gff", required=True,
help="Input gff from AUGUSTUS")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-p", "--species_prefix", action="store", dest="species_prefix", required=True,
help="Species prefix to use in ids")
parser.add_argument("-n", "--number_of_digits_in_number", action="store", dest="number_of_digits_in_number", type=int,
default=8, help="Number of digits in id. Default - 8")
args = parser.parse_args()
AUGUSTUS.assign_synonyms_to_annotations_from_augustus_gff(args.input_gff, args.output_prefix, args.species_prefix,
number_of_digits_in_number=args.number_of_digits_in_number)
| [
"mahajrod@gmail.com"
] | mahajrod@gmail.com |
6ac594ab2f1141ec1102f30becfbf1867fd8e8f1 | fc1dadb3d5b6aee76317423191b2d0b1a3eaec48 | /02_第二月/10day/01_plan_wal.py | a9430a5314fcaf0346d4846439499559c3458c49 | [] | no_license | 2332256766/work | 9dacb9a0cce28b2f5de1e574bbead1a1e37ae0a6 | 24e15aa16949508eec0f25f45a8ad18b3bfa5993 | refs/heads/master | 2020-03-27T20:25:54.669064 | 2018-09-02T09:23:17 | 2018-09-02T09:23:17 | 147,069,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | #框架
import pygame
from sprites import *
class Plan_wal(object):
'''飞机大战主游戏'''
def __init__(self):
'''游戏初始化'''
self.sereen=pygame.display.set_mode(SCREEN_RECT.size)#屏幕
self.pygame.time.Clock()#时钟
self.__create_sprites()#精灵组
def Start_Game(self):
'''开始游戏'''
print('开始游戏...')
while True:
self.Clock.tike(60)
self.__Event_hardler(self)
self.__check_collide(self)
self.__UpDate_sprites()
self.
self.
def __Evenet_Key(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
PlaneGame.__game_over
def __Check_collide(self):
def __Screen(self):
self.pygame.display.update() 更新屏幕显示
def __UpDate_Srites(self):
def __Game_over(self):
| [
"2332256766@qq.com"
] | 2332256766@qq.com |
ee8f762690c3aee66d0ee657ebf195e4281628cf | 4781d9293b59a5072647bb179195b143c60621bd | /SW Expert Academy/1249_보급로/1249_보급로.py | 8cf447f6f7cd6f2decc6a4746c13f67125d85a6f | [] | no_license | chriskwon96/Algorithm_codes | bf98131f66ca9c091fe63db68b220527800069c9 | edb7b803370e87493dad4a38ee858bb7bb3fd31d | refs/heads/master | 2023-08-15T18:48:26.809864 | 2021-10-12T13:43:21 | 2021-10-12T13:43:21 | 387,803,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | di = [0, +1, 0, -1]
dj = [+1, 0, -1, 0]
def bfs():
global min_dis
while q:
x, y = q.pop(0)
dis = min_matrix[x][y] # [x,y]까지 도착하는데 걸린 지금까지의 최소거리
for k in range(4):
new_x, new_y = x + di[k], y + dj[k]
if 0 <= new_x <= N-1 and 0 <= new_y <= N-1 and (new_x or new_y): # G에 도착 전
if [new_x, new_y] == G: # G에 도착
if dis < min_dis: # G 새로운 최소 거리 갱신
min_dis = dis
else:
if dis+matrix[new_x][new_y] < min_matrix[new_x][new_y]: #새로운 최소거리 갱신
min_matrix[new_x][new_y] = dis+matrix[new_x][new_y]
q.append([new_x, new_y])
T = int(input())
for t in range(1, T+1):
N = int(input())
matrix = [list(map(int, input())) for _ in range(N)]
min_dis = 100 * 100 * 10
min_matrix = [[min_dis]*N for _ in range(N)]
min_matrix[0][0] = 0
S, G = [0, 0], [N-1, N-1]
q = [S]
bfs()
print('#{} {}'.format(t, min_dis))
| [
"chriskwon96@naver.com"
] | chriskwon96@naver.com |
5376893589b9fd21e00e15b6c5060c0bcf1ab938 | 650dbc2f9642ebccb3a8d1c79d45f56706fa39be | /tests/test_cli.py | bfd9aee9e623238294aa3fd2a90771891f3a0464 | [
"Apache-2.0"
] | permissive | Z2PackDev/symmetry_representation | 72f7f7b6b54c3d055b173833bbb1e60fcc9e7c38 | defb21f66831b2314226469b8bdb68ef84b7855b | refs/heads/develop | 2021-01-23T03:27:32.346583 | 2020-04-29T10:50:00 | 2020-04-29T10:50:00 | 86,079,575 | 6 | 3 | Apache-2.0 | 2020-04-29T10:50:02 | 2017-03-24T14:56:33 | Python | UTF-8 | Python | false | false | 1,616 | py | # (c) 2017-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the command-line interface ``symmetry-repr``.
"""
import tempfile
from click.testing import CliRunner
import symmetry_representation as sr
from symmetry_representation._cli import cli
def test_filter_symmetries_noop(unstrained_poscar, symmetries_file):
"""
Test that filtering symmetries does nothing when the unstrained structure
is given.
"""
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
runner.invoke(
cli, [
'filter-symmetries', '-s', symmetries_file, '-l',
unstrained_poscar, '-o', out_file.name
],
catch_exceptions=False
)
result = sr.io.load(out_file.name)
reference = sr.io.load(symmetries_file)
assert len(result) == len(reference)
assert len(result[1].symmetries) == len(reference[1].symmetries)
def test_filter_symmetries_strained(strained_poscar, symmetries_file):
"""
Test that filtering symmetries works when a strained structure is given.
"""
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
runner.invoke(
cli, [
'filter-symmetries', '-s', symmetries_file, '-l',
strained_poscar, '-o', out_file.name
],
catch_exceptions=False
)
result = sr.io.load(out_file.name)
reference = sr.io.load(symmetries_file)
assert len(result) == len(reference)
assert len(result[1].symmetries) == 4
| [
"greschd@gmx.ch"
] | greschd@gmx.ch |
3a0ebead1d4630b5c9353d068fb039ffb4eec76e | 83f443f454716d534eff57ef399f86aa9a267b20 | /f4_func_model/visual/0_feature_scatter_series.py | 3c349e597cf1f1a1cbf88b3b7af5d93cebeb401e | [] | no_license | Gilbert-Gb-Li/Artificial-Intelligence | fef400c9e767ba7e348e1f135164da1e13c04592 | 28247627eab50613c1a5bf67f70e979a0a9eecb2 | refs/heads/master | 2021-04-23T19:46:19.556837 | 2020-04-09T08:47:42 | 2020-04-09T08:47:42 | 249,986,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
"""
查看数据关系
"""
import pandas as pd
data_train = pd.read_csv("data/train.csv")
# print("看每列统计信息", data_train.describe())
# 连续型数据
import matplotlib.pyplot as plt
rm = data_train['Age']
medv = data_train['Survived']
'''
参数:
x: rm, 横坐标
y: medv, 纵坐标
c: medv, 打印的点坐标,也可以为颜色
'''
plt.scatter(rm, medv, c=medv)
rm = data_train['Fare']
medv = data_train['Survived']
plt.scatter(rm, medv, c='b')
plt.show() | [
"gb.l@foxmail.com"
] | gb.l@foxmail.com |
1025074231b68af591fa8350f174052a89662938 | 2f9d80880aa96d664f67e452b4c9b218f0b01194 | /scripts/serve.py | a54600be880b85b9bbb1092e0344167b54178ccc | [] | no_license | janezd/janezd.github.io | 81764cc45618f111dc7d667989869217b6a37d14 | db80592cc625d32f788816a3820675afef808af8 | refs/heads/master | 2021-06-18T22:34:28.474969 | 2021-03-18T11:03:08 | 2021-03-18T11:03:08 | 34,021,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import webbrowser
import http.server
import socketserver
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", 8000), Handler)
webbrowser.open_new_tab("http://127.0.0.1:8000")
httpd.serve_forever()
| [
"janez.demsar@fri.uni-lj.si"
] | janez.demsar@fri.uni-lj.si |
1d69114b5c7841ef7ea232770ae36d22a2c00b78 | 82b3bcc6467c93c8b84948e7df1ec32fe4c4b004 | /WEEK 5/Day 4/OOP-lesson-day4/code.py | 4e24fda0762e4cceb8040e801591512b560b69fa | [] | no_license | MrAch26/Developers_Institute | b7868fc94556bfced4cb53537278c3ec17991426 | 840c9710278af033ccdb5f5c3edd7a2a97476aba | refs/heads/master | 2023-03-28T19:31:11.666544 | 2021-04-06T06:54:20 | 2021-04-06T06:54:20 | 298,250,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # with open("jaime.txt", "a+") as f:
# f.write("jaime saucisses2\n")
# def getline(file_path, line_num):
# with open(file_path , 'r') as f:
# for i in range(line_num):
# line = f.readline()
# return line
# exe_lesson
with open('exe.txt','r') as f:
# print(f.read())
# print(f.readline(5))
character = f.readline()
print(character[0:5])
| [
"MrAch26@users.noreply.github.com"
] | MrAch26@users.noreply.github.com |
9162539551e9c8c53f4deadca2948187db610615 | 99a0d0aef548d7573a62076e2b118ed6b9cb68c9 | /transitionMatrix/creditratings/creditcurve.py | 69346d0ebc5d5964d49284e60d2775c6027e2f72 | [
"Apache-2.0"
] | permissive | sanjaymisri/transitionMatrix | cbf7a4311944e4cf008a07cd14393bfd40e00add | 6f390452fa2f78512abe625acef4acf4d73d8569 | refs/heads/master | 2023-05-27T10:02:41.146912 | 2021-06-09T07:52:23 | 2021-06-09T07:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,191 | py | # encoding: utf-8
# (c) 2017-2021 Open Risk (https://www.openriskmanagement.com)
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" This module provides objects related to credit curves
* CreditCurve_ implements the functionality of a collection of credit (default curves)
"""
import numpy as np
import pandas as pd
class CreditCurve(np.matrix):
""" The _`CreditCurve` object implements a typical collection of `credit curves <https://www.openriskmanual.org/wiki/Credit_Curve>`_.
The class inherits from numpy matrices and implements additional properties specific to curves.
"""
def __new__(cls, values=None, json_file=None, csv_file=None):
""" Create a new credit curve set. Different options for initialization are:
* providing values as a list of list
* providing values as a numpy array (The rows are the different curves, the columns are different periods)
* loading from a csv file
* loading from a json file
Without data, a default identity matrix is generated with user specified dimension
:param values: initialization values
:param json_file: a json file containing transition matrix data
:param csv_file: a csv file containing transition matrix data
:type values: list of lists or numpy array
:returns: returns a CreditCurve object
:rtype: object
.. note:: The initialization in itself does not validate if the provided values form indeed a credit curve
:Example:
.. code-block:: python
A = tm.CreditCurve(values=[[0.1, 0.2, 0.3], [0.2, 0.6, 0.8], [0.01, 0.02, 0.06]])
"""
obj = None
if values is not None:
# Initialize with given values
obj = np.asarray(values).view(cls)
elif json_file is not None:
# Initialize from file in json format
q = pd.read_json(json_file)
obj = np.asarray(q.values).view(cls)
elif csv_file is not None:
# Initialize from file in csv format
q = pd.read_csv(csv_file, index_col=None)
obj = np.asarray(q.values).view(cls)
# validation flag is set to False at initialization
obj.validated = False
# temporary dimension assignment (must validated for squareness)
obj.dimension = obj.shape[0]
return obj
def to_json(self, file):
"""
Write credit curves to file in json format
:param file: json filename
"""
q = pd.DataFrame(self)
q.to_json(file, orient='values')
def to_csv(self, file):
"""
Write credit curves to file in csv format
:param file: csv filename
"""
q = pd.DataFrame(self)
q.to_csv(file, index=None)
def to_html(self, file=None):
html_table = pd.DataFrame(self).to_html()
if file is not None:
file = open(file, 'w')
file.write(html_table)
file.close()
return html_table
def validate(self, accuracy=1e-3):
""" Validate required properties of a credit curve set. The following are checked
1. check that all values are probabilities (between 0 and 1)
2. check that values are non-decreasing
:param accuracy: accuracy level to use for validation
:type accuracy: float
:returns: List of tuples with validation messages
"""
validation_messages = []
curve_set = self
curve_set_size = curve_set.shape[0]
curve_set_periods = curve_set.shape[1]
# checking that values of curve_set are within allowed range
for i in range(curve_set_size):
for j in range(curve_set_periods):
if curve_set[i, j] < 0:
validation_messages.append(("Negative Probabilities: ", (i, j, curve_set[i, j])))
if curve_set[i, j] > 1:
validation_messages.append(("Probabilities Larger than 1: ", (i, j, curve_set[i, j])))
# checking monotonicity
for i in range(curve_set_size):
for j in range(1, curve_set_periods):
if curve_set[i, j] < curve_set[i, j - 1]:
validation_messages.append(("Curve not monotonic: ", (i, j)))
if len(validation_messages) == 0:
self.validated = True
return self.validated
else:
self.validated = False
return validation_messages
def hazard_curve(self):
""" Compute hazard rates
.. Todo:: Compute hazard rates
:return: TODO
"""
pass
def characterize(self):
""" Analyse or classify a credit curve according to its properties
* slope of hazard rate
.. Todo:: Further characterization
"""
pass
def print_curve(self, format_type='Standard', accuracy=2):
""" Pretty print a set of credit curves
:param format_type: formatting options (Standard, Percent)
:type format_type: str
:param accuracy: number of decimals to display
:type accuracy: int
"""
for s_in in range(self.shape[0]):
for s_out in range(self.shape[1]):
if format_type is 'Standard':
format_string = "{0:." + str(accuracy) + "f}"
print(format_string.format(self[s_in, s_out]) + ' ', end='')
elif format_type is 'Percent':
print("{0:.2f}%".format(100 * self[s_in, s_out]) + ' ', end='')
print('')
print('')
| [
"openrisk@outlook.com"
] | openrisk@outlook.com |
588d505ccf3c70d90d3b432b71dc6bfb0e86d02f | 27c94d7e040902d3cdadd5862b15e67ec2ee4b6e | /xautodl/xmisc/__init__.py | e47d2bcdd1ea061efebcc00defcb6e05832ea773 | [
"MIT"
] | permissive | D-X-Y/AutoDL-Projects | 8a0779a7710d809af2b052787928d8d34c14d0d9 | f46486e21b71ae6459a700be720d7648b5429569 | refs/heads/main | 2023-08-13T10:53:49.550889 | 2022-04-24T22:18:16 | 2022-04-24T22:18:16 | 168,538,768 | 989 | 197 | MIT | 2022-04-24T22:16:21 | 2019-01-31T14:30:50 | Python | UTF-8 | Python | false | false | 1,138 | py | #####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
#####################################################
"""The module and yaml related functions."""
from .module_utils import call_by_dict
from .module_utils import call_by_yaml
from .module_utils import nested_call_by_dict
from .module_utils import nested_call_by_yaml
from .yaml_utils import load_yaml
from .torch_utils import count_parameters
from .logger_utils import Logger
"""The data sampler related classes."""
from .sampler_utils import BatchSampler
"""The meter related classes."""
from .meter_utils import AverageMeter
"""The scheduler related classes."""
from .scheduler_utils import CosineParamScheduler, WarmupParamScheduler, LRMultiplier
def get_scheduler(indicator, lr):
if indicator == "warm-cos":
multiplier = WarmupParamScheduler(
CosineParamScheduler(lr, lr * 1e-3),
warmup_factor=0.001,
warmup_length=0.05,
warmup_method="linear",
)
else:
raise ValueError("Unknown indicator: {:}".format(indicator))
return multiplier
| [
"280835372@qq.com"
] | 280835372@qq.com |
d48b8916b6e44d99e3d4d91d89749b246e23a98a | 7900235c769094ba3ae6e135498569b2cb473908 | /econ_platform_core/databases/__init__.py | c9dfc5901ae469d89087f9a8a9a2a0a8122dce1d | [
"Apache-2.0"
] | permissive | dlcatt/platform | 0dc423e9209348f3dce73fc840c27473d3828ca4 | c80614d847ea83643bae597ee72ac1749cd3b401 | refs/heads/master | 2023-05-01T16:14:42.950098 | 2019-05-23T01:19:28 | 2019-05-23T01:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | """
Database managers.
Minimal implementation
Copyright 2019 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from econ_platform_core import PlatformConfiguration, SeriesMetadata
#
#
# class DatabaseManager(object):
# """
# This is the base class for Database Managers.
#
# Note: Only support full series replacement for now.
# """
# def __init__(self, name='Virtual Object'):
# self.Name = name
# # This is overridden by the AdvancedDatabase constructor.
# # By extension, everything derived from this base class (like the TEXT dabase is "not advanced."
# self.IsAdvanced = False
# if not name == 'Virtual Object':
# self.Code = PlatformConfiguration['DatabaseList'][name]
# self.ReplaceOnly = True
#
# def Find(self, ticker):
# """
# Can we find the ticker on the database? Default behaviour is generally adequate.
# :param ticker: str
# :return: SeriesMetadata
# """
# try:
# provider_code, query_ticker = ticker.split('@')
# except:
# return self._FindLocal(ticker)
# meta = SeriesMetadata()
# meta.ticker_local = ''
# meta.ticker_full = ticker
# meta.ticker_query = query_ticker
# meta.series_provider_code = provider_code
# meta.Exists = self.Exists(ticker)
# # Provider-specific meta data data not supported yet.
# return meta
#
# def _FindLocal(self, local_ticker):
# """
# Databases that support local tickers should override this method.
#
# :param local_ticker: SeriesMetadata
# :return:
# """
# raise NotImplementedError('This database does not support local tickers')
#
#
# def Exists(self, ticker):
# """
#
# :param ticker: str
# :return: bool
# """
# raise NotImplementedError()
#
# def Retrieve(self, series_meta):
# """
#
# :param series_meta: SeriesMetadata
# :return: pandas.Series
# """
# raise NotImplementedError()
#
# def GetMeta(self, full_ticker):
# raise NotImplementedError()
#
# def RetrieveWithMeta(self, full_ticker):
# """
# Retrieve both the meta data and the series. Have a single method in case there is
# an optimisation for the database to do both queries at once.
#
# Since we normally do not want the meta data at the same time, have the usual workflow to just
# use the Retrieve() interface.
#
# :param full_ticker: str
# :return: list
# """
# meta = self.GetMeta(full_ticker)
# ser = self.Retrieve(meta)
# return ser, meta
#
#
# def Write(self, ser, series_meta, overwrite=True):
# """
#
# :param ser: pandas.Series
# :param series_meta: SeriesMetadata
# :param overwrite: bool
# :return:
# """
# raise NotImplementedError() | [
"brianr747@gmail.com"
] | brianr747@gmail.com |
0176f7ee424b4384250fcb2bcc9d5cfbf3d321cf | 5983ea8a59cd0b9763e0eb0dfc7f26dfd2ba5e60 | /2019102962刘铎/control-demo.py | 759ffd567bd0c040b4ba92c527280b7ac8b0fa11 | [] | no_license | wanghan79/2020_Master_Python | 0d8bdcff719a4b3917caa76ae318e3f8134fa83a | b3f6f3825b66b93ec6c54ed6187f6c0edcad6010 | refs/heads/master | 2021-01-26T13:29:09.439023 | 2020-06-23T02:17:52 | 2020-06-23T02:17:52 | 243,442,589 | 11 | 6 | null | 2020-03-29T05:59:29 | 2020-02-27T05:55:52 | Python | UTF-8 | Python | false | false | 1,142 | py | #1.随机生成1000个[0,100]范围内的浮点随机数,1000个随机字符串,存放在set集合中;
# 2. 使用控制语句遍历这个集合,取出[20,50]之间的数字和包含子串“at”的字符串;
# 3.上述工作分别封装在不同的函数中,并采用主函数完成整体任务流程。下周课上我们根据这个练习讲解控制语句和函数封装相关内容。
import random
def Add(num1,num2):
a=set()
for i in range(num1):
a.add(random.uniform(0,100))
for i in range(num2):
randomlength = random.randrange(1, 12)
random_str = ''
base_str = 'ABCDEFGHIGKLMNOPQRSTUVWXYZabcdefghigklmnopqrstuvwxyz0123456789'
length = len(base_str) - 1
for i in range(randomlength):
random_str += base_str[random.randint(0, length)]
a.add(random_str)
return a
def Select(set1):
for i in set1:
if isinstance(i,float):
if i<=50 and i>=20:
print(i,end=',')
else:
if 'at' in i :
print(i,end=',')
if __name__=='__main__':
fin=Add(1000,1000)
Select(fin)
| [
"noreply@github.com"
] | wanghan79.noreply@github.com |
779a365b3ef110cd3b475470a76deef417130bc8 | 66e6360325b781ed0791868765f1fd8a6303726f | /TB2009/WorkDirectory/5112 Dump to Ntuple/DumpToNtuple_108531.py | beb4a244e31944150b74720b7d00c626bd476e32 | [] | no_license | alintulu/FHead2011PhysicsProject | c969639b212d569198d8fce2f424ce866dcfa881 | 2568633d349810574354ad61b0abab24a40e510e | refs/heads/master | 2022-04-28T14:19:30.534282 | 2020-04-23T17:17:32 | 2020-04-23T17:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("VlsbLook")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108531.root"),
streams = cms.untracked.vstring('Chunk699','HCAL_Trigger'),
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699)
)
process.dumpAdc = cms.EDAnalyzer('DumpAdcToNtuple',
Output = cms.untracked.string("file:/tmp/chenyi/Adc_108531.root")
)
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(1000)
)
)
process.p = cms.Path(process.tbunpack*process.dumpAdc)
| [
"yichen@positron01.hep.caltech.edu"
] | yichen@positron01.hep.caltech.edu |
20560e8f02d09e30d1635b30c30c3c81b5efe211 | 37f49f6979fdc6e87fd469e66ae8d89361a22a47 | /strings.py | e880d0efeb917cdce75f716e7144b79a4bc009ee | [] | no_license | liberbell/py23 | 7e720fb809f39b068096fa954163e4c8d8f1ed58 | ff4830fe11e0d8e1506d46693dba40589c1eff23 | refs/heads/master | 2022-11-22T01:50:24.191553 | 2020-07-19T23:58:12 | 2020-07-19T23:58:12 | 277,677,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | print("World")
x = "World"
print(x)
print(x[0])
print(x[3])
# print(x[6])
# x[0] = "B"
print(x)
a, b, c, d, e = x
print(a)
print(a, b, c, d, e)
# a, b, c, d = x
# print(x)
a, b, _, _, _ = x
print(b)
# print(input("How are you?: "))
# place = input("Where are you from?: ")
place = "New York City"
print(place)
print(len(place))
print(place[:9])
print(place.startswith("N"))
print(place.startswith("n"))
print(place.endswith("y"))
print(place.endswith("City"))
print(place.count("y"))
lower_place = place.lower()
print(lower_place)
upper_place = place.upper()
print(upper_place)
print(upper_place.count("Y"))
print(lower_place == upper_place)
print(place.find("C"))
print(place.find("York"))
print(place.index("Y"))
# print(place.index("c"))
print(place.find("c"))
split_place = place.split("k")
print(split_place)
print(place.count(" "))
split_space = place.split(" ")
print(split_space)
print(len(split_space))
print(split_place)
join_char = ","
print(join_char.join(split_space))
join_char1 = " "
print(join_char1.join(split_space))
print("|".join(split_space)) | [
"liberbell@gmail.com"
] | liberbell@gmail.com |
2dd42095e9421c895f3494b15b670b96a0ec549a | ac1bbabc7c1b3149711c416dd8b5f5969a0dbd04 | /Programming Basics/for_loop/hospital.py | ca528c6ecf42651342c32f3d7c31a485cbfe5bee | [] | no_license | AssiaHristova/SoftUni-Software-Engineering | 9e904221e50cad5b6c7953c81bc8b3b23c1e8d24 | d4910098ed5aa19770d30a7d9cdf49f9aeaea165 | refs/heads/main | 2023-07-04T04:47:00.524677 | 2021-08-08T23:31:51 | 2021-08-08T23:31:51 | 324,847,727 | 1 | 0 | null | 2021-08-08T23:31:52 | 2020-12-27T20:58:01 | Python | UTF-8 | Python | false | false | 461 | py | days = int(input())
patients_done = 0
patients_left = 0
doctors = 7
for day in range(1, days + 1):
patients = int(input())
if day % 3 == 0:
if patients_left > patients_done:
doctors += 1
if patients <= doctors:
patients_done += patients
else:
patients_done += doctors
patients_left += patients - doctors
print(f"Treated patients: {patients_done}.")
print(f"Untreated patients: {patients_left}.")
| [
"assiaphristova@gmail.com"
] | assiaphristova@gmail.com |
cb8680168861526242458981ed0a7054de71bd9e | ddce4bf452b7b68fb06403c7cabeba196229ce31 | /OTR/auto_pick_transfer/boxer_to_spi.py | a099e45e6308c0f3b258342f2b1af1881ba6f888 | [] | no_license | leschzinerlab/SPIDER | b01833dc984995dfbc50aa9a155e0f2353d7d18d | 5c46ed37ecf57097ea308dc1fdfed3e04e69ae63 | refs/heads/master | 2021-01-21T21:38:58.217903 | 2016-05-13T05:39:48 | 2016-05-13T05:39:48 | 15,805,142 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | #!/usr/bin/env python
import sys
#=============================
def convert_box_to_spi(boxfile):
f1 = open(boxfile,'r')
o1 = open('%s_picks.spi' %(boxfile[:-4]),'w')
count = 1
for line in f1:
l = line.split()
x = float(l[0])
y = float(l[1])
box = float(l[2])/2
o1.write('%i\t4\t%i\t%f\t%f\t1\n' %(count,count,x+box,y+box))
count = count + 1
return box
#==============================
def convert_spi_to_box(boxfile,boxsize):
o1 = open('%s_out.box'%(boxfile[:-4]),'w')
f1 = open(boxfile,'r')
for line in f1:
if line[1] == ';':
continue
l = line.split()
x = float(l[3])-boxsize/2
y = float(l[4])-boxsize/2
o1.write('%i\t%i\t%i\t%i\t-3\n' %(int(x),int(y),int(boxsize),int(boxsize)))
#==============================
if __name__ == "__main__":
#convert_box_to_spi('../14sep22z_14sep20a_00015hl_00_00012en_00.box')
boxsize = int(sys.argv[1])
convert_spi_to_box('output/final_unt_coordinates_00286.spi',boxsize)
convert_spi_to_box('output/final_tlt_coordinates_00286.spi',boxsize)
| [
"michael.a.cianfrocco@gmail.com"
] | michael.a.cianfrocco@gmail.com |
e1e070da34cfa9e61e4817861f995f9c638c1a96 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/ef48cef3ead37fffd0a38c365b629d206354545f-<test_tightbbox>-fix.py | 8babd472735c7bda2dd9a12a54483c2dc6c0b26a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | def test_tightbbox():
(fig, ax) = plt.subplots()
ax.set_xlim(0, 1)
t = ax.text(1.0, 0.5, 'This dangles over end')
renderer = fig.canvas.get_renderer()
x1Nom0 = 9.035
assert (abs((t.get_tightbbox(renderer).x1 - (x1Nom0 * fig.dpi))) < 2)
assert (abs((ax.get_tightbbox(renderer).x1 - (x1Nom0 * fig.dpi))) < 2)
assert (abs((fig.get_tightbbox(renderer).x1 - x1Nom0)) < 0.05)
assert (abs((fig.get_tightbbox(renderer).x0 - 0.679)) < 0.05)
t.set_in_layout(False)
x1Nom = 7.333
assert (abs((ax.get_tightbbox(renderer).x1 - (x1Nom * fig.dpi))) < 2)
assert (abs((fig.get_tightbbox(renderer).x1 - x1Nom)) < 0.05)
t.set_in_layout(True)
x1Nom = 7.333
assert (abs((ax.get_tightbbox(renderer).x1 - (x1Nom0 * fig.dpi))) < 2)
assert (abs((ax.get_tightbbox(renderer, bbox_extra_artists=[]).x1 - (x1Nom * fig.dpi))) < 2) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
b63155a8128ddb5565766f18bbbea2e189b4839b | b3cf22a22bb787f982be2ddeac98cebbb2d8885b | /test.py | 8b90094139e8ea8ebbc61301908cc1010142c313 | [] | no_license | mrvbrn/HB_project | c1fea7ec77826a70efad3540d98e389dc0eda7ab | 5295c12c364cb0913711e360f49a2ac2df9bfbcf | refs/heads/master | 2023-02-21T23:38:16.967424 | 2021-06-13T18:46:32 | 2021-06-13T18:46:32 | 170,412,352 | 0 | 0 | null | 2023-02-15T21:36:49 | 2019-02-13T00:22:02 | HTML | UTF-8 | Python | false | false | 2,257 | py | import unittest
from server import app
from model import connect_to_db, db, example_data
from flask import session
class FlaskTestsBasic(unittest.TestCase):
"""Flask tests."""
def setUp(self):
"""Stuff to do before every test."""
# Get the Flask test client
self.client = app.test_client()
# Show Flask errors that happen during tests
app.config['TESTING'] = True
def test_homepage(self):
"""Test homepage page."""
result = self.client.get("/")
self.assertIn(b"KidsAppBox", result.data)
def test_register_page(self):
"""Test register page"""
result = self.client.get('/register')
self.assertIn(b"Sign Up", result.data)
def login_page(self):
result = self.client.get('/login')
self.assertIn(b"Email Adress", result.data)
def test_top_games_page(self):
"""Test top twenty game"""
result = self.client.get('/top_twenty_games')
self.assertIn(b"top twenty games", result.data)
def test_details_of_games_page(self):
"""Test details of games page"""
result = self.client.get('/top_twenty_games')
self.assertIn(b"top twenty games", result.data)
class EmployeeTestsDatabase(unittest.TestCase):
"""Flask tests that use the database."""
def setUp(self):
"""Stuff to do before every test."""
# Get the Flask test client
self.client = app.test_client()
app.config['TESTING'] = True
# Connect to test database
connect_to_db(app, db_uri="postgresql:///testdb")
# Create tables and add sample data
db.create_all()
example_data()
def tearDown(self):
"""Do at end of every test."""
db.session.close()
db.drop_all()
def test_employee_register(self):
"""can employee register?"""
employee_info = {'employee_id': "111", 'fname': "Leonard", 'lname': "Asby", 'email': "leonard@gmail.com", 'password':"test", "confirm_password": "test"}
result = self.client.post("/register", data=employee_info, follow_redirects=True)
self.assertIn(b"already registered", result.data)
if __name__ == "__main__":
unittest.main() | [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
e2ffca9e3f6f943ade39555eef168acefb3a6676 | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/1530-Number-of-Good-Leaf-Nodes-Pairs/soln-1.py | a2765c34dec6fccf09dcdc84873588d9697aa6f5 | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 958 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def countPairs(self, root: TreeNode, distance: int) -> int:
self.cnt = 0
def postorder(node):
counter = [0] * 11
if node is None:
return counter
left = postorder(node.left)
right = postorder(node.right)
if node.left is None and node.right is None:
counter[0] += 1
return counter
else:
for i in range(1, distance):
for j in range(1, distance - i + 1):
self.cnt += left[i - 1] * right[j - 1]
for i in range(10):
counter[i + 1] += left[i] + right[i]
return counter
postorder(root)
return self.cnt
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
c9d676b686eefee10025d9c0fd8639ee241175fb | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/batch/v20170101/__init__.py | 5a70cabaca83ac30528e1461df74f943742c9200 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .list_batch_account_keys import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:batch/v20170101:Application":
return Application(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:batch/v20170101:ApplicationPackage":
return ApplicationPackage(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:batch/v20170101:BatchAccount":
return BatchAccount(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "batch/v20170101", _module_instance)
_register_module()
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
13e78cdefcbeecd1b4149eae2d48f0f2ebc5b95c | 3a093f6a40e8fb24957d277ad8f4b097d08c6d04 | /result/tools/search_interesting_events.py | c0e1bd0f554fbb96e0b1f7538f05d7f239d85422 | [] | no_license | dlont/FourTops2016 | ab9e953760e93b0e777b23478938efd30d640286 | 88c929bf98625735a92a31210f7233f799c5a10c | refs/heads/master | 2021-01-18T22:23:52.796080 | 2019-07-31T12:34:03 | 2019-07-31T12:34:03 | 72,439,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | #!/usr/bin/python
import ROOT as rt
import os
import pprint
CRANEEN_FILES_PATH = '/user/dlontkov/t2016/result/final_unblinding/filtered_samples/plots_mu_filt_custombinning_10J4M_JERSummer16_v0.0.41_weightbugfix_v3/Craneen_Data*.root' # wildcards are supported
TREE_NAME = 'Craneen__Mu' # must be Craneen__Mu or Craneen__El
ch = rt.TChain(TREE_NAME)
ch.Add(CRANEEN_FILES_PATH)
# HEADER OF THE TABLE
print '************************************************'
print '**** INTERESTING EVENTS *****'
print '************************************************'
print '{0: <15}:{1: <15}:{2: <20}{3: <10}{4: <10}{5: <10}{6: <10}{7: <10}'.format("Runnr", "LumiSec", "EventNr", "nJets", "HTX", "HTb", "BDT1", "topness")
N_READ_EVENTS = 0
N_SELECTED_EVENTS = 0
for ev in ch:
N_READ_EVENTS += 1
# Code your selection options here
if ev.nJets < 10: continue
if ev.nMtags < 4: continue
if ev.BDT1 < 0.8: continue
N_SELECTED_EVENTS += 1
# Print out Run, Lumi section, Event numbers for interesting events
print '{0: <15}:{1: <15}:{2: <20}{3: <10}{4: <10}{5: <10}{6: <10}{7: <10}{8}'.format(ev.Runnr,ev.Lumisec,ev.Evnr,
ev.nJets,round(ev.HTX,2),round(ev.HTb,2),round(ev.BDT1,3),round(ev.multitopness,3),
os.path.basename(ch.GetFile().GetName()))
#if ev.Evnr == 225770321:
if ev.Evnr == 548714092:
alljets = [round(el,1) for el in ev.jetvec]
alljets = [alljets[i:i+5] for i in range(0,len(alljets),5)]
pprint.pprint(alljets)
# FOOTER OF THE TABLE
print '************************************************'
print 'N EVENTS READ: ', N_READ_EVENTS
print 'N EVENTS SELECTED:', N_SELECTED_EVENTS
print '************************************************'
| [
"denys.lontkovskyi@cern.ch"
] | denys.lontkovskyi@cern.ch |
1289400e40bc5e9efdbeb1669148a4442fa4e748 | 3bbd8880fe7eb97e7145ae391a72e06083b8b77f | /ensemble/fincausal2021/resort.py | 04bc08fdce4b4b86fc71f4b28230c24cfb43f176 | [
"Apache-2.0"
] | permissive | Xianchao-Wu/fincausal2021 | 100822b72c362b8af82cbabfc0755ba5131a5ef6 | 58fbeef20e6b0c8982f46e85c09fdb5af2a53bf8 | refs/heads/master | 2023-07-29T18:13:14.376074 | 2021-09-16T13:56:37 | 2021-09-16T13:56:37 | 403,458,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py |
# resort ref
ref="fnp2020-train-full-eva.csv"
out="9951-albert-out/predictions.csv"
def readfn(afile):
outlist = list()
outdict = dict()
with open(afile) as br:
for aline in br.readlines():
aline = aline.strip()
outlist.append(aline)
cols = aline.split(';')
aid = cols[0]
outdict[aid] = aline
return outlist, outdict
reflist, refdict = readfn(ref)
outlist, outdict = readfn(out)
ref2 = ref.replace('.csv', '2.csv')
with open(ref2, 'w') as bw:
for aoutline in outlist:
aid = aoutline.split(';')[0]
arefline = refdict[aid]
bw.write(arefline+'\n')
print('done')
| [
"wuxianchao@gmail.com"
] | wuxianchao@gmail.com |
5712ca0d48ce626c9688c69645cda31f71b5ef62 | 329e9204350dee722f45370202b658290b7d9849 | /repl.py | 2f778e2a03e69b3dbf85c2e20bf9278c2f24cde1 | [] | no_license | 8l/rotten | 0f4ada4850f0b0001951b0eeccc0c05e82daed20 | cf50fb03f1724623c911b9e20a769186eb4c5944 | refs/heads/master | 2021-01-17T20:02:13.164754 | 2015-05-01T20:03:30 | 2015-05-01T20:03:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | #!/usr/bin/env python
import sys
import sexp
from sexp import Symbol
import vm
def read_all(f):
string = f.read()
buf, exps = sexp.parse_exps(buffer(string))
assert not buf # should have read to EOF
return exps
def read_file(filename):
with open(filename) as f:
return sexp.consify(read_all(f))
# exps is a cons-list of expressions
def write_file(filename, exps):
with open(filename, 'w') as f:
for e in sexp.cons_iter(exps):
sexp.write(f, e)
f.write('\n')
def vm_boot(filename="compile.rotc"):
print "booting up VM"
vmstate = vm.VM()
print "VM loading %s" % filename
vm_load(vmstate, filename)
print "VM loading {read,write}-file extensions"
vmstate.set_global(Symbol('read-file'), read_file)
vmstate.set_global(Symbol('write-file'), write_file)
return vmstate
def vm_load(vmstate, filename):
vmstate.run_body(read_file(filename))
def vm_call(vmstate, funcname, *args):
# perhaps I could use Thread.call somehow?
# it wasn't meant to be an external method, but maybe it could become one
instrs = sexp.consify(
[sexp.consify([Symbol("get-global"), Symbol(funcname)])]
+ [sexp.consify([Symbol("push"), x]) for x in args]
+ [sexp.consify([Symbol("call"), len(args)])])
return vmstate.run_expr(instrs)
def vm_compile_expr(vmstate, expr):
return vm_call(vmstate, "compile-exp", expr)
def vm_eval(vmstate, expr):
c = vm_compile_expr(vmstate, expr)
return vmstate.run_expr(c)
class QuitRepl(Exception): pass
def read_sexps():
# TODO: semicolons should start comments
string = ''
while True:
line = sys.stdin.readline()
if not line:
raise QuitRepl("end of input")
string += line
try:
buf, e = sexp.parse_exp(string)
except sexp.EOF:
# ran out of input before parsing a complete sexp, keep reading
continue
yield e
# if there's nothing else left on the line but whitespace, we're done reading sexps
if not buf.strip():
break
# copy the remainder of the string into a fresh string and keep reading
string = str(buf)
def repl(vmstate):
try:
while True:
sys.stdout.write('pyROTTEN> ')
sys.stdout.flush()
for exp in read_sexps():
if exp == sexp.consify([Symbol("unquote"), Symbol("quit")]):
raise QuitRepl(",quit command")
try:
val = vm_eval(vmstate, exp)
except vm.VMError as e:
sys.stdout.flush()
print >>sys.stderr, e
sys.stderr.flush()
else:
print sexp.to_str(val)
except QuitRepl:
pass
def main():
if len(sys.argv) > 1:
vmstate = vm_boot(sys.argv[1])
else:
vmstate = vm_boot()
repl(vmstate)
if __name__ == '__main__':
main()
| [
"daekharel@gmail.com"
] | daekharel@gmail.com |
3b890451d8e84d2c1c38874bc59fb384c13ab07c | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayMarketingCampaignDrawcampStatusUpdateResponse.py | 524c3b2334e3826283c9821b06b9c1a91f030918 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMarketingCampaignDrawcampStatusUpdateResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingCampaignDrawcampStatusUpdateResponse, self).__init__()
self._camp_result = None
@property
def camp_result(self):
return self._camp_result
@camp_result.setter
def camp_result(self, value):
self._camp_result = value
def parse_response_content(self, response_content):
response = super(AlipayMarketingCampaignDrawcampStatusUpdateResponse, self).parse_response_content(response_content)
if 'camp_result' in response:
self.camp_result = response['camp_result']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
a3de9793d06b682040ff3059ebf27c2cb29f1248 | 959792313c5cd27596f5cf613a825160c2879c6f | /algomaster/content/migrations/0003_auto_20150313_1915.py | 88f5d5643983cd8eed2800ff3bb5e7c732ea405e | [] | no_license | SDM-OS/algomaster | 8329e2b868b3822b46987da7c985390b273ee5ae | 82d4a07a6ef1cb7586a832709b7ca22aa72e5367 | refs/heads/master | 2020-12-24T15:04:58.475438 | 2015-03-14T01:42:55 | 2015-03-14T01:42:55 | 32,173,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('content', '0002_auto_20150313_1859'),
]
operations = [
migrations.AlterField(
model_name='content',
name='algo_type',
field=models.IntegerField(max_length=20, choices=[(1, b'Sorting'), (2, b'Search'), (3, b'Data Structure'), (4, b'Heaps'), (5, b'Graphs')]),
preserve_default=True,
),
]
| [
"shashiessp@gmail.com"
] | shashiessp@gmail.com |
c81c399e088ba52b6b71c1ff02612a6ac93bc464 | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | /python/dgl/_ffi/object.py | e6fa7cff6de78a86ee58c535edf9868f493fc457 | [
"Apache-2.0"
] | permissive | dmlc/dgl | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | refs/heads/master | 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 | Apache-2.0 | 2023-09-14T15:48:24 | 2018-04-20T14:49:09 | Python | UTF-8 | Python | false | false | 3,750 | py | """Object namespace"""
# pylint: disable=unused-import
from __future__ import absolute_import
import ctypes
import sys
from .. import _api_internal
from .base import _FFI_MODE, _LIB, c_str, check_call, py_str
from .object_generic import convert_to_object, ObjectGeneric
# pylint: disable=invalid-name
IMPORT_EXCEPT = RuntimeError if _FFI_MODE == "cython" else ImportError
try:
# pylint: disable=wrong-import-position
if _FFI_MODE == "ctypes":
raise ImportError()
if sys.version_info >= (3, 0):
from ._cy3.core import _register_object, ObjectBase as _ObjectBase
else:
from ._cy2.core import _register_object, ObjectBase as _ObjectBase
except IMPORT_EXCEPT:
# pylint: disable=wrong-import-position
from ._ctypes.object import _register_object, ObjectBase as _ObjectBase
def _new_object(cls):
"""Helper function for pickle"""
return cls.__new__(cls)
class ObjectBase(_ObjectBase):
"""ObjectBase is the base class of all DGL CAPI object.
The core attribute is ``handle``, which is a C raw pointer. It must be initialized
via ``__init_handle_by_constructor__``.
Note that the same handle **CANNOT** be shared across multiple ObjectBase instances.
"""
def __dir__(self):
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(
_LIB.DGLObjectListAttrNames(
self.handle, ctypes.byref(size), ctypes.byref(plist)
)
)
names = []
for i in range(size.value):
names.append(py_str(plist[i]))
return names
def __hash__(self):
return _api_internal._raw_ptr(self)
def __eq__(self, other):
return self.same_as(other)
def __ne__(self, other):
return not self.__eq__(other)
def __reduce__(self):
cls = type(self)
return (_new_object, (cls,), self.__getstate__())
def __getstate__(self):
# TODO(minjie): TVM assumes that a Node (Object in DGL) can be serialized
# to json. However, this is not true in DGL because DGL Object is meant
# for runtime API, so it could contain binary data such as NDArray.
# If this feature is required, please raise a RFC to DGL issue.
raise RuntimeError("__getstate__ is not supported for object type")
def __setstate__(self, state):
# pylint: disable=assigning-non-slot
# TODO(minjie): TVM assumes that a Node (Object in DGL) can be serialized
# to json. However, this is not true in DGL because DGL Object is meant
# for runtime API, so it could contain binary data such as NDArray.
# If this feature is required, please raise a RFC to DGL issue.
raise RuntimeError("__setstate__ is not supported for object type")
def same_as(self, other):
"""check object identity equality"""
if not isinstance(other, ObjectBase):
return False
return self.__hash__() == other.__hash__()
def register_object(type_key=None):
"""Decorator used to register object type
Examples
--------
>>> @register_object
>>> class MyObject:
>>> ... pass
Parameters
----------
type_key : str or cls
The type key of the object
"""
object_name = type_key if isinstance(type_key, str) else type_key.__name__
def register(cls):
"""internal register function"""
tindex = ctypes.c_int()
ret = _LIB.DGLObjectTypeKey2Index(
c_str(object_name), ctypes.byref(tindex)
)
if ret == 0:
_register_object(tindex.value, cls)
return cls
if isinstance(type_key, str):
return register
return register(type_key)
| [
"noreply@github.com"
] | dmlc.noreply@github.com |
bdb0fa445827b9a67ca72ba6bb2c873eba4b1c18 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /o7u9hqTW5AY3SoZgT_21.py | e84c8ccdcd92a87383990bc8775c95a8816a1a0c | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py |
def switcheroo(txt):
txt = list(txt)
for i in range(len(txt) - 3):
if not txt[i+3].isalnum():
if txt[i:i+3] == ['n', 't', 's']:
txt[i:i+3] = ['n', 'c', 'e']
elif txt[i:i+3] == ['n', 'c', 'e']:
txt[i:i+3] = ['n', 't', 's']
return "".join(txt)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c89d9c6dad7018f95fbd30f471e0e869486bf10f | 91deb97afda334c5366e560325995cf6b5407bee | /src/command_modules/azure-cli-eventgrid/azure/cli/command_modules/eventgrid/sdk/models/__init__.py | ad8c48d0ee3d4ffc607bd799001b45b0ee47d8e8 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | viananth/azure-cli | ab117c1b0b676026cbb57567544cd70630efe830 | 4d23492ed03e946cfc11bae23b29acb971fb137d | refs/heads/master | 2021-05-23T05:13:51.414113 | 2017-08-17T16:58:10 | 2017-08-17T16:58:10 | 95,239,804 | 0 | 0 | NOASSERTION | 2019-03-19T18:45:16 | 2017-06-23T17:01:34 | Python | UTF-8 | Python | false | false | 2,525 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.2.2.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .event_subscription_destination import EventSubscriptionDestination
from .event_subscription_filter import EventSubscriptionFilter
from .resource import Resource
from .event_subscription import EventSubscription
from .event_subscription_update_parameters import EventSubscriptionUpdateParameters
from .event_subscription_full_url import EventSubscriptionFullUrl
from .operation_info import OperationInfo
from .operation import Operation
from .tracked_resource import TrackedResource
from .topic import Topic
from .topic_shared_access_keys import TopicSharedAccessKeys
from .topic_regenerate_key_request import TopicRegenerateKeyRequest
from .event_type import EventType
from .topic_type_info import TopicTypeInfo
from .event_subscription_paged import EventSubscriptionPaged
from .operation_paged import OperationPaged
from .topic_paged import TopicPaged
from .event_type_paged import EventTypePaged
from .topic_type_info_paged import TopicTypeInfoPaged
from .event_grid_management_client_enums import (
EventSubscriptionProvisioningState,
EndpointType,
OperationOrigin,
TopicProvisioningState,
ResourceRegionType,
TopicTypeProvisioningState,
)
__all__ = [
'EventSubscriptionDestination',
'EventSubscriptionFilter',
'Resource',
'EventSubscription',
'EventSubscriptionUpdateParameters',
'EventSubscriptionFullUrl',
'OperationInfo',
'Operation',
'TrackedResource',
'Topic',
'TopicSharedAccessKeys',
'TopicRegenerateKeyRequest',
'EventType',
'TopicTypeInfo',
'EventSubscriptionPaged',
'OperationPaged',
'TopicPaged',
'EventTypePaged',
'TopicTypeInfoPaged',
'EventSubscriptionProvisioningState',
'EndpointType',
'OperationOrigin',
'TopicProvisioningState',
'ResourceRegionType',
'TopicTypeProvisioningState',
]
| [
"debekoe@microsoft.com"
] | debekoe@microsoft.com |
853efa675e5e54ced60886c3cc5a8a09cb72704d | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/refactoring/pullup/instanceNotDeclaredInInit.after.py | 812bd9ce6364e5c41d1b2a1c1b4dc11ac49605c4 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 132 | py | class Parent(object):
def __init__(self):
self.foo = 12
class Child(Parent):
def foo(self):
self.foo = 12
| [
"Ilya.Kazakevich@jetbrains.com"
] | Ilya.Kazakevich@jetbrains.com |
5f0745dea19783ffc21abbcb089c31654ed9f91a | 3ba8fe7ebb90b82a8bbc74a6ccb4417c1731d6b8 | /gui/about.py | c3a7a8a0026f45d60ef4cd790613f0eda5f58e99 | [] | no_license | Inter-Actief/JulianaNFC_Python | 347fab3e44dabd857d9f66acb602bdac3168f959 | 544f4e4c5b11f7d310b613f77023ed49770700ba | refs/heads/master | 2022-10-05T17:36:07.732058 | 2022-09-19T19:00:06 | 2022-09-19T19:00:06 | 119,987,329 | 0 | 0 | null | 2022-09-16T15:05:40 | 2018-02-02T13:48:53 | Python | UTF-8 | Python | false | false | 2,932 | py | import wx
import wx.adv
from juliana import resource_path
class AboutDialog(wx.Dialog):
def __init__(self, *args, **kwargs):
from juliana import APP_SUPPORT, APP_LINK
kwargs["style"] = kwargs.get("style", 0) | wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwargs)
self.SetSize((400, 350))
self.SetIcon(wx.Icon(resource_path("resources/main.png"), type=wx.BITMAP_TYPE_PNG))
self.hyperlink_2 = wx.adv.HyperlinkCtrl(self, wx.ID_ANY, APP_LINK, APP_LINK, style=wx.adv.HL_ALIGN_CENTRE)
self.hyperlink_3 = wx.adv.HyperlinkCtrl(self, wx.ID_ANY, APP_SUPPORT, f"mailto://{APP_SUPPORT}", style=wx.adv.HL_ALIGN_CENTRE)
self.button_2 = wx.Button(self, wx.ID_OK, "")
self.__set_properties()
self.__do_layout()
def __set_properties(self):
self.SetTitle(f"About JulianaNFC")
self.SetSize((400, 350))
def __do_layout(self):
from juliana import APP_NAME, APP_VERSION, APP_AUTHOR
grid_sizer_1 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1.Add((0, 10), 0, wx.EXPAND, 0)
label_1 = wx.StaticText(self, wx.ID_ANY, f"{APP_NAME} v{APP_VERSION}", style=wx.ALIGN_CENTER)
label_1.SetFont(wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, 0, ""))
grid_sizer_1.Add(label_1, 0, wx.ALIGN_CENTER, 8)
grid_sizer_1.Add((0, 8), 0, wx.EXPAND, 0)
static_line_1 = wx.StaticLine(self, wx.ID_ANY)
grid_sizer_1.Add(static_line_1, 0, wx.ALL | wx.EXPAND, 0)
grid_sizer_1.Add((0, 8), 0, wx.EXPAND, 0)
label_2 = wx.StaticText(self, wx.ID_ANY, f"JulianaNFC is a small tray application that allows scanning "
f"NFC cards to a websocket for use in web applications.", style=wx.ALIGN_CENTER)
label_2.Wrap(320)
grid_sizer_1.Add(label_2, 0, wx.ALIGN_CENTER, 8)
grid_sizer_1.Add((0, 8), 0, wx.EXPAND, 0)
label_3 = wx.StaticText(self, wx.ID_ANY, f"JulianaNFC was created by {APP_AUTHOR}", style=wx.ALIGN_CENTER)
label_3.Wrap(320)
grid_sizer_1.Add(label_3, 0, wx.ALIGN_CENTER, 8)
grid_sizer_1.Add((0, 8), 0, wx.EXPAND, 0)
label_6 = wx.StaticText(self, wx.ID_ANY, "For more information, check the GitHub:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(label_6, 0, wx.ALIGN_CENTER, 0)
grid_sizer_1.Add(self.hyperlink_2, 0, wx.ALIGN_CENTER, 0)
label_7 = wx.StaticText(self, wx.ID_ANY, "For support, mail the WWW-committee:", style=wx.ALIGN_CENTER)
grid_sizer_1.Add(label_7, 0, wx.ALIGN_CENTER, 0)
grid_sizer_1.Add(self.hyperlink_3, 0, wx.ALIGN_CENTER, 0)
grid_sizer_1.Add((0, 10), 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.button_2, 0, wx.ALIGN_CENTER, 0)
grid_sizer_1.Add((0, 10), 0, wx.EXPAND, 0)
self.SetSizer(grid_sizer_1)
grid_sizer_1.Fit(self)
self.Layout()
| [
"kevin@kevinalberts.nl"
] | kevin@kevinalberts.nl |
f2ab5f5e63062a237402ca2ffbc53bafd2682b65 | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/compute/alpha/get_instance_template.py | 54a39f0c6561be6f83e81c7ee3821cc744291a22 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 8,058 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetInstanceTemplateResult',
'AwaitableGetInstanceTemplateResult',
'get_instance_template',
'get_instance_template_output',
]
@pulumi.output_type
class GetInstanceTemplateResult:
def __init__(__self__, creation_timestamp=None, description=None, kind=None, name=None, properties=None, region=None, self_link=None, self_link_with_id=None, source_instance=None, source_instance_params=None):
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if self_link_with_id and not isinstance(self_link_with_id, str):
raise TypeError("Expected argument 'self_link_with_id' to be a str")
pulumi.set(__self__, "self_link_with_id", self_link_with_id)
if source_instance and not isinstance(source_instance, str):
raise TypeError("Expected argument 'source_instance' to be a str")
pulumi.set(__self__, "source_instance", source_instance)
if source_instance_params and not isinstance(source_instance_params, dict):
raise TypeError("Expected argument 'source_instance_params' to be a dict")
pulumi.set(__self__, "source_instance_params", source_instance_params)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
"""
The creation timestamp for this instance template in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> str:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def kind(self) -> str:
"""
The resource type, which is always compute#instanceTemplate for instance templates.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.InstancePropertiesResponse':
"""
The instance properties for this instance template.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def region(self) -> str:
"""
URL of the region where the instance template resides. Only applicable for regional resources.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
The URL for this instance template. The server defines this URL.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="selfLinkWithId")
def self_link_with_id(self) -> str:
"""
Server-defined URL for this resource with the resource id.
"""
return pulumi.get(self, "self_link_with_id")
@property
@pulumi.getter(name="sourceInstance")
def source_instance(self) -> str:
"""
The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance
"""
return pulumi.get(self, "source_instance")
@property
@pulumi.getter(name="sourceInstanceParams")
def source_instance_params(self) -> 'outputs.SourceInstanceParamsResponse':
"""
The source instance params to use to create this instance template.
"""
return pulumi.get(self, "source_instance_params")
class AwaitableGetInstanceTemplateResult(GetInstanceTemplateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInstanceTemplateResult(
creation_timestamp=self.creation_timestamp,
description=self.description,
kind=self.kind,
name=self.name,
properties=self.properties,
region=self.region,
self_link=self.self_link,
self_link_with_id=self.self_link_with_id,
source_instance=self.source_instance,
source_instance_params=self.source_instance_params)
def get_instance_template(instance_template: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceTemplateResult:
"""
Returns the specified instance template.
"""
__args__ = dict()
__args__['instanceTemplate'] = instance_template
__args__['project'] = project
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('google-native:compute/alpha:getInstanceTemplate', __args__, opts=opts, typ=GetInstanceTemplateResult).value
return AwaitableGetInstanceTemplateResult(
creation_timestamp=pulumi.get(__ret__, 'creation_timestamp'),
description=pulumi.get(__ret__, 'description'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
region=pulumi.get(__ret__, 'region'),
self_link=pulumi.get(__ret__, 'self_link'),
self_link_with_id=pulumi.get(__ret__, 'self_link_with_id'),
source_instance=pulumi.get(__ret__, 'source_instance'),
source_instance_params=pulumi.get(__ret__, 'source_instance_params'))
@_utilities.lift_output_func(get_instance_template)
def get_instance_template_output(instance_template: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceTemplateResult]:
"""
Returns the specified instance template.
"""
...
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
8ca9e51593e30c73f9bb25495144564d74b35b0a | 90d3b9467dcc6763865cad90a04a247cafcf5862 | /phone_lap/admin.py | 3689c839f351ea3abd323228aac7cfa79466eda0 | [] | no_license | vandat9xhn/django_1 | 0fa51515549eab04c27bdfeaf9e43650fe44dc70 | 6669e172d6b5a2a729dd31ea43d6c08f76b6e19c | refs/heads/master | 2023-06-23T19:46:26.558871 | 2021-07-26T15:11:12 | 2021-07-26T15:11:12 | 375,704,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.contrib import admin
#
from . import models
# Register your models here.
admin.site.register(models.PhoneLapModel)
admin.site.register(models.TypeModel)
admin.site.register(models.VidPicModel)
admin.site.register(models.OrderModel)
| [
"vandat9xiloveyou@gmail.com"
] | vandat9xiloveyou@gmail.com |
a2781d5a4bfc616f117c18260148142070a8ba7e | 889d85d5b12d099f3ba5ce0083a828181da9e973 | /2020-05-month-long-challenge/day27.py | dacee41890711cc1001d54b2f1d02638816db1d0 | [
"Unlicense"
] | permissive | jkbockstael/leetcode | def0fca6605e3432b979c145979a3f3de225d870 | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | refs/heads/master | 2021-05-23T08:54:15.313887 | 2020-09-24T20:53:46 | 2020-09-24T20:53:46 | 253,208,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | #!/usr/bin/env python3
# Day 27: Possible Bipartition
#
# Given a set of N people (numbered 1, 2, ..., N), we would like to split
# everyone into two groups of any size.
# Each person may dislike some other people, and they should not go into the
# same group.
# Formally, if dislikes[i] = [a, b], it means it is not allowed to put the
# people numbered a and b into the same group.
# Return true if and only if it is possible to split everyone into two groups
# in this way.
#
# Notes:
# - 1 <= N <= 2000
# - 0 <= dislikes.length <= 10000
# - 1 <= dislikes[i][j] <= N
# - dislikes[i][0] < dislikes[i][1]
# - There does not exist i != j for which dislikes[i] == dislikes[j].
class Solution:
def possibleBipartition(self, N: int, dislikes: [[int]]) -> bool:
# Parse the input as an undirected graph
dislike_graph = {}
for a, b in dislikes:
if a not in dislike_graph:
dislike_graph[a] = []
if b not in dislike_graph:
dislike_graph[b] = []
dislike_graph[a].append(b)
dislike_graph[b].append(a)
# Recursive assignment into groups:
# Assign a person to a group, then assign all their dislikes to the
# other group. If at some point we encounter a person that's already in
# a group and we should assign them to the other, this means the graph
# isn't bipartite.
groups = {}
def traverse(node, group):
if node in groups:
return groups[node] == group
groups[node] = group
group = 0 if group == 1 else 1
neighbors = dislike_graph[node] if node in dislike_graph else []
return all(traverse(neighbor, group) for neighbor in neighbors)
# This must hold true for everybody
return all(traverse(node, 0) for node in range(1, N + 1) \
if node not in groups)
# Tests
assert Solution().possibleBipartition(4, [[1,2],[1,3],[2,4]]) == True
assert Solution().possibleBipartition(3, [[1,2],[1,3],[2,3]]) == False
assert Solution().possibleBipartition(5, [[1,2],[2,3],[3,4],[4,5],[1,5]]) == False
assert Solution().possibleBipartition(4, []) == True
| [
"jkb@jkbockstael.be"
] | jkb@jkbockstael.be |
909b288704ecae9ad0d2579d7ae9711f752a5b9a | f0b8e22e3918cf89c13562459edd1579bf046fce | /main.py | bd30d105820381647fd971948f6f28548a954851 | [] | no_license | webclinic017/Algorithmic_Trading_Bot | 06b0a54830c91377d36445fd06d6a7d48fcfb5bf | 6dbc4d7624f94c3703abacea421b13c6cca04e7c | refs/heads/main | 2023-05-02T08:26:09.717877 | 2021-05-22T21:14:10 | 2021-05-22T21:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,776 | py | import numpy as np
class BreakoutAlgorithm(QCAlgorithm):
# initialize values
def Initialize(self):
# starting cash balance is $100,000 USD for testing purposes
self.SetCash(100000)
# set start and end dates for backtesting
self.SetStartDate(2017,9,1)
self.SetEndDate(2021,4,1)
# add asset to algorithm using .AddEquity. First parameter is S&P500 equity to add, second parameter is Daily resolution of data
self.symbol = self.AddEquity("SPY", Resolution.Daily).Symbol
# initialize number of days we will look back to determine breakout point in strategy. This will be changed dynamically based on changes in volatility
self.lookback = 20
# constraints so lookback length does not get too big or too small
# 30 days for upper limit, 10 days for lower limit
self.ceiling, self.floor = 30, 10
# how close first stop loss is to security price. initial value of 0.98 will allow for 2 percent loss before it gets triggered
self.initialStopRisk = 0.98
# how close trailing stop will follow asset's price. initial value of 0.9 will trail for 10%
self.trailingStopRisk = 0.9
# first parameter specifies which days method is called, second parameter specifies the time method is called (20 minutes after market open below), third specifies which method is called
self.Schedule.On(self.DateRules.EveryDay(self.symbol), \
self.TimeRules.AfterMarketOpen(self.symbol, 20), \
Action(self.EveryMarketOpen))
# method called every time algorithm receives new data. method will then decide what action to take with data
def OnData(self, data):
# first argument is name of chart to create, second is name of plot, last is data of plot
self.Plot("Data Chart", self.symbol, self.Securities[self.symbol].Close)
def EveryMarketOpen(self):
# history method returns close, high, low and open price over the past 31 days, although we want only the close
close = self.History(self.symbol, 31, Resolution.Daily)["close"]
# to calculate volatility, we take standard deviation of the closing price over the past 30 days for the current day
todayvol = np.std(close[1:31])
# calculate volatility through standard deviation of the closing price over past 30 days for day before
yesterdayvol = np.std(close[0:30])
# calculate change between volatility of two days
deltavol = (todayvol - yesterdayvol) / todayvol
# multiply current lookback length by (change in delta) + 1; ensures that lookback length increases when volatility increases, and vice-versa
self.lookback = round(self.lookback * (1 + deltavol))
# check if lookback length is within previously defined upper and lower limits. If it is not, ensure that it is, otherwise do nothing
if self.lookback > self.ceiling:
self.lookback = self.ceiling
elif self.lookback < self.floor:
self.lookback = self.floor
# check if breakout is happening. Once again, history returns, in this case, the high price over the period in the lookback length
self.high = self.History(self.symbol, self.lookback, Resolution.Daily)['high']
# prior to securing any position, verify two things
# 1) that pre-existing investment does not exist
# 2) verify if last closing price is higher that highest high (self.high variable); last variable is left out so as to not compare yesterday's high to yesterday's close
# if both conditions are met, purchase SPY at market price using SetHoldings
if not self.Securities[self.symbol].Invested and self.Securities[self.symbol].Close >= max(self.high[:-1]):
# first parameter is SPY, second is percentage of portfolio that should be allocated to purchase (1 for 100% for demonstration)
self.SetHoldings(self.symbol, 1)
self.breakoutlvl = max(self.high[:-1])
self.highestPrice = self.breakoutlvl
# implement trailing stop loss, which is only relevant if there is already a pre-existing open position
if self.Securities[self.symbol].Invested:
# verify there are no open orders; below GetOpenOrders function will return a collection of orders for security
if not self.Transactions.GetOpenOrders(self.symbol):
# send stoploss order; 1st argument is the security, second is the number of shares, third is stoploss price
# self.Portfolio[].Quantity will return current number of shares owned, minus indicates sell-order
# stoploss price is calculated by multiplying breakout level by initialized stop risk of 0.98, giving risk of 2%
self.stopMarketTicket = self.StopMarketOrder(self.symbol, -self.Portfolio[self.symbol].Quantity, self.initialStopRisk * self.breakoutlvl)
# increase stoploss everytime security makes new highs. if no new highs are attained, stoploss remains unchanged
# conditions mets only if trading stop loss is not below initial stop loss price
if self.Securities[self.symbol].Close > self.highestPrice and self.initialStopRisk * self.breakoutlvl < self.Securities[self.symbol].Close * self.trailingStopRisk:
# if conditions are met, set highest price to latest closing price
self.highestPrice = self.Securities[self.symbol].Close
# create UpdateOrderFields() object to update order price of stop loss so that it rises together with securities price
updateFields = UpdateOrderFields()
# new price is calculated by multiplying trailing stop risk, initialized to 0.9, by latest closing price
updateFields.StopPrice = self.Securities[self.symbol].Close * self.trailingStopRisk
# update existing stoploss order
self.stopMarketTicket.Update(updateFields)
# print new stop price to console so that nee order price can be checked every time it is updated
self.Debug(updateFields.StopPrice)
# plot stop price of position onto previous data chart; allows for viewing where stoploss is relative to trading price of securities
self.Plot("Data Chart", "Stop Price", self.stopMarketTicket.Get(OrderField.StopPrice))
| [
"noreply@github.com"
] | webclinic017.noreply@github.com |
895a3ba4f29b6171028b74b06df26a4eecdb1211 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/488/usersdata/343/112929/submittedfiles/AvF_Parte3.py | 8f125232148c670250592e8b7b285e48bc5f246a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # -*- coding: utf-8 -*-
n = int(input('Quantidade de números: '))
x = []
y = []
for i in range(0,n,1):
x.append(int(input('Digite os números: ')))
y.append(x[i//2==0])
print (x)
print (y) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
77f285f6ace1b7587a27d991e9cf335a954f712d | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4179/codes/1679_1079.py | 10d13d959e10e7ad60f60284638a1cbac9416761 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # Ao testar sua solução, não se limite ao caso de exemplo.
from math import *
# Leitura dos lados do triangulo a, b, and c
a = float(input ("Lado 1: "))
b = float(input ("Lado 2: "))
c = float(input ("Lado 3: "))
print("Entradas:", a, ",", b, ",", c)
# Testa se pelo menos uma das entradas eh negativa
if (a > 0 and b > 0 and c > 0):
# Testa se medidas correspondem aas de um triangulo
if (a + b > c and a + c > b and b + c > a):
s = (a + b + c) / 2.0
area = sqrt(s * (s-a) * (s-b) * (s-c))
area = round(area, 3)
print("Area:", area)
else:
print("Area: invalida")
else:
print("Area: invalida")
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
4b63125ea3c6e629a651b3dae35c601e92fb28cc | 651a296c8f45b5799781fd78a6b5329effe702a0 | /test_mat/c8_i.py | 5d6bfc381903ec3b4ba2c3634adfe791e1286c6c | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | #!/usr/bin/env python
def c8_i ( ):
#*****************************************************************************80
#
## C8_I returns the value of the imaginary unit as a C8.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 13 February 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Output, real VALUE, the value of the imaginary unit.
#
value = 1j
return value
def c8_i_test ( ):
#*****************************************************************************80
#
## C8_I_TEST tests C8_I.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 13 February 2015
#
# Author:
#
# John Burkardt
#
print ''
print 'C8_I_TEST'
print ' C8_I returns the value of the imaginary unit.'
c1 = c8_i ( )
print ''
print ' C1=C8__I ( ) = (%g,%g)' % ( c1.real, c1.imag )
c2 = c1 * c1
print ''
print ' C2= C1 * C1 = (%g,%g)' % ( c2.real, c2.imag )
print ''
print 'C8_I_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
c8_i_test ( )
timestamp ( )
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
be8ac5e8e6d033935f5bb1ae33efbaa7be138122 | b2755ce7a643ae5c55c4b0c8689d09ad51819e6b | /anuvaad-etl/anuvaad-extractor/document-processor/layout-detector/prima/src/utilities/request_parse.py | a9a266d0ae5d159187cfcf571ac3114685025490 | [
"MIT"
] | permissive | project-anuvaad/anuvaad | 96df31170b27467d296cee43440b6dade7b1247c | 2bfcf6b9779bf1abd41e1bc42c27007127ddbefb | refs/heads/master | 2023-08-17T01:18:25.587918 | 2023-08-14T09:53:16 | 2023-08-14T09:53:16 | 265,545,286 | 41 | 39 | MIT | 2023-09-14T05:58:27 | 2020-05-20T11:34:37 | Jupyter Notebook | UTF-8 | Python | false | false | 1,578 | py | import src.utilities.app_context as app_context
from anuvaad_auditor.loghandler import log_exception
import copy, json
import os
import config
def log_error(method):
    """Decorator: run *method*; on any exception, log it and return None.

    Used on accessor methods so a missing key in the parsed-document dict
    degrades to a logged None instead of a raised KeyError.
    """
    from functools import wraps  # local import keeps the module's import block untouched

    @wraps(method)  # preserve __name__/__doc__ of the wrapped accessor
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except Exception as e:
            log_exception('Invalid request, required key missing of {}'.format(e),
                          app_context.application_context, e)
            return None
    return wrapper
class File:
    """Read-only accessor over one parsed document-file dict.

    Every getter is wrapped by log_error, so a missing key yields None
    (with a logged exception) instead of raising KeyError.
    """

    def __init__(self, file):
        # `file` is the parsed JSON structure for a single input document.
        self.file = file

    @log_error
    def get_format(self):
        return self.file['file']['type']

    @log_error
    def get_name(self):
        return self.file['file']['name']

    @log_error
    def get_pages(self):
        return self.file['page_info']

    @log_error
    def get_words(self, page_index):
        return self.file['pages'][page_index]['words']

    @log_error
    def get_lines(self, page_index):
        return self.file['pages'][page_index]['lines']

    @log_error
    def get_regions(self, page_index):
        return self.file['pages'][page_index]['regions']
def get_files(application_context):
    """Return a deep copy of the request's input file descriptors.

    Deep copy so downstream mutation cannot corrupt the original request
    held in `application_context`.
    """
    files = copy.deepcopy(application_context['input']['inputs'])
    return files
def get_json(name):
    """Load the JSON file *name* under config.BASE_DIR and return its 'outputs'."""
    path = os.path.join(config.BASE_DIR, name)
    with open(path, "r") as f:
        data = json.load(f)  # equivalent to json.loads(f.read()), without the extra string
    json_data = data['outputs']
    return json_data
| [
"srihari.nagaraj@tarento.com"
] | srihari.nagaraj@tarento.com |
8e5b64d18295f8d83d0d1111063411f0aef63b2f | 54c15fb13c5a3afd8cafb5800d0bcea3887f1614 | /third_party/top/api/rest/ItemSkuGetRequest.py | b14710596ccea624b6287272368a7c8f3ff19190 | [
"BSD-3-Clause"
] | permissive | brain-zhang/ihuilife | 30fa2f122c51e1d5949824b8b8655d7bcc8794aa | 57bd7d45e306084d2518f64da81884e56a68c95e | refs/heads/master | 2021-05-28T09:23:25.629581 | 2012-08-30T14:46:05 | 2012-08-30T14:46:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | '''
Created by auto_sdk on 2012-08-07 12:41:14
'''
from top.api.base import RestApi
class ItemSkuGetRequest(RestApi):
    """taobao.item.sku.get request (generated by auto_sdk, 2012-08-07).

    Request parameters are plain attributes, filled in by the caller
    before the request is issued via the RestApi base class.
    """

    def __init__(self, domain, port):
        RestApi.__init__(self, domain, port)
        self.fields = None
        self.nick = None
        self.num_iid = None
        self.sku_id = None

    def getapiname(self):
        # API method name consumed by the RestApi dispatcher.
        return 'taobao.item.sku.get'
| [
"memoryboxes@gmail.com"
] | memoryboxes@gmail.com |
df0d567f148d7585f7a981ec311e46187cb22f56 | daca1b98836d0ecc2ce3363b243a548316edbb6b | /core/cooggerapp/urls/sitemap.py | 11a6e6b6cd23b76a2dceefdadcad4f8f5d8ded11 | [
"MIT"
] | permissive | emregeldegul/coogger | c1e3de3e62e39f062198182de68890b9d93ca3c1 | 9aaf2d5904438d91b9bb403ce3c739a5431bdaac | refs/heads/master | 2020-07-26T13:19:05.353746 | 2019-09-15T20:56:11 | 2019-09-15T20:56:11 | 208,656,584 | 2 | 0 | MIT | 2019-09-15T20:57:58 | 2019-09-15T20:57:58 | null | UTF-8 | Python | false | false | 887 | py | from django.contrib.sitemaps.views import sitemap
from django.urls import path
from ..views.sitemap import (
CommitSitemap, ContentSitemap, IssueSitemap, LanuageSitemap, TopicSitemap,
UserSitemap, UtopicSitemap, robots
)
# Robots + per-model sitemap endpoints; each sitemap view receives its Sitemap instance.
urlpatterns = [
    path("robots.txt/", robots),
    path("sitemap/topic.xml/", sitemap, {"sitemaps": {"topic": TopicSitemap()}}),
    path(
        "sitemap/language.xml/", sitemap, {"sitemaps": {"language": LanuageSitemap()}}
    ),
    path("sitemap/utopic.xml/", sitemap, {"sitemaps": {"utopic": UtopicSitemap()}}),
    path("sitemap/content.xml/", sitemap, {"sitemaps": {"content": ContentSitemap()}}),
    path("sitemap/user.xml/", sitemap, {"sitemaps": {"user": UserSitemap()}}),
    path("sitemap/issue.xml/", sitemap, {"sitemaps": {"issue": IssueSitemap()}}),
    path("sitemap/commit.xml/", sitemap, {"sitemaps": {"commit": CommitSitemap()}}),
]
| [
"hakancelik96@outlook.com"
] | hakancelik96@outlook.com |
6d48a172d41a67fb9b00438df994d0d7511716eb | 9023909d2776e708755f98d5485c4cffb3a56000 | /oneflow/python/test/dataloader/test_numpy_dataset.py | 56e5c44ac1d8a2ff819b65947f223bcd51596971 | [
"Apache-2.0"
] | permissive | sailfish009/oneflow | f6cf95afe67e284d9f79f1a941e7251dfc58b0f7 | 4780aae50ab389472bd0b76c4333e7e0a1a56ef7 | refs/heads/master | 2023-06-24T02:06:40.957297 | 2021-07-26T09:35:29 | 2021-07-26T09:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.python.utils.data as Data
class ScpDataset(Data.Dataset):
    """Synthetic dataset yielding reproducible Gaussian chunks.

    Seeding numpy with the item index makes each __getitem__ deterministic,
    so shuffled loading is still verifiable.
    """

    def __init__(self, chunksize=200, dim=81, length=2000):
        self.chunksize = chunksize
        self.dim = dim
        self.length = length

    def __getitem__(self, index):
        np.random.seed(index)
        return np.random.randn(self.chunksize, self.dim)

    def __len__(self):
        return self.length


@flow.unittest.skip_unless_1n1d()
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestNumpyDataset(flow.unittest.TestCase):
    def test_numpy_dataset(test_case):
        dataset = ScpDataset()
        dataloader = Data.DataLoader(dataset, batch_size=16, shuffle=True)
        # Every batch must stack 16 samples of shape (200, 81).
        for X in dataloader:
            test_case.assertEqual(X.shape, flow.Size([16, 200, 81]))


if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | sailfish009.noreply@github.com |
23608989e8ac841e9d27fc8b1febcd0ff126cec0 | df38290d655488d21903e13be947442fb0a38158 | /tests/test_settings.py | ae9b6dbe71123e15e2c82bffc10d9e40d31330e1 | [
"Apache-2.0"
] | permissive | zooba/twine | 71f948c5f4be24ada5eab7581524a53ae28554fd | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | refs/heads/master | 2020-04-03T03:05:51.045532 | 2018-10-15T12:30:38 | 2018-10-15T12:30:38 | 154,976,376 | 1 | 0 | null | 2018-10-27T14:57:29 | 2018-10-27T14:57:29 | null | UTF-8 | Python | false | false | 1,950 | py | """Tests for the Settings class and module."""
# Copyright 2018 Ian Stapleton Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os.path
import textwrap
from twine import exceptions
from twine import settings
import pytest
def test_settings_takes_no_positional_arguments():
    """Verify that the Settings initialization is kw-only."""
    with pytest.raises(TypeError):
        settings.Settings('a', 'b', 'c')


def test_settings_transforms_config(tmpdir):
    """Verify that the settings object transforms the passed in options."""
    pypirc = os.path.join(str(tmpdir), ".pypirc")
    with open(pypirc, "w") as fp:
        fp.write(textwrap.dedent("""
            [pypi]
            repository: https://upload.pypi.org/legacy/
            username:username
            password:password
        """))
    s = settings.Settings(config_file=pypirc)
    assert (s.repository_config['repository'] ==
            'https://upload.pypi.org/legacy/')
    assert s.sign is False
    assert s.sign_with == 'gpg'
    assert s.identity is None
    assert s.username == 'username'
    assert s.password == 'password'
    assert s.cacert is None
    assert s.client_cert is None


def test_identity_requires_sign():
    """Verify that if a user passes identity, we require sign=True."""
    with pytest.raises(exceptions.InvalidSigningConfiguration):
        settings.Settings(sign=False, identity='fakeid')
| [
"graffatcolmingov@gmail.com"
] | graffatcolmingov@gmail.com |
450e7d1eff3b352d490cce355d48ca9ab025f36e | 4d6fc7e3c2760a36c9d45e7158c8cdd4fba9e4ae | /backend/tinman_mobile_recyc_21719/wsgi.py | 24eb4b2c63c28acc026cb2bede4d2c4668c136d4 | [] | no_license | crowdbotics-apps/tinman-mobile-recyc-21719 | 789bb1901cf3709bd535403dfef6501508a6ef6c | a2c1c8e38120e8dcebf2af378b9a631c889cccde | refs/heads/master | 2022-12-19T19:34:19.167744 | 2020-10-19T19:42:40 | 2020-10-19T19:42:40 | 305,495,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for tinman_mobile_recyc_21719 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before creating the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tinman_mobile_recyc_21719.settings')

# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
67436b519e8731148b8a0666242ef69f0514e903 | 69c09e3e8b479c8a65043db9aafcf5193b255f8e | /RRtoolbox/lib/session.py | b0e9f12ece2843699e842c41ed92d2e8cff578fd | [
"BSD-3-Clause"
] | permissive | davtoh/RRtools | 1e5a0bf4abae9e971546f250172562841ccea029 | 6dde2d4622719d9031bf21ffbf7723231a0e2003 | refs/heads/master | 2020-05-21T20:07:11.428279 | 2018-07-17T15:24:55 | 2018-07-17T15:24:55 | 62,759,687 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | # -*- coding: utf-8 -*-
"""
This module have serializing methods for data persistence so to let the package "save" custom objects
session module made by Davtoh and powered by dill
Dependency project: https://github.com/uqfoundation/dill
"""
try:
    # For security reasons read: http://www.benfrederickson.com/dont-pickle-your-data/
    # Alternatives previously considered:
    #   import jsonpickle as serializer  # http://jsonpickle.github.io/
    #   import dill as serializer        # dill must be >= 0.2.4
    #   __license__ = serializer.__license__
    #   dill.detect.trace(True)
    # Fixed: the original imported "cpickle"; Python 2's C module is spelled
    # cPickle, so the lowercase name always failed and fell back to pickle.
    import cPickle as serializer
except ImportError:  # Python 3, or cPickle unavailable
    import pickle as serializer

import types
import os
from .root import secure_open

# Types, names, and name prefixes that are never persisted in a session.
# (The duplicate `type` entry is redundant but harmless for membership tests.)
__excludeType = [types.FunctionType, types.ModuleType, type(None), type, type]
__excludeVar = []
__excludePattern = ['__']


def getEnviromentSession(enviroment=None):
    """
    Gets the filtered session from the global variables.

    :param enviroment: mapping of names to values; defaults to globals().
    :return: dictionary containing filtered session.
    """
    enviroment = enviroment or globals()
    session = {}
    for keys in list(enviroment.keys()):
        # Skip dunder-style names entirely.
        if __excludePattern != [] and keys.startswith(*__excludePattern):
            continue
        # Skip non-serializable types and explicitly excluded names.
        if not (type(enviroment[keys]) in __excludeType or keys in __excludeVar):
            session[keys] = enviroment[keys]
    return session


def saveSession(filepath, session, helper=None):
    """
    Saves dictionary session to file.

    :param filepath: path to save session file.
    :param session: dictionary
    :param helper: function to pre-process session
    :return: filename of saved session
    """
    # secure_open creates the file with restrictive permissions
    # (cf. http://stackoverflow.com/a/5624691/5288758).
    with secure_open(filepath, 'wb') as logger:
        if helper:
            serializer.dump(helper(session), logger, serializer.HIGHEST_PROTOCOL)
        else:
            serializer.dump(session, logger, serializer.HIGHEST_PROTOCOL)
    return logger.name


def readSession(filepath, helper=None):
    """
    Loads a dictionary session from file.

    :param filepath: path to load session file.
    :param helper: function to pos-process session file
    :return: session
    """
    with secure_open(filepath, 'rb') as logger:
        session = serializer.load(logger)
        if helper:
            return helper(session)
        return session


def updateSession(filepath, session, replace=True, rdhelper=None, svhelper=None):
    """
    Updates a dictionary session in file.

    :param filepath: path to session file.
    :param session: dictionary.
    :param replace: if True key values are replaced else old key values are kept.
    :param rdhelper: read helper.
    :param svhelper: save helper.
    :return: None
    """
    current = readSession(filepath, rdhelper)
    if replace:  # update by replacing existing values
        current.update(session)
    else:  # update without replacing existing values
        for key in session:
            if key not in current:
                current[key] = session[key]
    saveSession(filepath, current, svhelper)  # save updated session


def flushSession(filepath):
    """
    Empty session in file.

    :param filepath: path to session file.
    :return: None
    """
    # NOTE(review): the read result is discarded — presumably this validates
    # the file exists and parses before it is overwritten; confirm intent.
    readSession(filepath)
    saveSession(filepath, {})  # save emptied session


def checkFromSession(filepath, varlist):
    """
    Check that variables exist in session file.

    :param filepath: path to session file.
    :param varlist: list of variables to check.
    :return: list of booleans, one per variable.
    """
    current = readSession(filepath)
    return [var in current for var in varlist]


def deleteFromSession(filepath, varlist):
    """
    Delete variables from session file.

    :param filepath: path to session file.
    :param varlist: list of variables to delete.
    :return: None
    """
    current = readSession(filepath)
    for var in varlist:  # deleting variables
        del current[var]
    saveSession(filepath, current)  # save updated session
| [
"davsamirtor@gmail.com"
] | davsamirtor@gmail.com |
2f8b4d8d7bfacca49fe3a4bfc605113a99c9b400 | 9c93e3da0a209fe9b30514d20615609b2e1a97be | /kids/models.py | 2a09b122f1fd4fac3e180b4d5bca08689fcb84cc | [] | no_license | kblauer/kinder-updates | c817cf1597df1e1bd2a335a8d272e76a4d3b9a88 | 5d8a26f7e08cb28b3ee0a9d2727d32c18e0cf863 | refs/heads/master | 2016-09-05T20:02:54.564794 | 2014-11-26T05:36:27 | 2014-11-26T05:36:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from django.db import models
class Kid(models.Model):
    """A child record with creation/update audit timestamps."""

    last_name = models.CharField(max_length=30)
    first_name = models.CharField(max_length=30)
    postTime = models.DateTimeField(auto_now_add=True)  # set once at creation
    updateTime = models.DateTimeField(auto_now=True)    # refreshed on every save

    def __unicode__(self):
        # Python-2 Django display name: "<last> <first>".
        return str(self.last_name + " " + self.first_name)
| [
"kyle.g.blauer@gmail.com"
] | kyle.g.blauer@gmail.com |
b9de45a439ddc21460740e683994484351c83573 | fcdce57c1bd0cc4f52679fd0f3f82532550083fa | /267/test_island.py | 1ad4efd9f53a840e3cddb699edb7c0ebe7f07475 | [] | no_license | nishanthegde/bitesofpy | a16a8b5fb99ab18dc1566e606170464a4df3ace0 | c28aa88e1366ab65f031695959d7cd0b3d08be6b | refs/heads/master | 2023-08-08T16:53:17.107905 | 2023-07-22T19:07:51 | 2023-07-22T19:07:51 | 183,959,400 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | import pytest
from island import island_size
rectangle = [[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 1, 1, 0]]
small = [[0, 0, 0],
[0, 1, 0],
[0, 0, 0]]
empty = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
whole = [[1, 1, 1],
[1, 0, 1],
[1, 1, 1]]
@pytest.mark.parametrize("map_, expected", [
(rectangle, 12),
(small, 4),
(empty, 0),
(whole, 16),
])
def test_island_size(map_, expected):
assert island_size(map_) == expected | [
"nhegde@netflix.com"
] | nhegde@netflix.com |
54ce917f02bd029282b1cb78ab04b9b83b0fdcce | e173098f9ecd39bef112432a8bb7ed7fb1209fe9 | /wfm_client/migrations/0034_remove_item_threshold.py | b75f3be44de26757ad0634ed0cdc83a4a5265da5 | [] | no_license | isaiahiyede/inventory | 51b639257c14e257ababae047d83caa93b809893 | cedecc5b6d22d977b4bdac00e5faf775da7382ab | refs/heads/master | 2021-08-29T08:41:02.062763 | 2017-12-13T15:46:57 | 2017-12-13T15:46:57 | 114,137,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-10-31 10:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused `threshold` field from wfm_client.Item."""

    dependencies = [
        ('wfm_client', '0033_auto_20161031_0408'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='item',
            name='threshold',
        ),
    ]
| [
"a.ajibike@zaposta.com"
] | a.ajibike@zaposta.com |
eea527d293b15752a69e28c3f4dd3834a1ce56df | 45ab4c22d918dc4390572f53c267cf60de0d68fb | /src/Analysis/Engine/Impl/Typeshed/stdlib/3/urllib/parse.pyi | c04bf4e98d8c323efefd70d3b02abf3a30315420 | [
"Apache-2.0",
"MIT"
] | permissive | sourcegraph/python-language-server | 580a24fd15fe9d4abeb95e9333d61db1c11a2670 | 64eae156f14aa14642afcac0e7edaf5d7c6d1a1c | refs/heads/master | 2023-04-09T21:17:07.555979 | 2018-12-06T23:25:05 | 2018-12-06T23:25:05 | 155,174,256 | 2 | 2 | Apache-2.0 | 2018-10-29T08:06:49 | 2018-10-29T08:06:49 | null | UTF-8 | Python | false | false | 5,231 | pyi | # Stubs for urllib.parse
from typing import Any, List, Dict, Tuple, AnyStr, Generic, overload, Sequence, Mapping, Union, NamedTuple, Callable
import sys
_Str = Union[bytes, str]
uses_relative = ... # type: List[str]
uses_netloc = ... # type: List[str]
uses_params = ... # type: List[str]
non_hierarchical = ... # type: List[str]
uses_query = ... # type: List[str]
uses_fragment = ... # type: List[str]
scheme_chars = ... # type: str
MAX_CACHE_SIZE = 0
class _ResultMixinBase(Generic[AnyStr]):
def geturl(self) -> AnyStr: ...
class _ResultMixinStr(_ResultMixinBase[str]):
def encode(self, encoding: str = ..., errors: str = ...) -> '_ResultMixinBytes': ...
class _ResultMixinBytes(_ResultMixinBase[str]):
def decode(self, encoding: str = ..., errors: str = ...) -> '_ResultMixinStr': ...
class _NetlocResultMixinBase(Generic[AnyStr]):
username = ... # type: AnyStr
password = ... # type: AnyStr
hostname = ... # type: AnyStr
port = ... # type: int
class _NetlocResultMixinStr(_NetlocResultMixinBase[str], _ResultMixinStr): ...
class _NetlocResultMixinBytes(_NetlocResultMixinBase[str], _ResultMixinBytes): ...
class _DefragResultBase(tuple, Generic[AnyStr]):
url = ... # type: AnyStr
fragment = ... # type: AnyStr
_SplitResultBase = NamedTuple(
'_SplitResultBase',
[
('scheme', str), ('netloc', str), ('path', str), ('query', str), ('fragment', str)
]
)
_SplitResultBytesBase = NamedTuple(
'_SplitResultBytesBase',
[
('scheme', bytes), ('netloc', bytes), ('path', bytes), ('query', bytes), ('fragment', bytes)
]
)
_ParseResultBase = NamedTuple(
'_ParseResultBase',
[
('scheme', str), ('netloc', str), ('path', str), ('params', str), ('query', str), ('fragment', str)
]
)
_ParseResultBytesBase = NamedTuple(
'_ParseResultBytesBase',
[
('scheme', bytes), ('netloc', bytes), ('path', bytes), ('params', bytes), ('query', bytes), ('fragment', bytes)
]
)
# Structured result objects for string data
class DefragResult(_DefragResultBase[str], _ResultMixinStr): ...
class SplitResult(_SplitResultBase, _NetlocResultMixinStr): ...
class ParseResult(_ParseResultBase, _NetlocResultMixinStr): ...
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase[bytes], _ResultMixinBytes): ...
class SplitResultBytes(_SplitResultBytesBase, _NetlocResultMixinBytes): ...
class ParseResultBytes(_ParseResultBytesBase, _NetlocResultMixinBytes): ...
def parse_qs(qs: AnyStr, keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ...) -> Dict[AnyStr, List[AnyStr]]: ...
def parse_qsl(qs: AnyStr, keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ...) -> List[Tuple[AnyStr, AnyStr]]: ...
@overload
def quote(string: str, safe: _Str = ..., encoding: str = ..., errors: str = ...) -> str: ...
@overload
def quote(string: bytes, safe: _Str = ...) -> str: ...
def quote_from_bytes(bs: bytes, safe: _Str = ...) -> str: ...
@overload
def quote_plus(string: str, safe: _Str = ..., encoding: str = ..., errors: str = ...) -> str: ...
@overload
def quote_plus(string: bytes, safe: _Str = ...) -> str: ...
def unquote(string: str, encoding: str = ..., errors: str = ...) -> str: ...
def unquote_to_bytes(string: _Str) -> bytes: ...
def unquote_plus(string: str, encoding: str = ..., errors: str = ...) -> str: ...
@overload
def urldefrag(url: str) -> DefragResult: ...
@overload
def urldefrag(url: bytes) -> DefragResultBytes: ...
if sys.version_info >= (3, 5):
def urlencode(query: Union[Mapping[Any, Any],
Mapping[Any, Sequence[Any]],
Sequence[Tuple[Any, Any]],
Sequence[Tuple[Any, Sequence[Any]]]],
doseq: bool = ..., safe: AnyStr = ..., encoding: str = ..., errors: str = ...,
quote_via: Callable[[str, AnyStr, str, str], str] = ...) -> str: ...
else:
def urlencode(query: Union[Mapping[Any, Any],
Mapping[Any, Sequence[Any]],
Sequence[Tuple[Any, Any]],
Sequence[Tuple[Any, Sequence[Any]]]],
doseq: bool = ..., safe: AnyStr = ..., encoding: str = ..., errors: str = ...) -> str: ...
def urljoin(base: AnyStr, url: AnyStr, allow_fragments: bool = ...) -> AnyStr: ...
@overload
def urlparse(url: str, scheme: str = ..., allow_fragments: bool = ...) -> ParseResult: ...
@overload
def urlparse(url: bytes, scheme: bytes = ..., allow_fragments: bool = ...) -> ParseResultBytes: ...
@overload
def urlsplit(url: str, scheme: str = ..., allow_fragments: bool = ...) -> SplitResult: ...
@overload
def urlsplit(url: bytes, scheme: bytes = ..., allow_fragments: bool = ...) -> SplitResultBytes: ...
@overload
def urlunparse(components: Tuple[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]) -> AnyStr: ...
@overload
def urlunparse(components: Sequence[AnyStr]) -> AnyStr: ...
@overload
def urlunsplit(components: Tuple[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]) -> AnyStr: ...
@overload
def urlunsplit(components: Sequence[AnyStr]) -> AnyStr: ...
| [
"alsher@microsoft.com"
] | alsher@microsoft.com |
41e75dddd5c8f7552c13a7c2f581eb3176ae495c | 664eaaf3ce1329637966ee85752a4bf792f93d64 | /orders/migrations/0100_auto_20170802_0909.py | b727854a7ca3a1423cdb722ee83042217623dad4 | [] | no_license | rajinder-mohan/thelunchbox | 6fb9f7228490e22da58f59a29601aebec9c8fd35 | 54e3a14229fd72d24152671fb8f0bc13ae83be8c | refs/heads/master | 2021-05-04T16:25:33.015292 | 2018-02-05T03:43:34 | 2018-02-05T03:43:34 | 120,251,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-02 07:09
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Adjust Order.u_processing_date_till default/verbose name."""

    dependencies = [
        ('orders', '0099_auto_20170802_0904'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='u_processing_date_till',
            # NOTE(review): the default is the fixed instant this migration was
            # generated (2017-08-09), not a dynamic "now + 7 days" — a common
            # makemigrations artifact; it only affects rows touched by this
            # migration, so it is left as generated.
            field=models.DateField(blank=True, default=datetime.datetime(2017, 8, 9, 7, 9, 35, 467764, tzinfo=utc), null=True, verbose_name=b'Processing till date'),
        ),
    ]
| [
"rajinder_mohan@esferasoft.com"
] | rajinder_mohan@esferasoft.com |
a6659f9ae7d873b23e7faa1925b36f2aa7546822 | f6e3393dc3ddec266cb57fd20930cf1f2ae05368 | /parttwo/__init__.py | 18c0b43183b7200012a6bcd45fdc95fbea119052 | [] | no_license | cesslab/winners-curse-v8 | 06dfd26fee1db7445de210f6f87d794796ee5cee | 168338017b28f806610794a4dcfbd217b3fe40c4 | refs/heads/master | 2023-07-03T13:54:32.517208 | 2021-08-03T13:15:42 | 2021-08-03T13:15:42 | 383,651,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | import random
from otree.api import BaseGroup, BaseSubsession, models, BasePlayer, widgets
from exp.models import (
BidHistoryPlayer,
save_bid_history_for_all_players,
ExperimentSubSession,
create_player_bid_histories,
)
from exp.db import Phase, close_db
from .views import (
BidInfo, Bid, Instructions, Outcome
)
from .constants import Constants
doc = """
Part II
"""
def creating_session(subsession):
    """oTree session hook: pick each player's payoff lottery/round, seed histories.

    Runs once per round; the random payoff draws and bid-history setup are
    only done on round 1 so they happen a single time per participant.
    """
    print("executing create sessions for parttwo")
    print(f"lotteries={Constants.NUM_LOTTERIES}, rounds per lottery={Constants.ROUNDS_PER_LOTTERY}")
    if subsession.round_number == 1:
        for player in subsession.get_players():
            player.participant.vars['choice_payoff_lottery'] = random.randint(1, Constants.NUM_LOTTERIES)
            player.participant.vars['choice_payoff_round'] = random.randint(1, Constants.ROUNDS_PER_LOTTERY)
        save_bid_history_for_all_players(subsession.get_players(), Constants.ROUNDS_PER_LOTTERY, Phase.GUESS_PHASE)
    close_db()
class Subsession(BaseSubsession, ExperimentSubSession):
    pass


class Group(BaseGroup):
    pass


class Player(BasePlayer, BidHistoryPlayer):
    # Input
    bid = models.IntegerField(min=0, max=100)

    # Payoff
    tie = models.BooleanField(initial=False)
    win_tie_break = models.BooleanField(initial=False)
    winner = models.BooleanField(initial=False)
    new_highest_bid = models.IntegerField()
    earnings = models.IntegerField()
    is_payment_round = models.BooleanField(initial=False)

    # Bid History (replayed from a previous session's data)
    bid_history_id = models.IntegerField()
    previous_session_id = models.IntegerField()
    lottery_id = models.IntegerField()
    treatment = models.StringField(choices=["cp", "cv"])
    lottery_round_number = models.IntegerField()
    lottery_order = models.IntegerField()
    others_group_id = models.IntegerField()
    others_player_id = models.IntegerField()
    signal = models.IntegerField()
    signal1 = models.IntegerField()
    signal2 = models.IntegerField()
    signal3 = models.IntegerField()
    signal4 = models.IntegerField()
    bid1 = models.IntegerField()
    bid2 = models.IntegerField()
    bid3 = models.IntegerField()
    bid4 = models.IntegerField()
    player_id = models.IntegerField()
    alpha = models.IntegerField()
    beta = models.IntegerField()
    epsilon = models.IntegerField()
    ticket_value_before = models.IntegerField()
    ticket_probability = models.IntegerField()
    fixed_value = models.IntegerField()
    ticket_value_after = models.IntegerField()
    up_ticket = models.IntegerField()
    others_high_bid = models.IntegerField()

    # Player Bid History
    rounds_per_lottery = models.IntegerField()
    player_bid_history_id = models.IntegerField()
    part_round_number = models.IntegerField()
    be_bid = models.FloatField()


# oTree page order for this app.
page_sequence = [Instructions, Bid, Outcome]
| [
"anwarruff@gmail.com"
] | anwarruff@gmail.com |
22a4ddda1f112068bde286fce3381035e66c475a | ded20586a099ba7cbae3ce6c86c6d176243db148 | /test/uniontests.py | b7096433d538ce989151664cb30ad35a53837aab | [] | no_license | rlgomes/dicttools | 0b3ff9f30ea4bfc70636dc9be5361b8644020d05 | 7979f6c347acaf1f3095114b9018859c851c976e | refs/heads/master | 2021-01-01T15:18:27.005247 | 2013-05-25T04:04:23 | 2013-05-25T04:04:23 | 9,741,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | """
verify that the filter() method behaves as expected
"""
import dicttools
import unittest
class UnionTests(unittest.TestCase):
def test_basic_dictionaries(self):
dict1 = { 0: 0, 1: 0 }
dict2 = { 1: 1, 2: 2 }
actual = dicttools.union(dict1, dict2)
expected = { 0: 0, 1: 1, 2: 2 }
self.assertEquals(actual, expected, msg="%s != %s" % (expected, actual))
def test_nested_dictionaries(self):
dict1 = { 0: { 1: False } }
dict2 = { 0: { 1: True } }
actual = dicttools.union(dict1, dict2)
expected = { 0: { 1: True} }
self.assertEquals(actual, expected, msg="%s != %s" % (expected, actual))
| [
"rodneygomes@gmail.com"
] | rodneygomes@gmail.com |
b16ea601652e7d9586cb1f93b9ce93b8ae2b4422 | c29de7ce2d91f572aeb4da56801de7a1dc034054 | /meta_st/experiments/cifar10/exp001_000.py | 0cfe92260bd912170114b4a5c944e4004017b178 | [] | no_license | kzky/works | 18b8d754bfc2b1da22022926d882dfe92ea785e6 | b8708c305e52f924ea5a7071e0dfe5f2feb7a0a3 | refs/heads/master | 2021-01-10T08:04:44.831232 | 2018-03-01T15:09:47 | 2018-03-01T15:09:47 | 54,316,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | from meta_st.cifar10.experiments001 import Experiment000
from meta_st.utils import to_device
from meta_st.cifar10.datasets import Cifar10DataReader, Separator
import numpy as np
import os
import numpy as np
import sys
import time
import chainer.functions as F
from chainer import cuda
from chainer import Variable
def main():
    """Semi-supervised CIFAR-10 training loop for Experiment000 (chainer)."""
    # Settings
    device = int(sys.argv[1]) if len(sys.argv) > 1 else None
    batch_size = 100
    n_l_train_data = 4000
    n_train_data = 50000
    n_cls = 10
    T = 3
    learning_rate = 1. * 1e-3
    n_epoch = 300
    act = F.relu
    # Floor division so range(n_iter) gets an int on Python 3 as well
    # (identical result to "/" under Python 2 integer division).
    iter_epoch = n_train_data // batch_size
    n_iter = n_epoch * iter_epoch

    # Separate dataset into labeled/unlabeled splits
    home = os.environ.get("HOME")
    fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    separator = Separator(n_l_train_data)
    separator.separate_then_save(fpath)

    l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
    u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    zca_path = os.path.join(home, "datasets/cifar10/zca_components.npz")

    # DataReader, Model, Optimizer, Losses
    data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
                                    zca_path=zca_path,
                                    batch_size=batch_size,
                                    n_cls=n_cls,
                                    da=True,
                                    shape=True)
    exp = Experiment000(
        device,
        learning_rate,
        act,
        T,
    )

    # Training loop
    print("# Training loop")
    epoch = 1
    st = time.time()
    acc_prev = 0.
    for i in range(n_iter):
        # Get labeled and unlabeled batches
        x_l0, x_l1, y_l = [Variable(cuda.to_gpu(x, device))
                           for x in data_reader.get_l_train_batch()]
        x_u0, x_u1, _ = [Variable(cuda.to_gpu(x, device))
                         for x in data_reader.get_u_train_batch()]

        # Train
        exp.train(x_l0, x_l1, y_l, x_u0, x_u1)

        # Eval once per epoch
        if (i + 1) % iter_epoch == 0:
            x_l, y_l = [Variable(to_device(x, device))
                        for x in data_reader.get_test_batch()]
            bs = 100
            accs = []
            # Renamed the inner index from "i" to "ofs": it shadowed the
            # outer iteration counter.
            for ofs in range(0, x_l.shape[0], bs):
                accs.append(float(cuda.to_cpu(
                    exp.test(x_l[ofs:ofs + bs, ], y_l[ofs:ofs + bs, ]).data)))
            msg = "Epoch:{},ElapsedTime:{},Acc:{}".format(
                epoch,
                time.time() - st,
                np.mean(accs))
            print(msg)
            acc_prev = accs[-1]
            st = time.time()
            epoch += 1


if __name__ == '__main__':
    main()
| [
"rkzfilter@gmail.com"
] | rkzfilter@gmail.com |
a4643e58f65332f011da22c4bdc8ccb39456cc79 | 19c811de2787789fbbc9ab5007ae9780e5b8911a | /01First_week/06Flower.py | 7977498b2e25a1a27f5d405676122cd16c9ae765 | [] | no_license | skafev/Python_OOP | 5948de698464ad3cba358cec666f688b4471a937 | 390d3c3b1b9aa6c44533aa91eabc430dea6b8416 | refs/heads/main | 2023-06-09T19:14:03.611726 | 2021-07-01T16:47:23 | 2021-07-01T16:47:23 | 382,099,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | class Flower:
def __init__(self, name, water_requirements):
self.name = name
self.water_requirements = water_requirements
self.is_happy = False
def water(self, quantity):
if quantity >= self.water_requirements:
self.is_happy = True
def status(self):
if self.is_happy:
return f"{self.name} is happy"
return f"{self.name} is not happy"
flower = Flower("Lilly", 100)
flower.water(50)
print(flower.status())
flower.water(100)
print(flower.status()) | [
"s.kafev@gmail.com"
] | s.kafev@gmail.com |
4b817ab33947b7eda9efd73e9a8f0750a63c116b | 32e3e0016b06c549ea042127124a870b3b361684 | /ansible/modules/dcos/dcos_secret.py | 7f85dcb29cfb583a9b37868ede5f1ec14a7dfe6d | [
"MIT"
] | permissive | TerryHowe/ansible-modules-dcos | 1698fe875facc8884c11fc3d90ee58a7add23fc0 | ec6e7a10de81cc14d4b5c2e46d90eaf1347ed95f | refs/heads/master | 2021-01-17T17:50:38.429350 | 2016-11-19T00:09:16 | 2016-11-19T00:09:16 | 70,623,881 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | #!/usr/bin/python
DOCUMENTATION = '''
---
module: dcos_secret
short_description: Manage secrets on DCOS
options:
path:
description:
- Path of secret.
required: true
key:
description:
- key of secret.
required: false
value:
description:
- Value of secret.
required: false
state:
description:
- If C(present), ensure the secret exists with all the given
value. If C(absent), ensure the secret does not exists.
Defaults to C(present).
required: false
default: present
choices: [ present, absent ]
'''
EXAMPLES = '''
- name: Create the ACL for package management
dcos_secret:
path: "azurediamond/password"
value: "hunter2"
- name: Remove the ACL for package management
dcos_secret:
path: "azurediamond/password"
state: absent
'''
from ansible.module_utils.basic import *
from ansible.module_utils import dcos
def dcos_secret_absent(params):
    """Delete the secret at params['path']; a 404 counts as success (already gone)."""
    client = dcos.DcosClient(service_path='/secrets/v1')
    result = client.delete('/secret/default/{}'.format(params['path']))
    if result['status_code'] == 404:
        result['failed'] = False
        result['rc'] = 0
    module.exit_json(**result)


def dcos_secret_present(params):
    """Create the secret, or patch it if it already exists (HTTP 409)."""
    client = dcos.DcosClient(service_path='/secrets/v1')
    body = {
        params['key']: params['value']
    }
    path = '/secret/default/{}'.format(params['path'])
    result = client.put(path, body)
    if result['changed']:
        module.exit_json(**result)
    elif result['status_code'] != 409:
        module.fail_json(**result)
    # 409: secret already exists — update it in place.
    result = client.patch(path, body)
    module.exit_json(**result)


def dcos_secret_get(params):
    """Fetch the secret and expose params['key'] as 'value' in the result."""
    client = dcos.DcosClient(service_path='/secrets/v1')
    path = '/secret/default/{}'.format(params['path'])
    result = client.get(path)
    data = result['json']
    if params['key'] in data:
        result['value'] = data[params['key']]
    module.exit_json(**result)


def main():
    """Ansible entry point: dispatch on state/value to get, set, or delete."""
    global module
    module = AnsibleModule(argument_spec={
        'path': {'type': 'str', 'required': True},
        'key': {'type': 'str', 'required': False, 'default': 'value'},
        'value': {'type': 'str', 'required': False},
        'state': {
            'type': 'str',
            'required': False,
            'default': 'present',
            'choices': ['present', 'absent']
        },
    })
    if module.params['state'] == 'present':
        if module.params['value']:
            dcos_secret_present(module.params)
        else:
            dcos_secret_get(module.params)
    # Reached only for state=absent: exit_json/fail_json terminate the module.
    dcos_secret_absent(module.params)


if __name__ == '__main__':
    main()
| [
"terrylhowe@gmail.com"
] | terrylhowe@gmail.com |
38a9c6f96dbcbb8bb74393aaf2bf5e968b0adf25 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/gcd_20200622234501.py | 6c803a63102c69b4aa607a61b7f1a772f94dd2dd | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | def gcd(a,b):
while b !=1:
# t = a
# a = b
b = a % b
print(b)
return b
print(gcd(20,8)) #where a >b ,when b !=0 then b = a%b | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
821deee605ee700ba4695251ab5ff955d72e972f | bf2b0de62b3ffde9a1684c3eab3e7a62937b5ce5 | /arsenal/math/__init__.py | 2bdb8c3642e9384dee5029ea2539d4d2c70119a0 | [] | no_license | srhrshr/arsenal | 4b3c774578912ada1e8ae77245629c9cf7b4652d | 28574e52f70c895399ed259bb743b6c8f3dd6628 | refs/heads/master | 2021-09-06T22:52:21.723774 | 2018-02-12T22:49:11 | 2018-02-12T22:49:11 | 122,544,738 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #from math import *
from util import *
from compare import compare, check_equal, pp_plot
from checkgrad import fdcheck
import stepsize
| [
"tim.f.vieira@gmail.com"
] | tim.f.vieira@gmail.com |
ebbd64c890bdc56ff7fd43f881a498f4d7d404aa | 24710de9897e9647fc7b802dc9b0cfdcfaa4b82f | /nengo_alt/nengo/utils/tests/test_distributions.py | cf1fcf5b61fdd9067e2e899a0db02ed6d0ec8f84 | [] | no_license | jaberg/nengo_alt | da6e94afea3f7cf03d56bc9274181da67fcde2d4 | 18f16a03b851de80b2f0382a464cfaadc050af5c | refs/heads/master | 2016-09-05T23:28:20.980908 | 2014-05-06T17:01:58 | 2014-05-06T17:01:58 | 9,757,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import numpy as np
import pytest
import nengo
import nengo.utils.distributions as dists
import nengo.utils.numpy as npext
@pytest.mark.parametrize("low,high", [(-2, -1), (-1, 1), (1, 2), (1, -1)])
def test_uniform(low, high):
n = 100
dist = dists.Uniform(low, high)
samples = dist.sample(n, np.random.RandomState(1))
if low < high:
assert np.all(samples >= low)
assert np.all(samples < high)
else:
assert np.all(samples <= low)
assert np.all(samples > high)
hist, _ = np.histogram(samples, bins=5)
assert np.allclose(hist - np.mean(hist), 0, atol=0.1 * n)
@pytest.mark.parametrize("mean,std", [(0, 1), (0, 0), (10, 2)])
def test_gaussian(mean, std):
n = 100
if std <= 0:
with pytest.raises(ValueError):
dist = dists.Gaussian(mean, std)
else:
dist = dists.Gaussian(mean, std)
samples = dist.sample(n, np.random.RandomState(1))
assert abs(np.mean(samples) - mean) < std * 0.1
assert abs(np.std(samples) - std) < 1
@pytest.mark.parametrize("dimensions", [0, 1, 2, 5])
def test_hypersphere(dimensions):
n = 100 * dimensions
if dimensions < 1:
with pytest.raises(ValueError):
dist = dists.UniformHypersphere(dimensions)
else:
dist = dists.UniformHypersphere(dimensions)
samples = dist.sample(n, np.random.RandomState(1))
assert samples.shape == (n, dimensions)
assert np.allclose(
np.mean(samples, axis=0), np.zeros(dimensions), atol=0.1)
hist, _ = np.histogramdd(samples, bins=5)
assert np.allclose(hist - np.mean(hist), 0, atol=0.1 * n)
@pytest.mark.parametrize("dimensions", [1, 2, 5])
def test_hypersphere_surface(dimensions):
n = 100 * dimensions
dist = dists.UniformHypersphere(dimensions, surface=True)
samples = dist.sample(n, np.random.RandomState(1))
assert samples.shape == (n, dimensions)
assert np.allclose(npext.norm(samples, axis=1), 1)
assert np.allclose(
np.mean(samples, axis=0), np.zeros(dimensions), atol=0.1)
if __name__ == "__main__":
nengo.log(debug=True)
pytest.main([__file__, '-v'])
| [
"james.bergstra@gmail.com"
] | james.bergstra@gmail.com |
bab33d217ffbe61928f608bb51a8e3f69e1e9c60 | 1fe56144905244643dbbab69819720bc16031657 | /.history/books/api/views_20210424171408.py | 6f909574d72eaf12d2b6a30a1c10991f39c1a6e0 | [] | no_license | RaghdaMadiane/django | 2052fcdd532f9678fefb034bd60e44f466bd9759 | 6ca3f87f0b72880f071d90968f0a63ea5badcca8 | refs/heads/master | 2023-04-15T17:28:25.939823 | 2021-04-24T22:33:21 | 2021-04-24T22:33:21 | 361,279,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | from rest_framework.response import Response
from rest_framework import status
from books.models import Book ,Isbn
from .serializers import BookSerializer ,UserSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view , permission_classes
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
# class IsViewer(BasePermission):
# def has_permission(self, request, view):
# return request.user.groups.filter(name="viewers").exists()
@api_view(["POST"])
def api_signup(request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(data={
"success": True,
"message": "User has been registered successfully"
}, status=status.HTTP_201_CREATED)
return Response(data={
"success": False,
"errors": serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def index(request):
books=Isbn.objects.all()
serializer=BookSerializer(instance=books , many=True)
return Response(data=serializer.data,status=status.HTTP_200_OK)
@api_view(["POST"])
def create(request):
serializer=BookSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(data={
"success":True,
"message":"Book has been created successfully "
},status=status.HTTP_201_CREATED)
return Response(data={
"success":False,
"errors":serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
@api_view(["PUT"])
def update(request, id):
isbn=Isbn.objects.get(pk=id)
isbn_data = JSONParser().parse(request)
isbn_serializer = BookSerializer(isbn, data=isbn_data)
if isbn_serializer.is_valid():
isbn_serializer.save()
return JsonResponse(isbn_serializer.data)
return JsonResponse(isbn_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["DELETE"])
def destroy(request, id):
try:
isbn=Isbn.objects.get(pk=id)
isbn.delete()
return Response(data={
"success":True,
"message":"Book has been deleted successfully",
},
status=status.HTTP_200_OK,
)
except Isbn.DoesNotExist:
return JsonResponse({'message': 'The book does not exist'}, status=status.HTTP_404_NOT_FOUND)
| [
"raghdamadiane@gmail.com"
] | raghdamadiane@gmail.com |
4889af8f198c504a06fcd503c39bf621149a7926 | e9a38c6a00b49dc8fb0e5bef9e63bd30c7748db2 | /Coil_Codes/CoIL_Test_On_Simulator/PythonClient_py2 (ForJetson)/our_drive_new.py | 27abb6d8b4add83a023f63647e9a0c33d688e021 | [] | no_license | rohitgajawada/chauffeur_GAN | 2736f3bd6bb292b3cff7619a4fec0e656db200a0 | 01c0078454c2d7d02ac0e7b7e9f99efc606ed2ea | refs/heads/master | 2020-03-20T13:55:55.828292 | 2019-02-04T19:57:32 | 2019-02-04T19:57:32 | 137,470,548 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,979 | py |
import traceback
import sys
import logging
import json
import datetime
import numpy as np
import os
import time
import subprocess
import socket
import torch
from contextlib import closing
from carla.tcp import TCPConnectionError
from carla.client import make_carla_client
from carla.driving_benchmark import run_driving_benchmark
from drive import CoILAgent
from drive import ECCVTrainingSuite
from drive import ECCVGeneralizationSuite
from testing.unit_tests.test_drive.test_suite import TestSuite
from logger import coil_logger
from logger import monitorer
from configs import g_conf, merge_with_yaml, set_type_of_process
from utils.checkpoint_schedule import maximun_checkpoint_reach, get_next_checkpoint, is_next_checkpoint_ready
def frame2numpy(frame, frameSize):
return np.resize(np.fromstring(frame, dtype='uint8'), (frameSize[1], frameSize[0], 3))
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
def start_carla_simulator(gpu, exp_batch, exp_alias, city_name):
port = find_free_port()
carla_path = os.environ['CARLA_PATH']
#os.environ['SDL_VIDEODRIVER'] = 'offscreen'
#os.environ['SDL_HINT_CUDA_DEVICE'] = str(gpu)
#subprocess.call()
sp = subprocess.Popen([carla_path + '/CarlaUE4/Binaries/Linux/CarlaUE4', '/Game/Maps/' + city_name
, '-windowed', '-benchmark', '-fps=10', '-world-port='+str(port)], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
coil_logger.add_message('Loading', {'CARLA': carla_path + '/CarlaUE4/Binaries/Linux/CarlaUE4' + '/Game/Maps/' + city_name
+ '-windowed'+ '-benchmark'+ '-fps=10'+ '-world-port='+ str(port)})
return sp, port
# OBS: note, for now carla and carla test are in the same GPU
# TODO: Add all the necessary logging.
# OBS : I AM FIXING host as localhost now
# OBS : Memory use should also be adaptable lets leave it fixed for now
def execute(gpu, exp_batch, exp_alias, city_name='Town01', memory_use=0.2, host='127.0.0.1'):
# host,port,gpu_number,path,show_screen,resolution,noise_type,config_path,type_of_driver,experiment_name,city_name,game,drivers_name
#drive_config.city_name = city_name
# TODO Eliminate drive config.
print("Running ", __file__, " On GPU ", gpu, "of experiment name ", exp_alias)
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
sys.stdout = open('drive_' + city_name + '_' + str(os.getpid()) + ".out", "a", buffering=1)
#vglrun - d:7.$GPU $CARLA_PATH / CarlaUE4 / Binaries / Linux / CarlaUE4 / Game / Maps /$TOWN - windowed - benchmark - fps = 10 - world - port =$PORT;
#sleep 100000
carla_process, port = start_carla_simulator(gpu, exp_batch, exp_alias, city_name)
merge_with_yaml(os.path.join('configs', exp_batch, exp_alias+'.yaml'))
set_type_of_process('drive', city_name)
log_level = logging.WARNING
logging.StreamHandler(stream=None)
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
# TODO we have some external class that control this weather thing.
"""
if city_name == 'Town01':
experiment_suite = ECCVTrainingSuite()
else:
experiment_suite = ECCVGeneralizationSuite()
"""
experiment_suite = TestSuite()
coil_logger.add_message('Loading', {'Poses': experiment_suite._poses()})
while True:
try:
coil_logger.add_message('Loading', {'CARLAClient': host+':'+str(port)})
with make_carla_client(host, port) as client:
# Now actually run the driving_benchmark
latest = 0
# While the checkpoint is not there
while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):
# Get the correct checkpoint
if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):
latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias
, 'checkpoints', str(latest) + '.pth'))
coil_agent = CoILAgent(checkpoint)
coil_logger.add_message({'Iterating': {"Checkpoint": latest}})
# TODO: Change alias to actual experiment name.
run_driving_benchmark(coil_agent, experiment_suite, city_name,
exp_batch + '_' + exp_alias + '_' + str(latest)
, False, host, port)
# Read the resulting dictionary
#with open(os.path.join('_benchmark_results',
# exp_batch+'_'+exp_alias + 'iteration', 'metrics.json')
# , 'r') as f:
# summary_dict = json.loads(f.read())
# TODO: When you add the message you need to check if the experiment continues properly
# TODO: WRITE AN EFICIENT PARAMETRIZED OUTPUT SUMMARY FOR TEST.
#test_agent.finish_model()
#test_agent.write(results)
else:
time.sleep(0.1)
break
except TCPConnectionError as error:
logging.error(error)
time.sleep(1)
carla_process.kill()
break
except KeyboardInterrupt:
carla_process.kill()
coil_logger.add_message('Error', {'Message': 'Killed By User'})
break
except:
traceback.print_exc()
carla_process.kill()
coil_logger.add_message('Error', {'Message': 'Something Happened'})
break
carla_process.kill()
| [
"rishabhmadan96@gmail.com"
] | rishabhmadan96@gmail.com |
6d45f67728f43e16c80d6baa3ad84eb0c617558f | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/brgpat004/question3.py | 05472c444c585a4794e99b29f21ea6d96dd498a9 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | '''program to check if a complete Sudoku grid is valid
Patrick Boroughs
10 May 2014
'''
#input lines
line1=list(input())
line2=list(input())
line3=list(input())
line4=list(input())
line5=list(input())
line6=list(input())
line7=list(input())
line8=list(input())
line9=list(input())
#create 2d array
grid=[line1,line2,line3,line4,line5,line6,line7,line8,line9]
#test for repeated characters along vertical lines
def testvertical(grid):
for x in range(9):
for y in range(8):
for j in range(y+1,9):
if grid[y][x]==grid[j][x]:
return False
return True
#test for repeated characters along horizontal lines
def testhorizontal(grid):
for y in range(9):
for x in range(8):
for j in range(x+1,9):
if grid[y][x]==grid[y][j]:
return False
return True
#test for repeated characters within subgrids
def testsquare(grid):
xstart = 0
ystart = 0
#flag that all 9 grids have been tested
flag=True
while flag:
#array where values already in subgrid are stored
subgrid = []
for y in range(0+ystart,3+ystart):
for x in range(0+xstart,3+xstart):
#repition found
if grid[y][x] in subgrid:
return False
else:
subgrid.append(grid[y][x])
#test next block to right
if xstart<6:
xstart += 3
#test first block in next row
elif ystart<6:
ystart += 3
xstart = 0
#all blocks checked
elif xstart == 6 and ystart ==6:
flag=False
return True
#if all tests are true
if testvertical(grid) and testhorizontal(grid) and testsquare(grid):
print("Sudoku grid is valid")
else:
print("Sudoku grid is not valid") | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
30722fc2bf9332ff897320fc76aa632519053b64 | 11a246743073e9d2cb550f9144f59b95afebf195 | /codeforces/849/b.py | 12ecf3c7fec877cf676e718b7f2fe9c9dbf6acd8 | [] | no_license | ankitpriyarup/online-judge | b5b779c26439369cedc05c045af5511cbc3c980f | 8a00ec141142c129bfa13a68dbf704091eae9588 | refs/heads/master | 2020-09-05T02:46:56.377213 | 2019-10-27T20:12:25 | 2019-10-27T20:12:25 | 219,959,932 | 0 | 1 | null | 2019-11-06T09:30:58 | 2019-11-06T09:30:57 | null | UTF-8 | Python | false | false | 1,082 | py | def covers(n, a, num, den):
# a[i] = num / den * i + a[0]
# den * (a[i] - a[0]) = num * i
start = a[0]
used = [False] * n
used[0] = True
for i in range(1, n):
if den * (a[i] - a[0]) == num * i:
used[i] = True
first_unused = -1
for i in range(n):
if not used[i]:
first_unused = i
break
if first_unused == -1:
return False
for i in range(n):
if used[i]:
continue
if den * (a[i] - a[first_unused]) == num * (i - first_unused):
used[i] = True
return all(used)
def main():
n = int(input())
a = list(map(int, input().split()))
if n < 2:
print('No')
elif n == 2:
print('Yes')
else:
diffs = set(a[i] - a[i - 1] for i in range(2, n))
d = list(diffs)[0]
solved = (len(diffs) == 1 and a[1] - a[0] != d)
for i in range(1, n):
if covers(n, a, a[i] - a[0], i):
solved = True
break
print('Yes' if solved else 'No')
main()
| [
"arnavsastry@gmail.com"
] | arnavsastry@gmail.com |
4f39481237dbb459784950665781e548a8a95378 | cde11aea86ce9e1e370b02fb14553358b4aaab8b | /practice/medium/_30_remove_nth_node_from_end.py | 89f0fb5e1577835d35726fe23b0a203db6a16400 | [] | no_license | pavankumarag/ds_algo_problem_solving_python | 56f9a2bb64dd62f16028c3f49a72542b8588369a | cbd323de31f2f4a4b35334ce3249bb3e9525dbf8 | refs/heads/master | 2023-06-21T20:29:41.317005 | 2023-06-10T18:11:39 | 2023-06-10T18:11:39 | 223,919,558 | 2 | 1 | null | 2023-06-10T18:11:40 | 2019-11-25T10:16:27 | Python | UTF-8 | Python | false | false | 1,849 | py | class Node:
def __init__(self, data):
self.data = data
self.next = None
def insert_at_beginning(head, val):
if head is None:
head = Node(val)
return head
new_node = Node(val)
new_node.next = head
head = new_node
return head
def print_list(head):
curr = head
while curr is not None:
print curr.data,
curr = curr.next
def delete_nth_at_end(head, n):
'''
Brute force method, where count the number of node and delete from the delete from the beginning
:param head:
:param n:
:return:
'''
count = 1
curr = head
while curr.next is not None:
count += 1
curr = curr.next
k = count - n + 1
if k > 1:
temp = head
count = 1
while count < k - 1:
temp = temp.next
count += 1
print "Node deleted is ", temp.next.data
temp.next = temp.next.next
elif k == 1:
print "Node deleted is ", head.data
head = head.next
else:
print "Please enter valid position"
return -1
return head
def delete_nth_at_end_optimised(head, n):
first = second = head
for i in range(n):
if second.next is None:
# If count of nodes in the given list is less than 'n'
if i == n-1: # If index = n then delete the head node
head = head.next
return head
second = second.next
while second.next is not None:
second = second.next
first = first.next
print "\nNode deleted is", first.next.data
first.next = first.next.next
return head
if __name__ == "__main__":
head = insert_at_beginning(None, 2)
head = insert_at_beginning(head, 3)
head = insert_at_beginning(head, 4)
head = insert_at_beginning(head, 5)
head = insert_at_beginning(head, 6)
head = insert_at_beginning(head, 7)
print_list(head)
print
head = delete_nth_at_end(head, 3)
print
print_list(head)
head = insert_at_beginning(head, 8)
print
print_list(head)
head = delete_nth_at_end_optimised(head, 3)
print
print_list(head)
| [
"pavan.govindraj@nutanix.com"
] | pavan.govindraj@nutanix.com |
bdfb51204835e93ef2cf2157082654690e8052b9 | 1d9ab83a59139f1b59f20451146499c7f2c1fe2e | /00web框架的本质/15模板函数uimodule/start.py | 4cd7f1696b4b1bea0d983526b1b40fe9e7e4e386 | [] | no_license | lannyMa/tornado_info | df1361d9ee5ec418cb3cb7681cb45a0f88560466 | fe2c4dfb6d882f5ce750322bcd496c8484452da1 | refs/heads/master | 2021-01-23T06:06:04.913158 | 2017-09-15T02:55:20 | 2017-09-15T02:55:20 | 102,489,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | #!/usr/bin/env python
# coding=utf-8
import tornado.web
import uimodule as md
# 业务逻辑处理模块
INPUT_LIST=[]
class MainHandler(tornado.web.RequestHandler):
def get(self):
# self.write("Hello, world")
# self.render("bbs.html",names=INPUT_LIST)
self.render("bbs.html", npm="NPM", names=INPUT_LIST)
def post(self,*args,**kwargs):
name = self.get_argument("name")
INPUT_LIST.append(name)
self.render("bbs.html", npm="NPM", names=INPUT_LIST)
# 配置选项模块
settings = {
"template_path":"templates",
"static_path":"statics",
"ui_modules": md,
}
# 路由模块
application = tornado.web.Application([
(r"/index", MainHandler)],
**settings)
## wsgi模块
if __name__ == "__main__":
print("http://127.0.0.1:8888/index")
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
| [
"iher@foxmail.com"
] | iher@foxmail.com |
48b7ecfe463e752a0be32f5bfc1ab439178b973d | 0bb49acb7bb13a09adafc2e43e339f4c956e17a6 | /OpenNodes/OpenCoOrdination/getShotsFarmSetup.py | 1c2f9e9ce9f4eaea343698f0ed63bc1f62533322 | [] | no_license | all-in-one-of/openassembler-7 | 94f6cdc866bceb844246de7920b7cbff9fcc69bf | 69704d1c4aa4b1b99f484c8c7884cf73d412fafe | refs/heads/master | 2021-01-04T18:08:10.264830 | 2010-07-02T10:50:16 | 2010-07-02T10:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | ###OpenAssembler Node python file###
'''
define
{
name getShotsFarmSetup
tags ocoo
input string Project "" ""
input string Sequence "" ""
input string Shot "" ""
output array1D farmsetup "" ""
}
'''
import os, sys
from OpenProject.getAttribute import getAttribute
class getShotsFarmSetup():
def getShotsFarmSetup_main(self, **connection):
try:
Project=str(connection["Project"])
except:
Project=""
try:
Sequence=str(connection["Sequence"])
except:
Sequence=""
try:
Shot=str(connection["Shot"])
except:
Shot=""
try:
dpath=":"+str(Project)+":Movie:"+str(Sequence)+":"+str(Shot)
pri=gA(dpath+".farm_priority")
if pri=="" or str(pri)=="0":
pri="99"
mem=gA(dpath+".farm_memory")
if mem=="" or str(mem)=="0":
mem="3000"
hm=gA(dpath+".farm_hostmask")
if str(hm)=="0":
hm=""
he=gA(dpath+".farm_hostexclude")
if str(he)=="0":
he=""
return [pri,mem,hm,he]
except:
return ["99","3000","",""]
def gA(Path):
return getAttribute().getAttribute_main(Path=Path) | [
"laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771"
] | laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771 |
b60543a49f3b61b6f87714073f27f0cc30f52358 | cfb4e8721137a096a23d151f2ff27240b218c34c | /mypower/matpower_ported/lib/t/t_opf_userfcns.py | b1f56e79c782e8f409f6b0d392ae82592f0746de | [
"Apache-2.0"
] | permissive | suryo12/mypower | eaebe1d13f94c0b947a3c022a98bab936a23f5d3 | ee79dfffc057118d25f30ef85a45370dfdbab7d5 | refs/heads/master | 2022-11-25T16:30:02.643830 | 2020-08-02T13:16:20 | 2020-08-02T13:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | def t_opf_userfcns(*args,nout=1,oc=None):
if oc == None:
from ....oc_matpower import oc_matpower
oc = oc_matpower()
return oc.t_opf_userfcns(*args,nout=nout)
| [
"muhammadyasirroni@gmail.com"
] | muhammadyasirroni@gmail.com |
2c42be1d0682c7616e8d2eace0b905b4f5840ef3 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Introdução à Programação-Python/Capitulo 11/Exemplos/listagem11_8.py | 60b60fe1d4d1e50d89d164f60698821422b10bb9 | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # Program: Listagem11_8.py
# Author: Ramon R. Valeriano
# Description: Consulta com filtro
# Developed: 15/03/2020 - 15:54
import sqlite3
name = str(input("Digite o nome que deseja procurar: "))
name = name.strip()
conexao = sqlite3.connect('agenda.db')
cursor = conexao.cursor()
cursor.execute('select * from agenda where nome = ?', (name,))
while True:
resultado = cursor.fetchone()
if resultado == None:
break
print(f'Nome: {resultado[0]} Telefone: {resultado[1]}')
cursor.close()
conexao.close() | [
"rrvaleriano@gmail.com"
] | rrvaleriano@gmail.com |
638d509921f501d47ebdae410bd3b448bb41b732 | 98cb310b3a8dea5e07dc2359a07ef623e9a153d1 | /web-env/bin/chardetect | c4ca2be6b6d6a3ade306fdd5d93715dc608f8704 | [
"MIT"
] | permissive | Amirsorouri00/web-search-engine | 6c600fb924f3b2e883f746e8075e33954effcc79 | 00bf463b29490f5285ee44cd351c6de131f04f3a | refs/heads/master | 2020-06-01T12:46:37.612714 | 2019-07-24T14:25:54 | 2019-07-24T14:25:54 | 190,783,910 | 0 | 0 | MIT | 2019-06-07T17:30:51 | 2019-06-07T17:30:51 | null | UTF-8 | Python | false | false | 317 | #!/home/amirsorouri00/Desktop/search-engine/myproject/ui/search-engine/web-search-engine/web-env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"amirsorouri26@gmail.com"
] | amirsorouri26@gmail.com | |
ea66cb4d02c5b8b88e48e54fee4ce3bdc48b1ac3 | a499fbdd93f85a286505433a08afc25d84c8ff04 | /tests/python/unittest/test_tir_analysis_detect_buffer_access_lca.py | 8c2b2710f1ba5d23a0e98dc44352bfa17a6fdf80 | [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | elphinkuo/tvm | a81e0ccc5950a1473efdcdbb8263de9adbe36787 | 9df2ae8eaa8b394013182a7ad09ac57fe401f80e | refs/heads/main | 2023-08-05T07:41:18.652097 | 2021-09-28T00:38:26 | 2021-09-28T00:38:26 | 411,311,927 | 2 | 0 | Apache-2.0 | 2021-09-28T14:51:56 | 2021-09-28T14:17:46 | null | UTF-8 | Python | false | false | 5,279 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
from tvm.script import ty
@tvm.script.tir
def buffer_load_store_func(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128), "float32")
B = tir.match_buffer(b, (128, 128), "float32")
C = tir.alloc_buffer((128, 128), "float32")
D = tir.alloc_buffer((128, 128), "float32")
with tir.block([128, 128]) as [i, j]:
A[i, j] = tir.float32(0)
with tir.block([32, 32, tir.reduce_axis(0, 32)]) as [i, j, k]:
with tir.init():
for ii, jj in tir.grid(4, 4):
B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
for ii, jj in tir.grid(4, 4):
for kk in range(0, 4):
B[i * 4 + ii, j * 4 + jj] += C[i * 4 + ii, k * 4 + kk]
for kk in range(0, 4):
B[i * 4 + ii, j * 4 + jj] += D[j * 4 + jj, k * 4 + kk] * C[i * 4 + ii, k * 4 + kk]
@tvm.script.tir
def buffer_opaque_access(b: ty.handle, c: ty.handle) -> None:
B = tir.match_buffer(b, [16, 16], "float32")
C = tir.match_buffer(c, [16, 16], "float32")
with tir.block([]):
tir.reads([])
tir.writes(B[0:16, 0:16])
A = tir.allocate([256], "float32", "global")
for i, j in tir.grid(16, 16):
tir.store(A, i * 16 + j, 1)
for i in range(0, 16):
for j in range(0, 16):
tir.evaluate(tir.load("float32", A, i * 16 + j))
for j in range(0, 16):
tir.evaluate(
tir.tvm_fill_fragment(B.data, 16, 16, 16, 0, tir.float32(0), dtype="handle")
)
for i, j in tir.grid(16, 16):
with tir.block([16, 16]) as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
C[vi, vj] = B[vi, vj]
@tvm.script.tir
def lca_is_func_root(a: ty.handle) -> None:
A = tir.match_buffer(a, [0, 0], "float32")
A.data[0] = 1.0
@tvm.script.tir
def match_buffer_func(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128), "float32")
B = tir.match_buffer(b, (128, 128), "float32")
with tir.block([8, 8], "block") as [vi, vj]:
tir.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
tir.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
B0 = tir.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
B1 = tir.match_buffer(B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8))
with tir.block([16, 16], "AAA") as [i, j]:
AA = tir.match_buffer(A[i, j], ())
AA[()] = 1.0
tir.evaluate(B0.data)
tir.evaluate(B1.data)
def test_buffer_load_store():
func = buffer_load_store_func
A, B = [func.buffer_map[x] for x in func.params]
C, D = func.body.block.alloc_buffers
lca = tir.analysis.detect_buffer_access_lca(func)
# LCA of Buffer A is root
root_block = func.body.block
assert lca[A] == func.body.block
# LCA of Buffer B is reduction block
reduce_block = root_block.body[1].body.body.body.block
assert lca[B] == reduce_block
# LCA of Buffer C is the second loop kk
loop_jj = reduce_block.body.body
assert lca[C] == loop_jj
# LCA of Buffer D is loop jj
loop_kk = loop_jj.body[1]
assert lca[D] == loop_kk
def test_opaque_access():
func = buffer_opaque_access
B, C = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
# Cannot detect buffer A since it is define by low-level Allocate
# LCA of Buffer B is root
root_block = func.body.block
assert lca[B] == func.body.block
# LCA of Buffer C is the correspond block
assert lca[C] == root_block.body[1].body.body.block
def test_lca_func_root():
func = lca_is_func_root
(A,) = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
assert lca[A] is None
def test_match_buffer():
func = match_buffer_func
A, B = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
block = root_block.body.body.body.block
block_inner = block.body[0].body.body.block
# LCA of Buffer C is the inner block
assert lca[A] == block_inner
# LCA of Buffer C is the main block
assert lca[B] == block
if __name__ == "__main__":
test_buffer_load_store()
test_opaque_access()
test_lca_func_root()
test_match_buffer()
| [
"noreply@github.com"
] | elphinkuo.noreply@github.com |
659664bcd13c89ae3ce988800760bb90b493164f | 7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a | /.history/DEBER_20210902214041.py | 974d46d39de7c1acfd16b327f8391409fa5123da | [
"MIT"
] | permissive | Alopezm5/PROYECTO-PARTE-1 | a1dce04009b24852c1c60e69bdf602ad3af0574b | bd7a8594edf08d41c6ca544cf6bac01ea4fcb684 | refs/heads/main | 2023-07-25T11:22:17.994770 | 2021-09-07T03:27:34 | 2021-09-07T03:27:34 | 403,670,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,335 | py | class Nomina:
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr="",email="",estado="",profe="",dep="", valhora=0,hotraba=0):#3
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
self.correo=email
self.estadocivil=estado
self.profesion=profe
self.departamento=dep
self.valorhora=valhora
self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
class Empresa(Nomina):
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("Datos de la Empresa")
print("La empresa {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Nomina):
    """Employee side of the payroll: console data entry for the common
    fields plus one extra field depending on the employee kind."""
    def empleado(self):
        # Common employee fields. Note: `cedula` is introduced here and is
        # not part of Nomina.__init__.
        self.nombre=input("Ingresar nombre del empleado: ")
        self.cedula=int(input("Ingresar numero de cedula: "))
        self.direccion=input("Ingresar la direccion del empleado: ")
        self.telefono=int(input("Ingresar numero de contacto del empleado: "))
        self.correo=input("Ingresar correo personal del empleado: ")
    def empleadoObrero(self):
        # Extra field for blue-collar ("obrero") employees.
        self.estadocivil=input("Ingresar estado civil del empleado: ")
    def empleadoOficina(self):
        # Extra field for office employees.
        # TODO (original note, translated): two more office-specific
        # attributes are still missing.
        self.profesion=input("Ingresar profesion del empleado: ")
    def mostrarempleado(self):
        # NOTE(review): this method reads the module-level global `eleccion`
        # set by the driver script below — it breaks if called before that
        # prompt has run.
        print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {} \n".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
        if eleccion==1:
            print("De estado civil",self.estadocivil)
        elif eleccion==2:
            print("Con profesion de",self.profesion)
# class Pagos(Nomina):
# def pagoNormal(self):
#
# def pagoExtra(self, valhora,hoesti,hotraba,incentivos):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.bono=incentivos
# def Nomina(self, nom, valhora,hoesti,hotraba, desc, desper,incentivos):#faltan 8 atributos incluir cosas del empleado y sobretiempo
# self.nombre= nom
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# self.bono=incentivos
# Driver script: collect and show company data, then one employee of the
# kind chosen at the prompt (1 = obrero/blue-collar, 2 = oficina/office).
nomi=Nomina()
emp=Empresa()
emp.datosEmpresa()
emp.mostrarEmpresa()
emple=Empleado()
emple.empleado()
eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
if eleccion==1:
    emple.empleadoObrero()
elif eleccion==2:
    emple.empleadoOficina()
else:
    print("No selecciono el tipo de empleado")
emple.mostrarempleado()  # also reads the global `eleccion` set above
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
8962610fff4cb0c04e27f48269c602230f5ac6d2 | 72d6b3ab3fc2c7014967a156de082d1c617cbf0f | /网优日常/3G4G告警处理及时率计算/3G告警处理及时率计算.py | 4ba1d2f308fa117307d4cdd3906821087ac2f52f | [] | no_license | fengmingshan/python | 19a1732591ad061a8291c7c84e6f00200c106f38 | b35dbad091c9feb47d1f0edd82e568c066f3c6e9 | refs/heads/master | 2021-06-03T08:35:50.019745 | 2021-01-19T15:12:01 | 2021-01-19T15:12:01 | 117,310,092 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 09:43:33 2020
@author: Administrator
"""
import pandas as pd
import os
path = r'C:\Users\Administrator\Desktop\3G历史告警'
os.chdir(path)
files = os.listdir()
li1 = []
for f in files:
df = pd.read_excel(f, encoding = 'gbk')
li1.append(df)
df_3g = pd.concat(li1,axis = 0)
df_3g.告警对象.head(50)
df_3g.columns
及时率 = len(df_3g['编号'][df_3g['持续时间']<=5760])/len(df_3g['编号'])
| [
"fms_python@163.com"
] | fms_python@163.com |
a0c7d02f4e2bb8a7b1c17633b3ac5045a40c65bc | 9c2edc273db48dcb6d31a937510476b7c0b0cc61 | /image_processing/del_file.py | a596e1fc0c663ee3dd03961d3ccd8f904835c1cc | [] | no_license | miyamotok0105/python_sample | 4d397ac8a3a723c0789c4c3e568f3319dd754501 | 77101c981bf4f725acd20c9f4c4891b29fbaea61 | refs/heads/master | 2022-12-19T22:53:44.949782 | 2020-05-05T05:09:22 | 2020-05-05T05:09:22 | 81,720,469 | 1 | 0 | null | 2022-11-22T02:22:55 | 2017-02-12T11:15:08 | Jupyter Notebook | UTF-8 | Python | false | false | 729 | py | # -*- coding: utf-8 -*-
import os
import sys
import cv2
import glob
import numpy as np

# Remove orphan label files: every labels/*.txt whose basename has no
# matching train/*.png image is deleted (best-effort).
in_dir_path = "labels/*.txt"
diff_dir_path = "train/*.png"

# Collect the training-image stems once — O(n + m) overall instead of the
# original O(n * m) rescan of train/ for every single label file.
train_stems = set()
for name_diff in glob.glob(diff_dir_path):
    train_stems.add(os.path.basename(name_diff).replace(".png", ""))

for name in glob.glob(in_dir_path):
    file_name = os.path.basename(name)
    if file_name.replace(".txt", "") not in train_stems:
        print(name)
        try:
            os.remove(name)
        except Exception as e:
            # Bug fix: the original called the undefined name `fuga()` here,
            # so any failed delete crashed with NameError. Log and continue.
            print("exception args:", e.args)
| [
"miyamotok0105@gmail.com"
] | miyamotok0105@gmail.com |
4a2564f4fccabfdd9f67e3a44e32ad2d2b585f8f | 780a18c55af7a8744b408e1efd4aaf08a0d3a3e7 | /passbook/features/mail.py | 8d8ee26c780d1e465d514a4f77e380bdb5745750 | [] | no_license | squidnee/passbook | 86507c6675122f1b67333f55048eb55f3dff664a | 551de76b95049185820a3fc8729fbc126c423994 | refs/heads/master | 2020-03-22T05:27:31.718897 | 2018-07-21T12:30:32 | 2018-07-21T12:30:32 | 139,567,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | # -*- coding: utf-8 -*-
"""
For asynchronous emailing from within the application.
"""
from threading import Thread
from flask import current_app as app
from flask import render_template
from flask_mail import Message
from passbook.features.extensions import mail
mail.init_app(app)
def send_async_email(app, msg):
    """Deliver *msg* on the worker thread, inside an application context.

    Runs as the Thread target started by send_email(); the context push is
    required because Flask-Mail reads configuration from the app.
    """
    with app.app_context():
        mail.send(msg)
def send_email(title, sender, recipients, text, html):
    """Compose a Flask-Mail Message and dispatch it on a background thread.

    The thread delegates to send_async_email so the request that triggered
    the mail does not block on SMTP delivery.
    """
    message = Message(title, sender=sender, recipients=recipients)
    message.body = text
    message.html = html
    worker = Thread(target=send_async_email, args=(app, message))
    worker.start()
def send_password_reset_email(user):
    """Email *user* a password-reset message.

    The token produced by user.get_reset_password_token() is embedded into
    both the plain-text and HTML templates; sender address comes from the
    app's MAIL_FROM_EMAIL setting.
    """
    token = user.get_reset_password_token()
    send_email('[Password Manager] Reset Your Password', sender=app.config['MAIL_FROM_EMAIL'], recipients=[user.email],
               text=render_template('mail/reset_password.txt', user=user, token=token),
               html=render_template('mail/reset_password.html', user=user, token=token)
               )
"smaples@stanford.edu"
] | smaples@stanford.edu |
dde509f06845addfd86028a5fd9f666a831e08a1 | fe7133ea8e879631e63ef3c5312670464ae0970b | /polynom_tester.py | 5b4e8e07f2f6c7b9bf3f831189b014450b77420a | [] | no_license | jonathaw/general_scripts | 4f13c55d3544b829488c1d479c2feff1a6c26829 | 0cf47ab3ade55b9396cb5aea00e09dafd2694067 | refs/heads/master | 2021-01-17T02:50:54.936936 | 2017-03-19T17:18:44 | 2017-03-19T17:18:44 | 41,436,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | #!/usr/bin/env python3.5
import sys

# Quartic-polynomial evaluator: the coefficients c4..c0 and the evaluation
# point z are taken, in order, from argv[1..6].
c4, c3, c2, c1, c0, z = [float(sys.argv[i]) for i in range(1, 7)]
print('found these c4 %f, c3 %f c2 %f, c1 %f and c0 %f. z is %f' % (c4, c3, c2, c1, c0, z))
res = c4*z**4 + c3*z**3 + c2*z**2 + c1*z + c0
print('result is %f' % res)
print('result is %f' % res) | [
"jonathan.weinstein@weizmann.ac.il"
] | jonathan.weinstein@weizmann.ac.il |
ff2b36dd9a9efbe06dd7815e407890549038c6c6 | 4ca340961c32d9738b0e91d55179688a9d9c3ae8 | /fn_qradar_advisor/fn_qradar_advisor/lib/html_gen_visitor.py | 64d0aec9bedbbb97bd8b3c9d7b1ee6d174d3c803 | [
"MIT"
] | permissive | bkaathi/resilient-community-apps | 99a319db837aa0a51bbae60d11c0eb4249395bc8 | 1cd6519808e83f94d2fe6d4eed647f863017535d | refs/heads/master | 2020-04-18T06:27:09.654541 | 2019-01-10T19:51:59 | 2019-01-10T19:51:59 | 167,321,490 | 1 | 0 | MIT | 2019-01-24T07:15:43 | 2019-01-24T07:15:43 | null | UTF-8 | Python | false | false | 3,047 | py | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
from visitors import Visitor
import resources
class HtmlGenVisitor(Visitor):
    """
    A visitor that renders each node of a multi-root STIX tree as an HTML
    list item, accumulating the markup in self.html.
    """
    def __init__(self, log):
        self.obj_count = 0    # nodes visited that are objects
        self.link_count = 0   # nodes visited that are links
        self.html = ""        # accumulated HTML markup
        self.log = log

    def action(self, node):
        """
        Generate one html list item for the given node and append it to
        self.html.

        The item contains an icon for the node's type (URLs defined in
        resources.image_dict, pointing at the official stix2 icon set) and a
        display value extracted from the node. For "observed-data" nodes the
        icon and value come from the node's first contained object instead.

        :param node: The node this visitor is visiting.
        :return: Whether to continue traversing the rest of the tree
        """
        if node.is_link:
            self.link_count = self.link_count + 1
        else:
            self.obj_count = self.obj_count + 1

        # Links get a small trailing link icon.
        link = '<img src="{}" alt="link" style="width:15px; height:15px"/>'.format(resources.image_dict["link"]) if node.is_link else ""

        image = node.type
        value = node.name
        if node.type != "observed-data":
            if node.type in resources.image_dict:
                image = '<img src="{}" alt="{}" style="width:20px; height:20px"/>'.format(resources.image_dict[node.type], node.type)
        else:
            # an observable data. The objects is more interesting
            image = '<img src="{}" alt="{}" style="width:20px; height:20px"/>'.format(resources.image_dict[node.type], node.type)
            if len(node.objects) > 0:
                obj = node.objects["0"]
                type = obj.get("type", "observable-data")
                if type in resources.image_dict:
                    # object like ipv4 can have its own icon
                    image = '<img src="{}" alt="{}" style="width:20px; height:20px"/>'.format(resources.image_dict[type], type)
                else:
                    self.log.error("Unable to find image for type {}".format(type))
                    image = "[{}]".format(type)

                # Display name for files; the raw value otherwise.
                if type == "file":
                    value = obj.get("name", str(obj.get("hashes", "")))
                else:
                    value = obj.get("value", "")
            else:
                self.log.error("No objects found in observed-data: {}".format(str(node)))

        if value == "":
            self.log.error("No value found in node: {}".format(str(node)))

        self.html = self.html + "<li>" + image + " " + str(value) + link + "</li>"
        return True

    def before_children(self, node):
        # Open a nested (bullet-less) list before descending.
        self.html = self.html + '<ul style="list-style-type:none">'

    def after_children(self, node):
        # Close the nested list on the way back up.
        self.html = self.html + "</ul>"
| [
"hpyle@us.ibm.com"
] | hpyle@us.ibm.com |
26bbcaca0df675db0915dbf5351e3014b0f65e57 | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtWidgets/QGraphicsSceneDragDropEvent.py | 1615f03c152583f732fc508fafd6a99aa4750b57 | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | # encoding: utf-8
# module PyQt5.QtWidgets
# from F:\Python\Python36\lib\site-packages\PyQt5\QtWidgets.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import sip as __sip
from .QGraphicsSceneEvent import QGraphicsSceneEvent
class QGraphicsSceneDragDropEvent(QGraphicsSceneEvent):
# no doc
def acceptProposedAction(self): # real signature unknown; restored from __doc__
""" acceptProposedAction(self) """
pass
def buttons(self): # real signature unknown; restored from __doc__
""" buttons(self) -> Qt.MouseButtons """
pass
def dropAction(self): # real signature unknown; restored from __doc__
""" dropAction(self) -> Qt.DropAction """
pass
def mimeData(self): # real signature unknown; restored from __doc__
""" mimeData(self) -> QMimeData """
pass
def modifiers(self): # real signature unknown; restored from __doc__
""" modifiers(self) -> Qt.KeyboardModifiers """
pass
def pos(self): # real signature unknown; restored from __doc__
""" pos(self) -> QPointF """
pass
def possibleActions(self): # real signature unknown; restored from __doc__
""" possibleActions(self) -> Qt.DropActions """
pass
def proposedAction(self): # real signature unknown; restored from __doc__
""" proposedAction(self) -> Qt.DropAction """
pass
def scenePos(self): # real signature unknown; restored from __doc__
""" scenePos(self) -> QPointF """
pass
def screenPos(self): # real signature unknown; restored from __doc__
""" screenPos(self) -> QPoint """
pass
def setDropAction(self, Qt_DropAction): # real signature unknown; restored from __doc__
""" setDropAction(self, Qt.DropAction) """
pass
def source(self): # real signature unknown; restored from __doc__
""" source(self) -> QWidget """
return QWidget
def __init__(self, *args, **kwargs): # real signature unknown
pass
| [
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
c9a7d02df24ef1b3ef79f68d98c77a7c3b8eae0f | 7b4f9a5937c6d390289d7252266cfdd3c62be728 | /sessions/week_4/mandala.py | 62dd01fecabd0f6c1ff21610ab73abf6f19a0c24 | [] | no_license | LCfP-basictrack/basictrack-2020-2021-2b | d7ea1dc651c202d9e433588c9df8cf3554fd80e8 | 268f066a9baade3c4300a72ef7a866e535a714e0 | refs/heads/master | 2023-05-05T22:32:41.738918 | 2021-05-27T15:12:40 | 2021-05-27T15:12:40 | 358,275,084 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | import turtle, math, random
# https://stackoverflow.com/questions/7198144/how-to-draw-a-n-sided-regular-polygon-in-cartesian-coordinates#7198179
from sessions.week_4.mandala_2 import square_curl
def polygon(michelangelo, number_of_sides, radius, center_x=0, center_y=0):
    """Visit the vertices of a regular polygon with the given turtle.

    The turtle jumps to each vertex (number_of_sides + 1 points, so the
    first vertex is revisited to close the shape), briefly dropping and
    lifting the pen at every stop. With a fill active this paints the
    polygon even though no outline is drawn between stops.
    """
    for vertex in range(number_of_sides + 1):
        theta = 2 * math.pi * vertex / number_of_sides
        michelangelo.setposition(radius * math.cos(theta) + center_x,
                                 radius * math.sin(theta) + center_y)
        michelangelo.pendown()
        michelangelo.penup()
# Draw a mandala: concentric filled polygons from 13 sides down to 3, with
# the radius shrinking geometrically so the shapes nest inside each other.
mandala = turtle.Screen()
mandala.bgcolor('cyan')

monk = turtle.Turtle()
monk.penup()

colors = ['red', 'purple', 'blue', 'green', 'yellow', 'orange']

for x in reversed(range(3, 14)):
    # Cycle through the palette; fill each polygon as it is traced.
    monk.fillcolor(colors[x % len(colors)])
    monk.begin_fill()
    polygon(monk, x, 4 * (1.44 ** x))
    monk.end_fill()

# Decorative finishing curl from the project module sessions.week_4.mandala_2.
square_curl(monk, 4)

monk.hideturtle()
mandala.exitonclick()
mandala.exitonclick()
| [
"mail@vincentvelthuizen.com"
] | mail@vincentvelthuizen.com |
4ae7312f1ac15936697e8de1af25a20e2124edf6 | 71764665e27f4b96bab44f38a4a591ffc2171c24 | /hhplt/ui/parallelTestingFrame.py | 13f87984db709d920c82cd50771b6f2dd6cb01db | [] | no_license | kingdomjc/RSU_production_VAT | 693f8c504acc0cc88af92942734ccb85f7e7d7c0 | 9a3d6d3f5a5edfaf30afdff725661630aafe434c | refs/heads/master | 2020-07-31T05:03:46.699606 | 2019-09-24T02:09:53 | 2019-09-24T02:09:53 | 210,491,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,797 | py | #encoding:utf-8
'''
Created on 2016-7-5
并行测试框架界面
对于并行(不管是真正的并行,还是依次串行)的测试,通过槽号来进行标识,
同步显示一批次所有的测试进度。
@author: 张文硕
'''
from PyQt4 import QtCore, QtGui, uic
from PyQt4.Qt import pyqtSlot
from hhplt.ui.AbstractContentFrame import AbstractContentFrame
from hhplt.parameters import SESSION,PARAM
from slotTestingFrame import SlotTestingFrame
from parallelFinishTestFrame import ParallelFinishTestFrame
from hhplt.testengine.parallelTestcase import startParallelTest
class ParallelTestingFrame(AbstractContentFrame):
    """Frame for parallel product testing.

    One SlotTestingFrame per product slot is placed on a grid; each slot's
    progress is reported through Qt signals that carry the slot id as an
    extra argument.
    """
    def _ui(self):
        self.connect(self, QtCore.SIGNAL("LOG(QString)"), self.log)
        self.connect(self, QtCore.SIGNAL("START_ITEM(QString,QString)"), self.markItemStarting)
        self.connect(self, QtCore.SIGNAL("ITEM_FINISH(QString,QString,bool)"), self.markTestItemFinish)
        self.connect(self, QtCore.SIGNAL("FINISH_TEST(QString)"), self.switchToFinishTest)
        # Compared with TestFrame, each of the signal bindings above carries
        # one extra argument: the product slot id.
        return "hhplt/ui/parallelTestingFrame.ui"

    def _initUi(self):
        if self.gridLayout.count() == 0:  # build the slot frames only once
            self.slotFrames = {}
            si = 0
            # Grid coordinates for each slot come from PARAM, e.g. "0,0;0,1;..."
            locs = PARAM["productUiSlots"].split(";")
            for slot in PARAM["productSlots"].split(","):
                y,x = map(lambda p:int(p),locs[si].split(","))
                self.slotFrames[slot] = SlotTestingFrame(self)
                self.gridLayout.addWidget(self.slotFrames[slot],x,y)
                si += 1
        for f in self.slotFrames:
            self.slotFrames[f].initTestingUi(f)
        self.__startExecutingTest()

    def __startExecutingTest(self):
        # Lock navigation while the batch runs, then kick off the parallel test.
        self.mainWnd.logoutButton.setEnabled(False)
        self.mainWnd.suiteSelect.setEnabled(False)
        self.parallelTestResults = set()
        startParallelTest(self)

    ##########################################################################################
    def switchToFinishTest(self,slot):
        self.parallelTestResults.add(slot)
        if len(self.parallelTestResults) == self.gridLayout.count():
            # Reaching here means this whole batch has finished testing.
            self._switchToFrame(ParallelFinishTestFrame)
            self.mainWnd.logoutButton.setEnabled(True)
            self.mainWnd.suiteSelect.setEnabled(True)

    def markItemStarting(self,slot,fun):
        self.slotFrames[unicode(slot)].markItemStarting(fun)

    def markTestItemFinish(self,slot,fun,succOrFail):
        self.slotFrames[unicode(slot)].markTestItemFinish(fun,succOrFail)

    def log(self,msg):
        self.logUi.log(msg)
| [
"929593844@qq.com"
] | 929593844@qq.com |
b2943348859e207bfaed2892e22c2417ff22fd0d | 2c2968b5d1b8bb3b749127ba219c333196c00f3f | /basic data types/swap_case/swap_case.py | 64c910b509b810c71102c2b06ed2dc8c0dd76158 | [] | no_license | jedthompson99/hacker_rank | b047592f6db70ff24811fea1851807ac3ea3473a | 0389019d82c8a2436ca767b73a29495b7fe481b9 | refs/heads/main | 2023-07-16T03:02:06.901263 | 2021-08-27T17:34:03 | 2021-08-27T17:34:03 | 395,409,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | def swap_case(s):
output = ''
for x in s:
if x.isupper() == True:
output += (x.lower())
elif x.isupper() != True:
output += (x.upper())
else:
output += x
return output
if __name__ == '__main__':
    # Read one line from stdin, swap its case and print the result.
    s = str(input())
    result = swap_case(s)
    print(result)
| [
"jedthompson@gmail.com"
] | jedthompson@gmail.com |
fcc4016e7a652f393b1fdc1027ff4893456322c6 | f15f0c1bdf9c1bfffa94d163f4337e02ab728433 | /test_files/lstm_text_generation.py | cd7dfdd0509fecd7e3591ff3045493400e550e37 | [
"MIT"
] | permissive | jichang1/keras-cntk-benchmark | a9bf5083c133f091a71e6ddcdccbc1a8cebbba8a | 5b95f4640f371e0b532f0a14f6bb05466f41eef1 | refs/heads/master | 2020-03-16T11:40:05.376677 | 2017-11-28T07:00:00 | 2017-11-28T07:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | '''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
from CustomCallback import EpochStatsLogger
logger = EpochStatsLogger()
# Download the corpus and lowercase it.
path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
text = open(path).read().lower()
print('corpus length:', len(text))

# Character vocabulary and both lookup directions.
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))

print('Vectorization...')
# One-hot encode inputs and targets. Fix: use the builtin `bool` dtype
# instead of the deprecated alias `np.bool` (removed in NumPy 1.24);
# the resulting arrays are identical.
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM over one-hot characters, followed by a
# softmax over the vocabulary (next-character distribution).
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars)), implementation=2))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))

# RMSprop with a fixed learning rate, cross-entropy over the char classes.
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
    """Draw one index from *preds* after temperature reweighting.

    Lower temperatures sharpen the distribution (more greedy), higher
    temperatures flatten it (more random). Returns the sampled index.
    """
    scaled = np.log(np.asarray(preds).astype("float64")) / temperature
    weights = np.exp(scaled)
    distribution = weights / np.sum(weights)
    draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(draw)
# train the model, output generated text after each iteration
for iteration in [1]:
    print()
    print('-' * 50)
    print('Iteration', iteration)
    model.fit(X, y,
              batch_size=128,
              epochs=10,
              callbacks=[logger])

    # Pick a random seed window from the corpus for generation.
    start_index = random.randint(0, len(text) - maxlen - 1)

    for diversity in [0.5]:
        print()
        print('----- diversity:', diversity)

        generated = ''
        sentence = text[start_index: start_index + maxlen]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)

        # Generate 400 characters, feeding each sampled character back in.
        for i in range(400):
            # One-hot encode the current window.
            x = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(sentence):
                x[0, t, char_indices[char]] = 1.

            preds = model.predict(x, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = indices_char[next_index]

            generated += next_char
            sentence = sentence[1:] + next_char

            sys.stdout.write(next_char)
            sys.stdout.flush()
        print()
| [
"max@minimaxir.com"
] | max@minimaxir.com |
9ead980bf200d6da194cd10898c29065dfec38cb | 8049dd81d52e0659054b574323887cf06dbb03a9 | /api/features/tests/test_helpers.py | a0bf77574438162cf5e3c43ecfbf1acba6c2d0c0 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | funetes/flagsmith | b6bfdb79e023a6e8832e898b13635e36486e5aa2 | 97dba4175fac4e723e7bc4208e004d61e748eed7 | refs/heads/main | 2023-06-21T08:18:53.531171 | 2021-07-26T16:47:01 | 2021-07-26T16:47:01 | 389,799,101 | 1 | 0 | NOASSERTION | 2021-07-26T23:57:23 | 2021-07-26T23:57:23 | null | UTF-8 | Python | false | false | 591 | py | import pytest
from features.helpers import get_correctly_typed_value
from features.value_types import INTEGER, STRING, BOOLEAN


@pytest.mark.parametrize(
    "value_type, string_value, expected_value",
    (
        (INTEGER, "123", 123),
        (BOOLEAN, "True", True),
        (BOOLEAN, "False", False),
        (STRING, "my_string", "my_string"),
        (STRING, "True", "True"),
        (STRING, "False", "False"),
    ),
)
def test_get_correctly_typed_value(value_type, string_value, expected_value):
    # Each stored string value should convert to its declared feature type;
    # STRING values pass through unchanged even when they look boolean.
    assert get_correctly_typed_value(value_type, string_value) == expected_value
| [
"matthewe@solidstategroup.com"
] | matthewe@solidstategroup.com |
6e41b50cc789687ed92640632a1c76ae928a9362 | 2a66fdf4ddcfb475f80a61a8d8c31b3a320c5bae | /code/pprog32.py | 554eeeaa6fe0c241386f2ff8e52df1d8550a5bc7 | [] | no_license | sujasriman/guvi | 12143757bee6e0679ca44f44a6624d34a6dd2cb4 | 74b4caf2a9c58da5e72eabe0b05adfe77310f71b | refs/heads/master | 2020-05-31T07:24:37.295062 | 2019-08-12T18:24:22 | 2019-08-12T18:24:22 | 190,163,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | n,k=input().split()
# n = element count, k = value to look for (both read on the fused first line).
n = int(n)
k = int(k)
values = list(map(int, input().split()))
# Scan the first n values for k; stop at the first hit.
found = False
for i in range(n):
    if values[i] == k:
        found = True
        break
print("Yes" if found else "No")
| [
"noreply@github.com"
] | sujasriman.noreply@github.com |
a9657d3c7fb0f937feb3b41032e5aac72b6c7716 | 1d23c51bd24fc168df14fa10b30180bd928d1ea4 | /Lib/site-packages/cryptography/hazmat/_der.py | 66d32e3516b01dd420e3f3a2a7925c4e6ad0ff56 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | BeaverInc/covid19CityMontreal | 62dac14840dadcdf20985663bc2527c90bab926c | 1b283589f6885977a179effce20212a9311a2ac0 | refs/heads/master | 2021-05-22T20:01:22.443897 | 2020-06-21T08:00:57 | 2020-06-21T08:00:57 | 253,067,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,204 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import six
from cryptography.utils import int_from_bytes, int_to_bytes
# This module contains a lightweight DER encoder and decoder. See X.690 for the
# specification. This module intentionally does not implement the more complex
# BER encoding, only DER.
#
# Note this implementation treats an element's constructed bit as part of the
# tag. This is fine for DER, where the bit is always computable from the type.
CONSTRUCTED = 0x20
CONTEXT_SPECIFIC = 0x80
INTEGER = 0x02
BIT_STRING = 0x03
OCTET_STRING = 0x04
NULL = 0x05
OBJECT_IDENTIFIER = 0x06
SEQUENCE = 0x10 | CONSTRUCTED
SET = 0x11 | CONSTRUCTED
PRINTABLE_STRING = 0x13
UTC_TIME = 0x17
GENERALIZED_TIME = 0x18
class DERReader(object):
    """Minimal DER (X.690) parser over a read-only byte buffer.

    Reads consume from the front of ``self.data`` (a memoryview). Every
    read method raises ValueError on truncated input or on encodings that
    are legal BER but not minimal DER.
    """
    def __init__(self, data):
        self.data = memoryview(data)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # On clean exit require that the whole buffer was consumed.
        if exc_value is None:
            self.check_empty()

    def is_empty(self):
        return len(self.data) == 0

    def check_empty(self):
        if not self.is_empty():
            raise ValueError("Invalid DER input: trailing data")

    def read_byte(self):
        """Consume and return the next byte as an int."""
        if len(self.data) < 1:
            raise ValueError("Invalid DER input: insufficient data")
        ret = six.indexbytes(self.data, 0)
        self.data = self.data[1:]
        return ret

    def read_bytes(self, n):
        """Consume and return the next *n* bytes as a memoryview."""
        if len(self.data) < n:
            raise ValueError("Invalid DER input: insufficient data")
        ret = self.data[:n]
        self.data = self.data[n:]
        return ret

    def read_any_element(self):
        """Read one TLV element; return (tag, DERReader over its body)."""
        tag = self.read_byte()
        # Tag numbers 31 or higher are stored in multiple bytes. No supported
        # ASN.1 types use such tags, so reject these.
        if tag & 0x1f == 0x1f:
            raise ValueError("Invalid DER input: unexpected high tag number")
        length_byte = self.read_byte()
        if length_byte & 0x80 == 0:
            # If the high bit is clear, the first length byte is the length.
            length = length_byte
        else:
            # If the high bit is set, the first length byte encodes the length
            # of the length.
            length_byte &= 0x7f
            if length_byte == 0:
                raise ValueError(
                    "Invalid DER input: indefinite length form is not allowed "
                    "in DER"
                )
            length = 0
            for i in range(length_byte):
                length <<= 8
                length |= self.read_byte()
                if length == 0:
                    # A leading zero length byte would not be minimal.
                    raise ValueError(
                        "Invalid DER input: length was not minimally-encoded"
                    )
            if length < 0x80:
                # If the length could have been encoded in short form, it must
                # not use long form.
                raise ValueError(
                    "Invalid DER input: length was not minimally-encoded"
                )
        body = self.read_bytes(length)
        return tag, DERReader(body)

    def read_element(self, expected_tag):
        """Read one element and require its tag to be *expected_tag*."""
        tag, body = self.read_any_element()
        if tag != expected_tag:
            raise ValueError("Invalid DER input: unexpected tag")
        return body

    def read_single_element(self, expected_tag):
        """Read one element and require it to span the whole buffer."""
        with self:
            return self.read_element(expected_tag)

    def read_optional_element(self, expected_tag):
        """Read the next element only if its tag matches; else return None."""
        if len(self.data) > 0 and six.indexbytes(self.data, 0) == expected_tag:
            return self.read_element(expected_tag)
        return None

    def as_integer(self):
        """Interpret the remaining buffer as a non-negative DER INTEGER body."""
        if len(self.data) == 0:
            raise ValueError("Invalid DER input: empty integer contents")
        first = six.indexbytes(self.data, 0)
        if first & 0x80 == 0x80:
            raise ValueError("Negative DER integers are not supported")
        # The first 9 bits must not all be zero or all be ones. Otherwise, the
        # encoding should have been one byte shorter.
        if len(self.data) > 1:
            second = six.indexbytes(self.data, 1)
            if first == 0 and second & 0x80 == 0:
                raise ValueError(
                    "Invalid DER input: integer not minimally-encoded"
                )
        return int_from_bytes(self.data, "big")
def encode_der_integer(x):
    """Encode the non-negative integer *x* as a DER INTEGER body.

    One extra byte beyond the bit length guarantees the sign bit of the
    leading byte is clear (DER integers are signed big-endian).
    """
    if not isinstance(x, six.integer_types):
        raise ValueError("Value must be an integer")
    if x < 0:
        raise ValueError("Negative integers are not supported")
    byte_length = x.bit_length() // 8 + 1
    return int_to_bytes(x, byte_length)
def encode_der(tag, *children):
    """Encode one DER TLV element: *tag*, a length, then the *children* bodies.

    Short-form length for bodies under 0x80 bytes; long-form (length of the
    length, then the length) otherwise.
    """
    total = 0
    for child in children:
        total += len(child)
    parts = [six.int2byte(tag)]
    if total < 0x80:
        parts.append(six.int2byte(total))
    else:
        encoded_length = int_to_bytes(total)
        parts.append(six.int2byte(0x80 | len(encoded_length)))
        parts.append(encoded_length)
    parts.extend(children)
    return b"".join(parts)
| [
"36340780+lanyutian88@users.noreply.github.com"
] | 36340780+lanyutian88@users.noreply.github.com |
b8f96911f2fe91e250fa087a46fb33eab202d105 | 4de28b1f6d97640834e4a795e68ca9987f9e2cd5 | /check plugins 2.2/nutanix/agent_based/prism_vm_tools.py | be70a2deafab1a2af92d49a8082c9123645f307f | [] | no_license | Yogibaer75/Check_MK-Things | affa0f7e6e772074c547f7b1df5c07a37dba80b4 | 029c546dc921c4157000d8ce58a878618e7bfa97 | refs/heads/master | 2023-09-01T15:52:28.610282 | 2023-08-29T06:18:52 | 2023-08-29T06:18:52 | 20,382,895 | 47 | 16 | null | 2023-07-30T15:52:22 | 2014-06-01T18:04:07 | Python | UTF-8 | Python | false | false | 3,130 | py | #!/usr/bin/env python3
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# (c) Andreas Doehler <andreas.doehler@bechtle.com/andreas.doehler@gmail.com>
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from typing import Any, Dict, Mapping
from cmk.base.plugins.agent_based.agent_based_api.v1 import register, Result, Service, State
from cmk.base.plugins.agent_based.agent_based_api.v1.type_defs import CheckResult, DiscoveryResult
Section = Dict[str, Any]
def discovery_prism_vm_tools(section: Section) -> DiscoveryResult:
    """Discover one service when the VM section carries guest-tools data."""
    tools_data = section.get("nutanixGuestTools")
    if tools_data:
        yield Service()
def check_prism_vm_tools(params: Mapping[str, Any], section: Section) -> CheckResult:
    """Compare the VM's Nutanix guest-tools state with the configured targets.

    Two independent expectations from the rule params are checked:
    "tools_install" (installed / not_installed) against installedVersion, and
    "tools_enabled" (enabled / disabled) against the enabled flag. Each
    mismatch yields WARN, each match OK.
    """
    install_state = params.get("tools_install")
    enabled_state = params.get("tools_enabled")
    data = section.get("nutanixGuestTools")
    if not data:
        return

    tool_install = data["installedVersion"]
    tool_enabled = data["enabled"]

    # Installed-version expectation vs. reality.
    if tool_install is None and install_state == "installed":
        yield Result(state=State.WARN, summary="No tools installed but should be.")
    elif tool_install is None and install_state == "not_installed":
        yield Result(state=State.OK, summary="No tools installed")
    elif tool_install is not None and install_state == "installed":
        yield Result(state=State.OK, summary="Tools with version %s installed" % tool_install)
    elif tool_install is not None and install_state == "not_installed":
        yield Result(
            state=State.WARN,
            summary="Tools with version %s installed but should not be" % tool_install,
        )

    # Enabled-state expectation vs. reality (enabled may be True/False/None).
    if tool_enabled and enabled_state == "enabled":
        yield Result(state=State.OK, summary="Tools enabled")
    elif tool_enabled and enabled_state == "disabled":
        message = "Tools enabled, but should be disabled"
        yield Result(state=State.WARN, summary=message)
    elif tool_enabled is None and enabled_state == "enabled":
        message = "No tools enabled, but should be enabled"
        yield Result(state=State.WARN, summary=message)
    else:
        message = "No tools enabled"
        yield Result(state=State.OK, summary=message)
# Register the check with Checkmk: reuses the "prism_vm" agent section and
# defaults to expecting tools installed and enabled.
register.check_plugin(
    name="prism_vm_tools",
    service_name="NTNX VMTools",
    sections=["prism_vm"],
    check_default_parameters={
        "tools_install": "installed",
        "tools_enabled": "enabled",
    },
    discovery_function=discovery_prism_vm_tools,
    check_function=check_prism_vm_tools,
    check_ruleset_name="prism_vm_tools",
)
| [
"andreas.doehler@gmail.com"
] | andreas.doehler@gmail.com |
78e1aa79ef0a25dffcf228bef631c8ab7fc53e16 | 54f5bd4106b6e70d04708e3c4dc10421fe8b93bd | /tensor/matrix_product.py | 6bc2193390698a623f9797c0580f9e59914eff67 | [] | no_license | hslee1539/tic_tac_toe | 2c11f28e681681314e8f9b9a8cecccfd1f94f494 | aa7aa50de55ef4825f05a6d6ad1b00be7f40f770 | refs/heads/master | 2020-07-26T09:25:23.165078 | 2019-09-15T15:56:14 | 2019-09-15T15:56:14 | 208,603,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,943 | py | def matmul(left_array, left_shape, right_array, right_shape, out):
left_len = len (left_array)
right_len = len(right_array)
left_shape_len = len(left_shape)
right_shape_len = len(right_shape)
left_sw = 2 // (left_shape_len + 1)
right_sw = 2 // (right_shape_len + 1)
row = left_shape[-1 -1 % left_shape_len] * (1 - left_sw) + left_sw
col = right_shape[-1] * (1 - right_sw) + right_sw
matrix2d = col * row
right_matrix = col * right_shape[-1 -1 % right_shape_len]
product_max = left_shape[-1]
step_max = len(out)
step = 0
while(step < step_max):
left_product_step = step // col * product_max
right_product_step = step // matrix2d * right_matrix + step % col
product_step = 0
out[step] = 0
while(product_step < product_max):
out[step] += left_array[
(left_product_step + product_step) % left_len
] * right_array[
(right_product_step + product_step * col) % right_len
]
product_step += 1
step += 1
return None
def create_variables (left_array, left_shape, right_array, right_shape, dtype = float, stype = int):
    """Allocate the output buffers for :func:`matmul`.

    Returns ``(out_array, out_shape)``: a flat list of ``dtype`` zeros sized
    for the matmul result, and the result shape as a list of ``stype``.
    Mirrors numpy.matmul's shape rules: a 1-D operand loses a dimension in
    the result; 0-D operands are rejected (by a ZeroDivisionError below).
    """
    # Step 1: find the higher-dimensional tensor.
    # Purpose: it determines the output shape — dropping its last two axes
    # leaves the leading (batch) axes of the result.  (Slightly different
    # when either operand is 1-D.)
    left_len = len(left_array)
    right_len = len(right_array)
    left_shape_len = len(left_shape)
    right_shape_len = len(right_shape)
    if(left_shape_len == right_shape_len):
        if(left_len > right_len):
            higher_array = left_array
            higher_shape = left_shape
        else:
            higher_array = right_array
            higher_shape = right_shape
    elif(left_shape_len > right_shape_len):
        higher_array = left_array
        higher_shape = left_shape
    else:
        higher_array = right_array
        higher_shape = right_shape
    # Note on 0-D operands:
    # a 0-D shape makes this `1 // 0`, which raises ZeroDivisionError.
    # numpy.matmul likewise rejects such operands with a message telling
    # you to use multiply instead.
    is_left_1d = 1 // left_shape_len
    is_right_1d = 1 // right_shape_len
    # Code to use instead if 0-D were to be allowed:
    #left_sw = 2 // (len(left_shape) + 1)
    #right_sw = 2 // (len(right_shape) + 1)
    # The output rank follows the higher-rank operand, reduced by one
    # dimension when exactly one operand is 1-D.
    out_shape_len = len(higher_shape) - (is_left_1d ^ is_right_1d)
    out_shape = [stype(0)] * out_shape_len
    # Axes at -3 and below simply copy the higher-rank operand's shape.
    for i in range(out_shape_len - 2):
        out_shape[i] = higher_shape[i]
    is_not_left_1d = 1 - is_left_1d
    is_not_right_1d = 1 - is_right_1d
    is_left_3d_or_more = 2 % left_shape_len // 2
    is_right_3d_or_more = 2 % right_shape_len // 2
    is_not_left_3d_or_more = 1 - is_left_3d_or_more
    is_not_right_3d_or_more = 1 - is_right_3d_or_more
    left_minus_1d = left_shape[-1]
    right_minus_1d = right_shape[-1]
    left_minus_2d = left_shape[-1 -1 % left_shape_len] # falls back to the -1 axis when the operand is 1-D.
    right_minus_2d = right_shape[-1 -1 % right_shape_len] # falls back to the -1 axis when the operand is 1-D.
    left_minus_3d = left_shape[-1 -2 % left_shape_len] # falls back to the -1 axis when the operand is too low-rank.
    right_minus_3d = right_shape[-1 -2 % right_shape_len]# falls back to the -1 axis when the operand is too low-rank.
    # Decide the -1 axis of the output shape (branch-free case selection).
    out_shape[-1] = (
        right_minus_1d * is_not_left_1d * is_not_right_1d # both operands 2-D or higher
        + right_minus_1d * is_left_1d * is_not_right_1d # left is 1-D
        + left_minus_2d * is_not_left_1d * is_right_1d)# right is 1-D
        #+ is_left_1d * is_right_1d)# this term would duplicate the code below.
    # Decide the -2 axis of the output shape.
    out_shape[-1 -1 % out_shape_len] = (
        left_minus_2d * is_not_left_1d * is_not_right_1d
        + right_minus_3d * is_left_1d * is_right_3d_or_more
        + left_minus_3d * is_left_3d_or_more * is_right_1d
        + out_shape[-1] * is_left_1d * is_not_right_1d * is_not_right_3d_or_more# left 1-D, right 2-D
        + out_shape[-1] * is_not_left_1d * is_right_1d * is_not_left_3d_or_more# left 2-D, right 1-D
        + is_left_1d * is_right_1d)# both 1-D (then out_shape has length 1 and this index wraps to -1.)
    out_array = [dtype(0)] * (  (left_minus_2d * is_not_left_1d + is_left_1d)
                              * (right_minus_1d * is_not_right_1d + is_right_1d)
                              * len(higher_array)
                              // higher_shape[-1]
                              // higher_shape[-1 -1 % len(higher_shape)])
    return out_array, out_shape
| [
"qq1539@naver.com"
] | qq1539@naver.com |
4ec0b9211122485ed880ff863f78aa3d988ebc2d | 7f4c82f7eb8d2805e378586f14e214cdaacfdb4a | /books/model/CreditNoteRefundList.py | e4cafcf6c8dae80d6e27cbab18a1431ba8a20d16 | [
"MIT"
] | permissive | deepubansal/books-python-wrappers | 5a922267ec8382b3542638d894c96f4891b57bf5 | 51210c8d557a32564f976a56214d3c0807f46a90 | refs/heads/master | 2022-12-05T11:25:01.694021 | 2020-08-29T07:35:23 | 2020-08-29T07:35:23 | 288,738,813 | 0 | 0 | MIT | 2020-08-29T07:35:24 | 2020-08-19T13:26:04 | Python | UTF-8 | Python | false | false | 1,094 | py | #$Id$
class CreditNoteRefundList:
    """Container for credit-note refunds plus their paging information."""

    def __init__(self):
        """Create an empty refund list with an empty page context."""
        self.creditnote_refunds = []
        self.page_context = {}

    def set_creditnote_refunds(self, creditnote_refunds):
        """Append a credit-note refund object to the list.

        Args:
            creditnote_refunds(instance): Creditnote refunds object.
        """
        self.creditnote_refunds.append(creditnote_refunds)

    def get_creditnote_refunds(self):
        """Return the accumulated credit-note refund objects.

        Returns:
            list: List of creditnote refunds.
        """
        return self.creditnote_refunds

    def set_page_context(self, page_context):
        """Store the page context for this result set.

        Args:
            page_context(instance): Page context object.
        """
        self.page_context = page_context

    def get_page_context(self):
        """Return the stored page context.

        Returns:
            instance: Page context object.
        """
        return self.page_context
| [
"sahaya.ramesh@zohocorp.com"
] | sahaya.ramesh@zohocorp.com |
8fe3b7e9d6e9abee3796ba161cfd87a85f927ece | d110accdd631abf4b7956f0d31a8c04dc79dd915 | /lightbus_examples/ex03_worked_example/store/web.py | 5368b7288cd7b8e18f690619ca828c976960bbb3 | [
"MIT"
] | permissive | gamb/lightbus | 000462f8b38391d5b9299fd52066dd034c4538ec | 60b896d632d60c9e1df1163e3e5b9173e3405d22 | refs/heads/master | 2020-03-13T06:32:43.425464 | 2018-04-18T16:01:58 | 2018-04-18T16:01:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | """A simple pet shop
Shows a list of animals, and you can click on each one.
Image resizing and page view tracking performed using lightbus.
"""
import os
import lightbus
from flask import Flask
from lightbus_examples.ex03_worked_example.store.bus import StoreApi
app = Flask(__name__)
lightbus.configure_logging()
bus = lightbus.create(flask=True)
PETS = (
'http://store.company.com/image1.jpg',
'http://store.company.com/image2.jpg',
'http://store.company.com/image3.jpg',
)
@app.route('/')
def home():
    """Render the store landing page: one resized, clickable image per pet.

    Resizing goes through the lightbus RPC and each render fires a
    page_view event on the bus.
    """
    pieces = ['<h1>Online pet store</h1><br>']
    for pet_num, image_url in enumerate(PETS):
        resized_url = bus.image.resize(url=image_url, width=200, height=200)
        pieces.append(
            f'<a href="/pet/{pet_num}">'
            f'<img src="{resized_url}">'
            f'</a> '
        )
    bus.store.page_view.fire(url='/')
    return ''.join(pieces)
@app.route('/pet/<int:pet_num>')
def pet(pet_num):
    """Render the detail page for one pet: a heading plus its resized image.

    Fires a page_view event on the bus for analytics, mirroring home().
    """
    resized_url = bus.image.resize(url=PETS[pet_num], width=200, height=200)
    bus.store.page_view.fire(url=f'/pet/{pet_num}')
    html = f'<h1>Pet {pet_num}</h1>'
    # Bug fix: the original reassigned `html` here, silently discarding the
    # <h1> heading built on the previous line.
    html += f'<img src="{resized_url}"><br />'
    return html
| [
"adam@adamcharnock.com"
] | adam@adamcharnock.com |
35b998323f6bb0195b9f2823f30d52a8cf822742 | ac810c7e637afd67cf19704a1a724eaac56fed93 | /Hackerrank_python/7.collections/57.Collections.namedtuple().py | 7671875e84d41813fa0377ced5ca03bf34070b61 | [
"MIT"
] | permissive | Kushal997-das/Hackerrank | 57e8e422d2b47d1f2f144f303a04f32ca9f6f01c | 1256268bdc818d91931605f12ea2d81a07ac263a | refs/heads/master | 2021-10-28T06:27:58.153073 | 2021-10-18T04:11:18 | 2021-10-18T04:11:18 | 298,875,299 | 41 | 8 | MIT | 2021-03-01T04:40:57 | 2020-09-26T18:26:19 | Python | UTF-8 | Python | false | false | 326 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import*

# Read the student count and the header row once, then build the row type a
# single time.  The original rebuilt the namedtuple class on every loop
# iteration, which is wasted work.
N = int(input())
fields = input().split()
Student = namedtuple("k", fields)
total_mark = 0
for _ in range(N):
    # Values arrive in the column order given by `fields`; bind them
    # positionally and look the marks up by field name.
    row = Student(*input().split())
    total_mark += int(row.MARKS)
print(total_mark / N)
| [
"noreply@github.com"
] | Kushal997-das.noreply@github.com |
d935f89a5d154b7925bd879688314e70c09b2e57 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/rds/models/HealthCheckSpec.py | a1fb8cb7aff9f9ad6b8cc01af9db3c30f1b4bbbb | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,376 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class HealthCheckSpec(object):
    """Health-check settings for an instance; every field is optional and
    stays ``None`` when unset, in which case the service default applies."""

    def __init__(self, monitorInterval=None, backendConnectTimeout=None, backendConnectAttempts=None):
        """
        :param monitorInterval: (Optional) health-check interval in seconds, range 1~600; service default 100
        :param backendConnectTimeout: (Optional) backend connect timeout in seconds, range 1~60; service default 3
        :param backendConnectAttempts: (Optional) backend connect retry count, range 1~10; service default 1
        """
        self.backendConnectAttempts = backendConnectAttempts
        self.backendConnectTimeout = backendConnectTimeout
        self.monitorInterval = monitorInterval
| [
"tancong@jd.com"
] | tancong@jd.com |
65b5b3e7e31a21d6f66955337417b51540fe8ccc | 5b4312ddc24f29538dce0444b7be81e17191c005 | /autoware.ai/1.12.0/build/kitti_launch/catkin_generated/pkg.installspace.context.pc.py | be9e0f38a9a2832bbbb66c6f61a11dd2de1c9e1f | [
"MIT"
] | permissive | muyangren907/autoware | b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2 | 5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38 | refs/heads/master | 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "kitti_box_publisher".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kitti_launch"
PROJECT_SPACE_DIR = "/home/myr907/autoware.ai/install"
PROJECT_VERSION = "1.11.0"
| [
"907097904@qq.com"
] | 907097904@qq.com |
7d7ec66f8142bd94932272b4eec055ff3d7e4917 | 6d5922f1c893d6c622c61b748910c2d0dda77587 | /SamplePrj/bin/django-admin | e01b6123484b23962fb20ecf12811811998392b6 | [] | no_license | shivamvku/Django_sampleApp | 263073dc4b40be93c89c330d37cb29b2cbac1f6f | 1ae4420ece4e8fe7d435750280859e3f4e9bcb2f | refs/heads/master | 2023-04-26T00:25:05.297391 | 2021-05-31T00:53:35 | 2021-05-31T00:53:35 | 372,249,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | #!/home/vineet/Documents/project/DjangoApp/SamplePrj/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffixes that setuptools entry points
    # append to argv[0] on Windows, then hand control to Django's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"shivam.vku@gmail.com"
] | shivam.vku@gmail.com | |
ee76adfa6f20e7a3034f06793ce2524d334a7239 | 305e09b8b0df9f91a20ce6fb92e50342d41aadbf | /app/auth/views.py | 987f61d3facf589c5af2c2a24266602a87d487a7 | [] | no_license | gichimux/househelps | 432051bc23694b69929e67b8706e91cc6512f8f8 | c4693c9e4e977e42cf3d8a3d5f351857b3868f77 | refs/heads/master | 2023-02-04T07:11:00.961087 | 2019-06-07T02:49:36 | 2019-06-07T02:49:36 | 190,185,329 | 0 | 0 | null | 2023-02-02T06:28:22 | 2019-06-04T11:08:09 | Python | UTF-8 | Python | false | false | 1,695 | py | from flask import render_template,redirect,url_for,flash,request
from ..models import User
from .forms import RegistrationForm, LoginForm
from .. import db
from . import auth
from flask_login import login_user, logout_user, login_required
# from ..email import mail_message
@auth.route('/login', methods=["GET","POST"])
def login():
    """Render the login form and authenticate the user on POST.

    On a valid email/password pair the user is logged in (honouring the
    "remember me" checkbox) and redirected to the `next` URL if given,
    otherwise to the index page.  Invalid credentials flash a message and
    re-render the form.
    """
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email = login_form.email.data).first()
        if user is not None and user.verify_password(login_form.password.data):
            login_user(user, login_form.remember.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('invalid username or password')

    title = "pitcher login"
    return render_template('auth/login.html', login_form = login_form, title = title)
@auth.route('/register',methods = ["GET","POST"])
def register():
    """Create a new user account and redirect to the login page.

    On an invalid or GET request the registration form is re-rendered.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email = form.email.data, fname = form.fname.data,lname= form.lname.data, password = form.password.data)
        db.session.add(user)
        db.session.commit()
        # mail_message("Welcome to Pitcher", "email/welcome_user", user.email, user=user)
        return redirect(url_for('auth.login'))
    title = "New Account"
    # Fix: `title` was assigned but never handed to the template; pass it
    # through so the page title renders, matching login() above.
    return render_template('auth/register.html', registration_form = form, title = title)
@auth.route('/logout')
@login_required
def logout():
    """End the current session, flash a confirmation, and return to the index."""
    logout_user()
    flash('you have been logged out')
    return redirect(url_for("main.index"))
"gichimumwai1@gmail.com"
] | gichimumwai1@gmail.com |
b2e36bd882e5083c0a204cf4a02239586cacfa94 | 99f3d691f54bde953c68f7d4e3e88008d7f4aa6f | /basic/solution/string1.py | 75e2818b89488b247d2ccc1006812dd0fadf3bb5 | [
"Apache-2.0"
] | permissive | psych-214-fall-2016/day_00_lab | 4ca692030a80f4bae3947557a76bbc5d7185e4f0 | 9281731e9fb6a50c48d31e0142c2ff08de39afe6 | refs/heads/master | 2020-08-04T19:29:15.114615 | 2016-09-01T18:13:12 | 2016-09-01T18:13:12 | 67,155,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,184 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Python 3 port by Matthew Brett 2016, also Apache 2.0
from __future__ import print_function
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', using 'many' once count reaches 10."""
    # +++your code here+++
    # LAB(begin solution)
    quantity = str(count) if count < 10 else 'many'
    return 'Number of donuts: ' + quantity
    # LAB(replace solution)
    # return
    # LAB(end solution)
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first two plus last two chars of s, or '' if len(s) < 2."""
    # +++your code here+++
    # LAB(begin solution)
    if len(s) >= 2:
        return s[:2] + s[-2:]
    return ''
    # LAB(replace solution)
    # return
    # LAB(end solution)
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Replace every later occurrence of s[0] with '*', keeping the first char."""
    # +++your code here+++
    # LAB(begin solution)
    return s[0] + s[1:].replace(s[0], '*')
    # LAB(replace solution)
    # return
    # LAB(end solution)
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two chars of a and b swapped."""
    # +++your code here+++
    # LAB(begin solution)
    return '%s%s %s%s' % (b[:2], a[2:], a[:2], b[2:])
    # LAB(replace solution)
    # return
    # LAB(end solution)
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Run every exercise function against known inputs, reporting via test()."""
    print('donuts')
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print()
    print('both_ends')
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print()
    print('fix_start')
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print()
    print('mix_up')
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| [
"matthew.brett@gmail.com"
] | matthew.brett@gmail.com |
5f7d4e4dd752b6d0ce1460a65810953cf47cc208 | 9b8e2992a38f591032997b5ced290fe1acc3ad94 | /histmatch.py | d8c1703c07d63dfddc7dade79b2e26ff7704a1ef | [] | no_license | girishdhegde/aps-2020 | c694443c10d0d572c8022dad5a6ce735462aaa51 | fb43d8817ba16ff78f93a8257409d77dbc82ced8 | refs/heads/master | 2021-08-08T04:49:18.876187 | 2021-01-02T04:46:20 | 2021-01-02T04:46:20 | 236,218,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt

img1 = cv2.imread("c:/users/girishhegde/dog.jpg")      # BGR colour image
img2 = cv2.imread("c:/users/girishhegde/dog.jpg", 0)   # same file as grayscale

lab = cv2.cvtColor(img1, cv2.COLOR_BGR2Lab)
# Bug fix: lab[0] was only the first ROW of pixels; the lightness channel
# of a Lab image is lab[:, :, 0].
hic = cv2.equalizeHist(lab[:, :, 0])

# Bug fixes in the histogram computation:
#  * the original looped over only the first 256 rows and fancy-indexed the
#    counter array with whole rows, so it neither visited every pixel nor
#    accumulated duplicate intensities within a row;
#  * np.uint8 counters wrap around after 255 pixels per bin.
# np.bincount over the flattened image gives the true per-intensity counts.
histcolor = np.bincount(hic.ravel(), minlength=256)
plt.subplot(2, 1, 1)  # subplot() needs (rows, cols, index); (2, 1) is invalid
plt.bar(range(256), histcolor)

hig = cv2.equalizeHist(img2)
# Bug fix: the gray histogram was built from `hic` (the colour L channel)
# instead of the equalized grayscale image `hig`.
histgray = np.bincount(hig.ravel(), minlength=256)
plt.subplot(2, 1, 2)
plt.bar(range(256), histgray)

plt.show()
cv2.imshow("hist", hig)
cv2.waitKey(0)  # without a waitKey the imshow window closes immediately
"girsihdhegde12499@gmail.com"
] | girsihdhegde12499@gmail.com |
762f912d85fb05fdc5dd38d2cc8641015b20312f | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/statistics/view/innerglobalstats/innerglobalstats.py | a7d4f4cb7cb8f74f005f21b742bdaf34d605fc59 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 2,119 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class InnerGlobalStats(Base):
    """NOT DEFINED
    The InnerGlobalStats class encapsulates a required innerGlobalStats resource which will be retrieved from the server every time the property is accessed.
    """
    __slots__ = ()
    # REST resource name this class maps to on the server.
    _SDM_NAME = 'innerGlobalStats'
    # Maps the Python-visible attribute names to the REST attribute names
    # used by the server-side data model.
    _SDM_ATT_MAP = {
        'ColumnCaptions': 'columnCaptions',
        'RowValues': 'rowValues',
    }

    def __init__(self, parent):
        super(InnerGlobalStats, self).__init__(parent)

    @property
    def ColumnCaptions(self):
        """
        Returns
        -------
        - list(str): NOT DEFINED
        """
        # Per the class docstring, fetched from the server on every access.
        return self._get_attribute(self._SDM_ATT_MAP['ColumnCaptions'])

    @property
    def RowValues(self):
        """
        Returns
        -------
        - list(str): NOT DEFINED
        """
        # Per the class docstring, fetched from the server on every access.
        return self._get_attribute(self._SDM_ATT_MAP['RowValues'])
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.