[Dataset viewer header: columns are content (string, lengths 22 to 815k) and id (int64, 0 to 4.91M).]
def load_bbbp_dataset(data_path, task_names=None, featurizer=None):
"""Load bbbp dataset ,process the classification labels and the input information.
Description:
The data file contains a csv table, in which columns below are used:
Num:number
name:Name of the compound
smiles:SMILES representation of the molecular structure
p_np:Binary labels for penetration/non-penetration
Args:
data_path(str): the path to the cached npz path.
task_names(list): a list of header names to specify the columns to fetch from
the csv file.
featurizer(pahelix.featurizers.Featurizer): the featurizer to use for
processing the data. If not none, The ``Featurizer.gen_features`` will be
applied to the raw data.
Returns:
an InMemoryDataset instance.
Example:
.. code-block:: python
dataset = load_bbbp_dataset('./bbbp/raw')
print(len(dataset))
References:
[1] Martins, Ines Filipa, et al. “A Bayesian approach to in silico blood-brain barrier penetration modeling.” Journal of chemical information and modeling 52.6 (2012): 1686-1697.
"""
if task_names is None:
task_names = get_default_bbbp_task_names()
csv_file = os.listdir(data_path)[0]
input_df = pd.read_csv(join(data_path, csv_file), sep=',')
smiles_list = input_df['smiles']
from rdkit.Chem import AllChem
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    preprocessed_rdkit_mol_objs_list = [m if m is not None else None for m in
            rdkit_mol_objs_list]
    smiles_list = [AllChem.MolToSmiles(m) if m is not None else
            None for m in preprocessed_rdkit_mol_objs_list]
labels = input_df[task_names]
# convert 0 to -1
labels = labels.replace(0, -1)
# there are no nans
data_list = []
for i in range(len(smiles_list)):
if smiles_list[i] is None:
continue
raw_data = {}
raw_data['smiles'] = smiles_list[i]
raw_data['label'] = labels.values[i]
        if featurizer is not None:
data = featurizer.gen_features(raw_data)
else:
data = raw_data
        if data is not None:
data_list.append(data)
dataset = InMemoryDataset(data_list)
return dataset
| 5,344,700
|
def execute(connection, cmdline, **kwargs):
"""generic function to execute command for device
| Parameters:
| connection (Adaptor): connection of device
| cmdline (str): command line
| kwargs (dict): additional keyword arguments for command line execution
| Returns:
| str: output of command line
"""
result = Dgs.execute_cmdline(connection, cmdline, **kwargs)
return result
| 5,344,701
|
def json_to_numpy_mask(shapes, width, height):
"""Converts JSON labels with pixel classifications into NumPy arrays"""
img = Image.new("L", (width, height), 0)
for shape in shapes:
if shape["label"] == "barrel":
barrel_lst = [tuple(i) for i in shape["points"]]
ImageDraw.Draw(img).polygon(barrel_lst, outline=1, fill=1)
if shape["label"] == "line":
line_lst = [tuple(i) for i in shape["points"]]
ImageDraw.Draw(img).polygon(line_lst, outline=2, fill=2)
mask = np.array(img)
return mask
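# A minimal usage sketch of json_to_numpy_mask with a hypothetical labelme-style
# annotation (the shapes, labels, and 64x64 size are made up for illustration;
# assumes the PIL/NumPy imports used by the function above).
example_shapes = [
    {"label": "barrel", "points": [[10, 10], [40, 10], [40, 40], [10, 40]]},
    {"label": "line", "points": [[0, 50], [63, 50], [63, 55], [0, 55]]},
]
example_mask = json_to_numpy_mask(example_shapes, width=64, height=64)
print(example_mask.shape, np.unique(example_mask))  # (64, 64) [0 1 2]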
| 5,344,702
|
def create_record(input_path, wdl_location, non_static_inputs, assay_name):
"""Create sample records for mongo insert
Arguments:
input_path {[type]} -- [description]
wdl_location {[type]} -- [description]
non_static_inputs {[type]} -- [description]
assay_name {[type]} -- [description]
"""
static_inputs = convert_input_json(input_path)
assay = AssayRecord(
assay_name=assay_name,
wdl_location=wdl_location,
static_inputs=static_inputs,
non_static_inputs=non_static_inputs
)
print(assay)
| 5,344,703
|
def test_p2ps_wildcard_p2ps(dev):
"""P2PS wildcard SD Probe Request/Response"""
p2ps_wildcard = "org.wi-fi.wfds"
adv_id = p2ps_advertise(r_dev=dev[0], r_role='1',
svc_name='org.foo.service',
srv_info='I can do stuff')
adv_id2 = p2ps_advertise(r_dev=dev[0], r_role='1',
svc_name='org.wi-fi.wfds.send.rx',
srv_info='I can receive files upto size 2 GB')
if "OK" not in dev[1].global_request("P2P_FIND 10 type=social seek=org.foo.service seek=" + p2ps_wildcard):
raise Exception("Failed on P2P_FIND command")
ev1 = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev1 is None:
raise Exception("P2P-DEVICE-FOUND timeout on seeker side")
if dev[0].p2p_dev_addr() not in ev1:
raise Exception("Unexpected peer")
ev2 = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev2 is None:
raise Exception("P2P-DEVICE-FOUND timeout on seeker side (2)")
if dev[0].p2p_dev_addr() not in ev2:
raise Exception("Unexpected peer (2)")
if p2ps_wildcard not in ev1 + ev2:
raise Exception("P2PS Wildcard name not found in P2P-DEVICE-FOUND event")
if "org.foo.service" not in ev1 + ev2:
raise Exception("Vendor specific service name not found in P2P-DEVICE-FOUND event")
if "OK" not in dev[1].global_request("P2P_STOP_FIND"):
raise Exception("P2P_STOP_FIND failed")
dev[1].dump_monitor()
res = dev[0].global_request("P2P_SERVICE_DEL asp " + str(adv_id))
if res is None:
raise Exception("Unable to remove the advertisement instance")
if "OK" not in dev[1].global_request("P2P_FIND 10 type=social seek=" + p2ps_wildcard):
raise Exception("Failed on P2P_FIND command")
ev1 = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev1 is None:
raise Exception("P2P-DEVICE-FOUND timeout on seeker side")
if dev[0].p2p_dev_addr() not in ev1:
raise Exception("Unexpected peer")
if p2ps_wildcard not in ev1:
raise Exception("P2PS Wildcard name not found in P2P-DEVICE-FOUND event (2)")
dev[1].dump_monitor()
res = dev[0].global_request("P2P_SERVICE_DEL asp " + str(adv_id2))
if res is None:
raise Exception("Unable to remove the advertisement instance 2")
if "OK" not in dev[1].global_request("P2P_FIND 10 type=social seek=" + p2ps_wildcard):
raise Exception("Failed on P2P_FIND command")
ev1 = dev[1].wait_global_event(["P2P-DEVICE-FOUND"], timeout=2)
if ev1 is not None:
raise Exception("Unexpected P2P-DEVICE-FOUND event on seeker side")
dev[1].p2p_stop_find()
dev[1].dump_monitor()
| 5,344,704
|
def _format_call(value: ast3.Call, context: types.Context) -> typing.Text:
"""Format a function call like 'print(a*b, foo=x)'"""
try:
return _format_call_horizontal(value, context)
except errors.NotPossible:
return _format_call_vertical(value, context)
| 5,344,705
|
def get_tags(repo_dir):
"""
_get_tags_
returns a list of tags for the given repo, ordered as
newest first
"""
repo = git.Repo(repo_dir)
tags_with_date = {
tag.name: tag.commit.committed_date
for tag in repo.tags
}
return sorted(tags_with_date, key=tags_with_date.get, reverse=True)
| 5,344,706
|
def readh5(filename, GroupName=None):
"""
Read the HDF5 file 'filename' into a class. Groups within the hdf5 file are
by default loaded as sub classes, unless they include a _read_as attribute
(see sharpy.postproc.savedata). In this case, group can be loaded as classes,
dictionaries, lists or tuples.
    filename: string with the file location
    GroupName: string or list of strings. Default is None; if given, only the
    specified group(s) of the h5 file are read.
Warning:
Groups that need to be read as lists and tuples are assumed to conform to
the format used in sharpy.postproc.savedata
"""
Hinst = ReadInto()
### read and scan file
hdfile = h5.File(filename, 'r')
NamesList = [] # dataset names
hdfile.visit(NamesList.append)
### Identify higher level groups / attributes
if GroupName is None:
MainLev = []
for name in NamesList:
if '/' not in name: MainLev.append(name)
else:
if type(GroupName) is list:
MainLev = GroupName
else:
MainLev = [GroupName]
### Loop through higher level
for name in MainLev:
# sub-group
if type(hdfile[name]) is h5._hl.group.Group:
Ginst = read_group(hdfile[name])
try:
Ginst.name = name
except:
pass
setattr(Hinst, name, Ginst)
else:
setattr(Hinst, name, hdfile[name][()])
# close and return
hdfile.close()
return Hinst
| 5,344,707
|
def test_full_x_remove_z_default_4(init_full_x_remove_z_default_4, create_db_instance):
"""Test if full x removed z (default 4) is properly initialized and can fetch
from 1 of its 3 parents
"""
experiment = EVCBuilder().build_view_from({'name': 'full_x_remove_z_default_4'})
pairs = get_name_value_pairs(experiment.fetch_trials({}))
assert pairs == ((('/x', 9), ), (('/x', -9), ))
pairs = get_name_value_pairs(experiment.fetch_trials_tree({}))
# Note that full_x and full_x_full_y are filtered out because default_value=4
assert pairs == ((('/x', 4), ), (('/x', -4), ), (('/x', 9), ), (('/x', -9), ))
| 5,344,708
|
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d,
L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の消費電力量 (kWh/h) (1)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/h)
W_dash_b2_d_t(ndarray): 1時間当たりの自動湯はり時における太陽熱補正給湯負荷 (MJ/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h)
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの給湯機の消費電力量 (kWh/d)
"""
    # Auxiliary electric power consumption of the water heater during standby and tap hot water supply (2)
E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d)
    # Auxiliary electric power consumption of the water heater during bathtub filling (3)
E_E_hs_aux2_d_t = get_E_E_hs_aux2_d_t(W_dash_b2_d_t)
    # Auxiliary electric power consumption of the water heater while keeping the bath warm (4)
E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)
print('E_E_hs_aux1 = {}'.format(np.sum(E_E_hs_aux1_d_t)))
print('E_E_hs_aux2 = {}'.format(np.sum(E_E_hs_aux2_d_t)))
print('E_E_hs_aux3 = {}'.format(np.sum(E_E_hs_aux3_d_t)))
return E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t
| 5,344,709
|
def set_cookie(cookie):
"""
Set a new (or updated) cookie.
:param cookie: the cookie item, as a cookie.Cookie named tuple.
"""
krait.extra_headers.append(("set-cookie", str(cookie)))
| 5,344,710
|
def addCasingInformation(sentences):
"""Adds information of the casing of words"""
for sentenceIdx in range(len(sentences)):
sentences[sentenceIdx]['casing'] = []
for tokenIdx in range(len(sentences[sentenceIdx]['tokens'])):
token = sentences[sentenceIdx]['tokens'][tokenIdx]
sentences[sentenceIdx]['casing'].append(getCasing(token))
| 5,344,711
|
def best_promo(order):
"""
    Select the best available discount
"""
return max(promo(order) for promo in promos)
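# best_promo expects a module-level `promos` list of discount strategies (each takes
# an order and returns a discount amount). A minimal sketch with made-up Order and
# promo functions, not the original project's definitions:
class _Order:
    def __init__(self, total, quantity):
        self.total = total
        self.quantity = quantity

def _bulk_item_promo(order):
    return order.total * 0.10 if order.quantity >= 20 else 0

def _large_order_promo(order):
    return order.total * 0.07 if order.total >= 1000 else 0

promos = [_bulk_item_promo, _large_order_promo]

print(best_promo(_Order(total=1200, quantity=5)))  # 84.0, from _large_order_promo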
| 5,344,712
|
def random_joint_positions(robot):
"""
Generates random joint positions within joint limits for the given robot.
@type robot: orpy.Robot
@param robot: The OpenRAVE robot
@rtype: np.array
    @return: Random joint positions within the active DOF limits
"""
# Get the limits of the active DOFs
lower, upper = robot.GetActiveDOFLimits()
positions = lower + np.random.rand(len(lower))*(upper-lower)
return positions
| 5,344,713
|
def plot_gas_driven_models(axes):
"""
Plots the gas-driven starburst models in the top row of panels.
Parameters
==========
axes :: list
The 1-D list of matplotlib axes
"""
visuals.plot_output_3axes(axes, "../../simulations/sudden_2Gyr_5e9Msun",
"crimson", "Sr")
visuals.plot_output_3axes(axes, "../../simulations/sudden_5Gyr_5e9Msun",
"deepskyblue", "Sr")
visuals.plot_output_3axes(axes, "../../simulations/default", "black",
"Sr", second_linestyle = ':')
visuals.plot_track_points_intervals(axes[1],
vice.history("../../simulations/default"), element = "Sr")
| 5,344,714
|
def cli(sqlplus, user, host, password, database,
version, prompt, logfile, login_path,
auto_vertical_output, table, csv,
warn, execute, filename, okclirc):
"""An Oracle-DB terminal client with auto-completion and syntax highlighting.
\b
Examples:
- okcli -u my_user -h my_host.com -D schema
- okcli user/password@tns_name
- okcli user/password@tns_name -D schema
- okcli user/password@tns_name -e "query"
- okcli user@tns_name -@ query_file.sql
"""
if version:
print('Version:', __version__)
sys.exit(0)
if sqlplus:
user, password, host = parse_sqlplus_arg(sqlplus)
okcli = OCli(prompt=prompt, logfile=logfile,
login_path=login_path,
auto_vertical_output=auto_vertical_output, warn=warn,
okclirc=okclirc)
okcli.connect(database, user, password, host)
okcli.logger.debug('Launch Params: \n'
'\tdatabase: %r'
'\tuser: %r'
'\thost: %r', database, user, host)
if execute or filename:
if csv:
okcli.formatter.format_name = 'csv'
elif not table:
okcli.formatter.format_name = 'tsv'
# --execute argument
if execute:
try:
okcli.run_query(execute)
exit(0)
except Exception as e:
click.secho(str(e), err=True, fg='red')
exit(1)
# --filename argument
if filename:
try:
with open(os.path.expanduser(filename), encoding='utf-8') as f:
query = f.read()
okcli.run_query(query)
except IOError as e:
click.secho(str(e), err=True, fg='red')
if sys.stdin.isatty():
okcli.run_cli()
else:
stdin = click.get_text_stream('stdin')
stdin_text = stdin.read()
try:
sys.stdin = open('/dev/tty')
except FileNotFoundError:
okcli.logger.warning('Unable to open TTY as stdin.')
if (okcli.ddl_warning and
confirm_ddl_query(stdin_text) is False):
exit(0)
try:
new_line = True
if csv:
okcli.formatter.format_name = 'csv'
new_line = False
elif not table:
okcli.formatter.format_name = 'tsv'
okcli.run_query(stdin_text, new_line=new_line)
exit(0)
except Exception as e:
click.secho(str(e), err=True, fg='red')
exit(1)
| 5,344,715
|
def test_cancel_examples(example):
"""
We can't specify examples in test_fuzz_cancel (because we use data, see
https://hypothesis.readthedocs.io/en/latest/data.html#interactive-draw),
so we have this here for explicit examples.
"""
stream_req, stream_resp, draws = example
def draw(lst):
if draws:
this_draw = draws.pop(0)
for name, evt in lst:
if name == this_draw:
return name, evt
raise AssertionError(
f"{this_draw} not in list: {[name for name, _ in lst]}"
)
else:
return lst[0]
_test_cancel(stream_req, stream_resp, draw)
| 5,344,716
|
def fetch_pickle(filename):
"""
Fetches any variable saved into a picklefile with the given filename.
Parameters:
filename (str): filename of the pickle file
Returns:
variable (any pickle compatible type): variable that was saved into the picklefile.
"""
with open(filename, 'rb') as picklefile:
variable = pickle.load(picklefile)
return variable
| 5,344,717
|
def check_clockwise(poly):
"""Checks if a sequence of (x,y) polygon vertice pairs is ordered clockwise or not.
NOTE: Counter-clockwise (=FALSE) vertice order reserved for inner ring polygons"""
clockwise = False
if (sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in zip(poly, poly[1:] + [poly[0]]))) < 0:
clockwise = not clockwise
return clockwise
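# A quick worked example of the signed-area (shoelace) test used above:
# for a unit square traversed clockwise the sum is -2, so the function returns True.
square_cw = [(0, 0), (0, 1), (1, 1), (1, 0)]
print(check_clockwise(square_cw))                   # True  (sum = -2)
print(check_clockwise(list(reversed(square_cw))))   # False (sum = +2)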
| 5,344,718
|
def bookList(request):
"""测试"""
# 查询书籍信息:使用默认的管理器对象 : 在管理器上调用过滤器方法会返回查询集
# book_list = BookInfo.objects.all()
# 查询书籍信息:使用自定义的管理器对象
# book_list = BookInfo.books.all()
# 以下代码演示,自定义管理器的类给模型类新增初始化方法: 类比books.all()
# book1 = BookInfo.books.create_model('zxc')
# book2 = BookInfo.books.create_model('zxj')
# book_list = [book1,book2]
# 以下代码演示,限制查询集:limit 0,2
# book_list = BookInfo.books.all()[:2]
# 以下代码演示基础条件查询 : filter(模型属性__条件运算符=值)
# 1.查询id为1的书籍 : exact 判断相等,可以省虐,直接等号, pk 等价于 主键
# book_list = BookInfo.books.filter(id=1)
# 2.查询书名包含‘湖’的书籍 : contains :包含,类似于 like
# book_list = BookInfo.books.filter(name__contains='湖')
# 3.查询书名以‘部’结尾的书籍:endswith :以什么什么结尾;startswith以什么什么开头
# book_list = BookInfo.books.filter(name__endswith='部')
# 4.查询书名不为空的书籍 : isnull : 判断是否为空,False表示不为空,两个否定表示肯定 "容易懵逼"
# book_list = BookInfo.books.filter(name__isnull=False)
# 5.查询编号为2或4的书籍 in : 表示只能在指定的元素中选择,不表示区间 "容易懵逼"
# book_list = BookInfo.books.filter(id__in=[2,4])
# 6.查询编号大于2的书籍 gt 大于, gte 大于等于, lt 小于, lte 小于等于
# book_list = BookInfo.books.filter(id__gt=2)
# 7.查询id不等于3的书籍:exclude 查询满足条件以外的数据
# book_list = BookInfo.books.exclude(id=3)
# 8.查询1980年发表的书籍
# book_list = BookInfo.books.filter(pub_date__year='1980')
# 9.查询1990年1月1日后发表的书籍
# book_list = BookInfo.books.filter(pub_date__gt='1990-1-1')
# from datetime import date
# book_list = BookInfo.books.filter(pub_date__gt=date(1990,1,1))
# 以下代码,演示F对象和Q对象查询 : F('模型属性') Q(属性名__条件运算符=值) | Q(属性名__条件运算符=值)
# 1.查询阅读量大于评论量的书籍
# book_list = BookInfo.books.filter(readcount__gt=F('commentcount'))
# 2.查询阅读量大于2倍评论量的书籍 : F()支持计算
# book_list = BookInfo.books.filter(readcount__gt=F('commentcount') * 2)
# 1.查询阅读量大于20,或编号小于3的图书
# book_list = BookInfo.books.filter(Q(readcount__gt=20) | Q(id__lt=3))
# 2.查询编号不等于3的书籍 ~Q()
book_list = BookInfo.books.filter(~Q(id=3))
    # The code below demonstrates the aggregation filter aggregate(), which works with the aggregate functions Avg(), Sum(), Max(), Min(), Count()
    # Goal: compute the total read count; aggregate() returns a single dict, e.g. {'readcount__sum': 134}
total_count = BookInfo.books.aggregate(Sum('readcount'))
    # The code below demonstrates basic related-object queries
    # 1. All people in the book with id 1 (one-to-many): peopleinfo_set
book1 = BookInfo.books.get(id=1)
people_list1 = book1.peopleinfo_set.all()
    # 2. The book that the hero with id 1 comes from (many-to-one): just access the related foreign key attribute people1.book
people1 = PeopleInfo.objects.get(id=1)
book2 = people1.book
    # The code below demonstrates join queries: filter(related_model_lowercase__field__lookup=value)
    # 1. All people from the book named "天龙八部" (one-to-many): the join goes through the model holding the foreign key
people_list2 = PeopleInfo.objects.filter(book__name='天龙八部')
    # 2. Books that have a person whose description contains "降龙" (many-to-one)
book_list2 = BookInfo.books.filter(peopleinfo__description__contains='降龙')
    # Build the template context
context = {
'book_list':book_list,
'total_count':total_count,
'people_list1':people_list1,
'book2':book2,
'people_list2':people_list2,
'book_list2':book_list2
}
return render(request, 'Book/booklist.html', context)
| 5,344,719
|
def format_stats(stats):
"""Format statistics for printing to a table"""
result = ''
for key, value in stats.items():
result += f'{key} - {value}\n'
return result[:-1]
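# A quick usage check of format_stats (the stats dict values are arbitrary).
print(format_stats({"wins": 3, "losses": 1}))
# wins - 3
# losses - 1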
| 5,344,720
|
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
"""Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
        scale (float, optional): Scale ratio of bboxes, the default value
            1.0 means no scaling.
pad_fill (Number | list[Number]): Value to be filled for padding.
Default: None, which means no padding.
Returns:
list[ndarray] | ndarray: The cropped image patches.
"""
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
patches = []
for i in range(clipped_bbox.shape[0]):
x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
if pad_fill is None:
patch = img[y1:y2 + 1, x1:x2 + 1, ...]
else:
_x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
if chn == 1:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
else:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
patch = np.array(
pad_fill, dtype=img.dtype) * np.ones(
patch_shape, dtype=img.dtype)
x_start = 0 if _x1 >= 0 else -_x1
y_start = 0 if _y1 >= 0 else -_y1
w = x2 - x1 + 1
h = y2 - y1 + 1
patch[y_start:y_start + h, x_start:x_start + w,
...] = img[y1:y1 + h, x1:x1 + w, ...]
patches.append(patch)
if bboxes.ndim == 1:
return patches[0]
else:
return patches
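# A minimal usage sketch of imcrop on a synthetic image (assumes the bbox_scaling and
# bbox_clip helpers referenced above, e.g. mmcv's, are importable; values are made up).
import numpy as np
dummy_img = np.zeros((100, 200, 3), dtype=np.uint8)
dummy_bbox = np.array([50, 20, 149, 69])        # x1, y1, x2, y2
patch = imcrop(dummy_img, dummy_bbox)           # scale=1.0, pad_fill=None: plain crop
print(patch.shape)                              # (50, 100, 3)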
| 5,344,721
|
def start_dhcp_servers(config):
"""Start DHCP server."""
# start dhcp servers
for device in config.board["devices"]:
if "options" in device and "no-dhcp-server" in device["options"]:
continue
if "options" in device and "dhcp-server" in device["options"]:
getattr(config, device["name"]).setup_dhcp_server()
| 5,344,722
|
def timestamp2str(ts):
""" Converts Timestamp object to str containing date and time
"""
date = ts.date().strftime("%Y-%m-%d")
time = ts.time().strftime("%H:%M:%S")
return ' '.join([date, time])
| 5,344,723
|
def define_class_functions(processes, stages, progress):
"""
Define and return class of unit tests for stand-alone functions
for the given configuration.
"""
class Test_functions(TestCase):
def test_mapreduce(self):
logger = log() if progress else None
result = mr4mp.mapreduce(
index, merge, range(50),
processes=processes, stages=stages, progress=logger
)
self.assertEqual(result, result_reference)
if progress:
self.assertEqual(
logger.to_list(),
list(range(50)) if stages is not None else []
)
def test_mapconcat(self):
logger = log() if progress else None
result = mr4mp.mapconcat(
add_one, range(0, 100),
processes=processes, stages=stages, progress=logger
)
self.assertEqual(list(result), list(range(1, 101)))
if progress:
self.assertEqual(
logger.to_list(),
list(range(100)) if stages is not None else []
)
return Test_functions
| 5,344,724
|
def is_homescreen():
""" description:
Check homescreen is displayed
usage:
ui_utils.is_homescreen()
tags:
ui, android, homescreen
"""
pass
| 5,344,725
|
def concept(*reference):
"""Reference to a semantic concept.
Parameters
----------
*reference : :obj:`str`
Keys pointing to the ruleset defining this concept in the rules file of
an ontology.
Returns
-------
:obj:`CubeProxy`
A textual reference to the concept that can be solved by the query
processor.
Examples
--------
>>> sq.concept("entity", "water")
{
"type": "concept",
"reference": [
"entity",
"water"
]
}
"""
obj = {"type": "concept", "reference": reference}
return CubeProxy(obj)
| 5,344,726
|
async def handle_spam(message: "Message", trigger_type: str, trigger: str) -> None:
"""
Handle the booru spam request
Args:
message: Discord message object related to this request
trigger_type: the trigger type that called this function ('author', 'first_word', or 'contains')
trigger: the relevant string from the message that triggered this call
"""
params = get_params(message)
try:
amount = int(params[0])
if amount < 1:
await message.channel.send(":thinking:")
return
except Exception:
await message.channel.send("Usage: `spam <amount> <optional space seperated tags>`")
return
params = params[1:]
await message.channel.trigger_typing()
await process_request(message.channel, amount, params)
| 5,344,727
|
def collate_tensors(batch, stack_tensors=torch.stack):
""" Collate a list of type ``k`` (dict, namedtuple, list, etc.) with tensors.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
batch (list of k): List of rows of type ``k``.
stack_tensors (callable): Function to stack tensors into a batch.
Returns:
k: Collated batch of type ``k``.
Example use case:
This is useful with ``torch.utils.data.dataloader.DataLoader`` which requires a collate
function. Typically, when collating sequences you'd set
``collate_fn=partial(collate_tensors, stack_tensors=encoders.text.stack_and_pad_tensors)``.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> collated = collate_tensors(batch)
>>> {k: t.size() for (k, t) in collated.items()}
{'column_a': torch.Size([2, 5]), 'column_b': torch.Size([2, 5])}
"""
if all([torch.is_tensor(b) for b in batch]):
return stack_tensors(batch)
if (all([isinstance(b, dict) for b in batch]) and
all([b.keys() == batch[0].keys() for b in batch])):
return {key: collate_tensors([d[key] for d in batch], stack_tensors) for key in batch[0]}
elif all([is_namedtuple(b) for b in batch]): # Handle ``namedtuple``
return batch[0].__class__(**collate_tensors([b._asdict() for b in batch], stack_tensors))
elif all([isinstance(b, list) for b in batch]):
# Handle list of lists such each list has some column to be batched, similar to:
# [['a', 'b'], ['a', 'b']] → [['a', 'a'], ['b', 'b']]
transposed = zip(*batch)
return [collate_tensors(samples, stack_tensors) for samples in transposed]
else:
return batch
| 5,344,728
|
def plot_single_loc(lon_pt, lat_pt, extent_lst,
lon, lat, save_loc=None, title=None,
):
"""
Plot a single point on a map
Parameters:
lon_pt (int) : longitude idx
lat_pt (int) : latitude idx
extent_lst (list) : plotting region
lon (np arr) : array of longitudes (len == 72)
lat (np arr) : array of latitudes (len == 46)
save_loc (str) : save location of plot (default None)
title (str) : title for plot if given
Returns:
        None; the matplotlib plot is saved if a save location is given
"""
assert lon.shape[0] == 72
assert lat.shape[0] == 46
fig = plt.figure(figsize=(12.5, 8))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
ax.scatter(lon[lon_pt], lat[lat_pt], transform=ccrs.PlateCarree(),
marker='s', s=50)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.STATES)
ax.add_feature(cfeature.OCEAN)
ax.set_extent(extent_lst)
if title:
ax.set_title(title)
if save_loc:
plt.savefig(save_loc)
plt.show()
| 5,344,729
|
def show_lsb(image_path, n):
"""Shows the n least significant bits of image"""
start = time()
image = Image.open(image_path)
# Used to set everything but the least significant n bits to 0 when
# using bitwise AND on an integer
mask = ((1 << n) - 1)
color_data = [(255 * ((rgb[0] & mask) + (rgb[1] & mask) + (rgb[2] & mask))
// (3 * mask),) * 3 for rgb in image.getdata()]
image.putdata(color_data)
print("Runtime: {0:.2f} s".format(time() - start))
file_name, file_extension = os.path.splitext(image_path)
image.save(file_name + "_{}LSBs".format(n) + file_extension)
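# The mask arithmetic above is easiest to see on a single pixel; a worked example
# with made-up values for n=2 (mask = 0b11 keeps the 2 least significant bits):
n = 2
mask = (1 << n) - 1                              # 3
rgb = (200, 100, 53)                             # arbitrary pixel
lsb_sum = (rgb[0] & mask) + (rgb[1] & mask) + (rgb[2] & mask)  # 0 + 0 + 1 = 1
print(255 * lsb_sum // (3 * mask))               # 28: the pixel becomes (28, 28, 28)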
| 5,344,730
|
def build_boundaries_layers(cyt_coord, nuc_coord, rna_coord):
"""
Parameters
----------
cyt_coord : np.ndarray, np.int64
Array of cytoplasm boundaries coordinates with shape (nb_points, 2).
nuc_coord : np.ndarray, np.int64
Array of nucleus boundaries coordinates with shape (nb_points, 2).
rna_coord : np.ndarray, np.int64
Array of mRNAs coordinates with shape (nb_points, 2) or
(nb_points, 3).
Returns
-------
cyt_boundaries : np.ndarray, np.float32
        A 2-d binary tensor with shape (y, x) showing the cytoplasm boundaries.
nuc_boundaries : np.ndarray, np.float32
A 2-d binary tensor with shape (y, x) showing nucleus boundaries.
rna_layer : np.ndarray, np.float32
Binary image of mRNAs localizations with shape (y, x).
"""
# check parameters
stack.check_array(cyt_coord,
ndim=2,
dtype=[np.int64])
if nuc_coord is not None:
stack.check_array(nuc_coord,
ndim=2,
dtype=[np.int64])
if rna_coord is not None:
stack.check_array(rna_coord,
ndim=2,
dtype=[np.int64])
# build surface binary matrices from coordinates
cyt_surface, nuc_surface, rna_layer, _ = stack.from_coord_to_surface(
cyt_coord=cyt_coord,
nuc_coord=nuc_coord,
rna_coord=rna_coord)
# from surface binary matrices to boundaries binary matrices
cyt_boundaries = stack.from_surface_to_boundaries(cyt_surface)
nuc_boundaries = stack.from_surface_to_boundaries(nuc_surface)
# cast layer in float32
cyt_boundaries = stack.cast_img_float32(cyt_boundaries)
nuc_boundaries = stack.cast_img_float32(nuc_boundaries)
rna_layer = stack.cast_img_float32(rna_layer)
return cyt_boundaries, nuc_boundaries, rna_layer
| 5,344,731
|
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter('__name__')):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
| 5,344,732
|
def predicted_actual_chart(actual, predicted, title="Predicted vs Actual Values"):
"""Predicted vs actual values curve."""
source = pd.DataFrame({"x": actual, "y": predicted})
scatter = scatter_chart(source, "Actual", "Residual", title=title)
vmin = source.min().min()
vmax = source.max().max()
_df = pd.DataFrame({"x": [vmin, vmax], "y": [vmin, vmax]})
baseline = alt.Chart(_df).mark_line(strokeDash=[20, 5], color="black").encode(
alt.X("x"),
alt.Y("y"),
)
return scatter + baseline
| 5,344,733
|
def swapSeries(keypoints_array,v,c,pers1,pers2,start,end):
"""helper function for swapping sections of time series. This is useful because openpose isn't
consistent in labelling people so we need to rearrange things.
Args:
keypoints_array: all the data.
v: which video? - specifies first dimension of array
c: which camera? specifies second dimension of array
pers1: which people to swap 1
pers2: which people to swap 2
start: where in time series do we start? (TODO can be blank - start at beginning)
end: where in time series do we end? (TODO can be blank - to end)
Returns:
a rearranged keypoints_array
"""
temp = np.copy(keypoints_array[v,c,start:end,pers1,:]) #temporary copy pers1
keypoints_array[v,c,start:end,pers1,:] = keypoints_array[v,c,start:end,pers2,:] #pers2 to pers 1
keypoints_array[v,c,start:end,pers2,:] = temp
return keypoints_array
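# A minimal sketch of swapSeries on synthetic data (array layout assumed to be
# (videos, cameras, frames, people, coords); the 75-value coordinate vector is a
# made-up stand-in for OpenPose output).
import numpy as np
kp = np.random.rand(1, 1, 100, 2, 75)
frame10_person0 = kp[0, 0, 10, 0, :].copy()
kp = swapSeries(kp, v=0, c=0, pers1=0, pers2=1, start=10, end=20)
assert np.allclose(kp[0, 0, 10, 1, :], frame10_person0)  # person 0 moved to slot 1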
| 5,344,734
|
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
input_tensor = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7 = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_tensor, keep_prob, layer3, layer4, layer7
| 5,344,735
|
def run():
"""Runs the development server."""
db.create_all()
app.run(use_reloader=True, threaded=True, host='0.0.0.0', port=8080)
| 5,344,736
|
def interpolate(R1,R2,u):
"""Interpolate linearly between the two rotations R1 and R2. """
R = mul(inv(R1),R2)
m = moment(R)
angle = vectorops.norm(m)
if angle==0: return R1
axis = vectorops.div(m,angle)
return mul(R1,rotation(axis,angle*u))
| 5,344,737
|
def _iterate_list(element, sign, other_states, output_splitter, state_fields=True):
""" Used in the splitter2rpn to get recursion. """
for i, el in enumerate(element):
_ordering(
deepcopy(el),
i,
current_sign=sign,
other_states=other_states,
output_splitter=output_splitter,
state_fields=state_fields,
)
| 5,344,738
|
def insertion_sort(items):
"""Sort given items by taking first unsorted item, inserting it in sorted
order in front of items, and repeating until all items are in order.
Running time: O(n^2)
Memory usage: O(1)"""
for i in range(1, len(items)):
j = i
while j > 0 and items[j-1] > items[j]:
items[j], items[j-1] = items[j-1], items[j]
j -= 1
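# Quick usage check: insertion_sort sorts the list in place and returns None.
items = [5, 2, 4, 6, 1, 3]
insertion_sort(items)
print(items)  # [1, 2, 3, 4, 5, 6]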
| 5,344,739
|
def plot_spikes(
spikes: dict,
ax: plt.Axes = None,
markersize: int = None,
color: tp.Union[str, tp.Any] = "k",
) -> plt.Axes:
"""Plot Spikes returned by NeuroDriver's OutputRecorder"""
if ax is None:
fig = plt.gcf()
ax = fig.add_subplot()
for n, (name, ss) in enumerate(spikes.items()):
if "data" not in ss:
raise EOSPlotterException(f"'data' field missing for node {name}")
if "data" in ss and "time" in ss:
raise EOSPlotterException(
f"Data for node {name} is not compatible with required format, "
"data mush only have 'data' field. Did you mean "
"to call plot_data?"
)
if len(ss["data"]) > 0:
ax.plot(
ss["data"],
np.full(len(ss["data"]), n),
"|",
c=color,
markersize=markersize,
)
return ax
| 5,344,740
|
def restore(modeladmin, request, queryset, all_fields=False):
"""
Action to cancel changes
:param modeladmin: Administration class
:param request: HTTP request
:param queryset: All the entities selected
:param all_fields: Also restore data that cannot be edited?
:return: None
"""
fail, success = 0, 0
errors = []
for history in queryset.order_by('-creation_date'):
try:
result = history.restore(
current_user=request.user, from_admin=True, all_fields=all_fields)
if result:
success += 1
else:
fail += 1
except Exception as error:
errors.append((history.pk, error))
if success > 0:
messages.success(request, _(
"{} élément(s) ont été restaurés avec succès !").format(success))
if fail > 0:
messages.warning(request, _(
"{} élément(s) n'ont pas pu être restaurés car leurs relations sont manquantes !").format(fail))
for id, error in errors:
messages.error(request, _(
"L'élément {} n'a pu être restauré pour la raison suivante : {}").format(id, error))
| 5,344,741
|
def boardToString(board):
"""
return a string representation of the current board.
"""
# global board
# b = board
rg = range(board.size())
s = "┌────┬────┬────┬────┐\n|"+"|\n╞════╪════╪════╪════╡\n|".join(
['|'.join([getCellStr(board, x, y) for x in rg]) for y in rg])
s = "\n" + s + "|\n└────┴────┴────┴────┘"
return s
| 5,344,742
|
def create_data_ops(batch_size, num_elements_min_max):
"""Returns graphs containg the inputs and targets for classification.
Refer to create_data_dicts_tf and create_linked_list_target for more details.
Args:
batch_size: batch size for the `input_graphs`.
num_elements_min_max: a 2-`tuple` of `int`s which define the [lower, upper)
range of the number of elements per list.
Returns:
inputs_op: a `graphs.GraphsTuple` which contains the input list as a graph.
targets_op: a `graphs.GraphsTuple` which contains the target as a graph.
sort_indices_op: a `graphs.GraphsTuple` which contains the sort indices of
the list elements a graph.
ranks_op: a `graphs.GraphsTuple` which contains the ranks of the list
elements as a graph.
data_dicts_to_graphs_tuple:
Creates a `graphs.GraphsTuple` containing tensors from data dicts.
"""
inputs_op, sort_indices_op, ranks_op = create_graph_dicts_tf(
batch_size, num_elements_min_max)
# show["inputs_graphs"] = inputs_op
# show["sort_indices_graphs"] = sort_indices_op
# show["ranks_graphs"] = ranks_op
inputs_op = utils_tf.data_dicts_to_graphs_tuple(inputs_op)
sort_indices_op = utils_tf.data_dicts_to_graphs_tuple(sort_indices_op)
ranks_op = utils_tf.data_dicts_to_graphs_tuple(ranks_op)
inputs_op = utils_tf.fully_connect_graph_dynamic(inputs_op) # Adds edges to a graph by fully-connecting the nodes.
sort_indices_op = utils_tf.fully_connect_graph_dynamic(sort_indices_op)
ranks_op = utils_tf.fully_connect_graph_dynamic(ranks_op)
targets_op = create_linked_list_target(batch_size, sort_indices_op)
nodes = tf.concat((targets_op.nodes, 1.0 - targets_op.nodes), axis=1)
edges = tf.concat((targets_op.edges, 1.0 - targets_op.edges), axis=1)
targets_op = targets_op._replace(nodes=nodes, edges=edges)
return inputs_op, targets_op, sort_indices_op, ranks_op
| 5,344,743
|
def compute_file_path(data_path, path, command):
"""Return the computed file path for mocked data
Keyword arguments:
data_path -- the path of the folder that contains the subbed data
path -- the URL path
command -- the HTTP verb
"""
return os.path.realpath(
os.path.join(
data_path,
f'{path[1:]}.{command}.json'
)
)
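# Example of the path convention (directory and URL are illustrative):
# a GET request to /users/42 with data rooted at ./fixtures resolves to
# <realpath of ./fixtures>/users/42.get.json
print(compute_file_path("./fixtures", "/users/42", "get"))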
| 5,344,744
|
def location_matches(stmt):
"""Return a matches_key which takes geo-location into account."""
if isinstance(stmt, Event):
context_key = get_location(stmt)
matches_key = str((stmt.concept.matches_key(), context_key))
elif isinstance(stmt, Influence):
subj_context_key = get_location(stmt.subj)
obj_context_key = get_location(stmt.obj)
matches_key = str((stmt.matches_key(), subj_context_key,
obj_context_key))
else:
matches_key = stmt.matches_key()
return matches_key
| 5,344,745
|
def _submit_to_measurement_sets_api(measurement_set, patch_update):
"""Send the submission object to the appropriate API endpoint."""
# TODO: Add a separate method to validate submission without sending it.
# Attempt to find existing measurement sets if any exist.
try:
matching_submission = get_existing_submissions(measurement_set)
measurement_set_id = get_measurement_set_id_from_submission(matching_submission)
except(NoMatchingSubmissionsException, NoMatchingMeasurementSetsException):
# If no measurement sets exist, we can safely POST.
response = _post_to_measurement_sets_api(measurement_set)
else:
# If a measurement set does exist, we use the existing id to PUT or PATCH.
if patch_update:
response = _patch_to_measurement_sets_api(measurement_set, measurement_set_id)
else:
response = _put_to_measurement_sets_api(measurement_set, measurement_set_id)
_handle_http_error(response, message='submit_to_measurement_sets_api')
return response
| 5,344,746
|
def parse_files(files, options):
"""Build datastructures from lines"""
lines = []
for line in finput(files, openhook=compr):
if (type(line) is bytes): line = line.decode('utf-8')
lines.append(line.rstrip().split("|"))
db = {}
db['rp'], db['users'], db['msgprof'], db['logins'] = {}, {}, {}, 0
# Audit log format we're trying to parse below:
# datetime|req_bind|req_id|rp|msg_profile|idp|resp_bind|resp_id|user|authn_mech|attribs|name_id|assert_id|ip
for event in lines:
try:
rp, msg_profile, user = list(event[i] for i in [3, 4, 8])
except ValueError:
print(linesep.join([
"ERROR: Unsupported log file format or compressed logs with Python < 2.5",
"See the documentation."]))
term(-1)
if msg_profile.lower().find("sso") > -1:
db['logins'] += 1
# we almost always need to count rps:
if len(rp) > 0:
if rp in db['rp']:
db['rp'][rp] += 1
else:
db['rp'][rp] = 1
# only count users if asked to
if len(user) > 0:
if options.uniqusers or options.xml or options.rrd or options.json:
if user in db['users']:
db['users'][user] += 1
else:
db['users'][user] = 1
# only count message profiles and rps if asked to
if options.msgprofiles:
if msg_profile in db['msgprof']:
if rp in db['msgprof'][msg_profile]:
db['msgprof'][msg_profile][rp] += 1
else:
db['msgprof'][msg_profile][rp] = 1
else:
db['msgprof'][msg_profile] = {}
db['msgprof'][msg_profile][rp] = 1
return db
| 5,344,747
|
def resize_preserving_order(nparray: np.ndarray, length: int) -> np.ndarray:
"""Extends/truncates nparray so that ``len(result) == length``.
The elements of nparray are duplicated to achieve the desired length
(favours earlier elements).
Constructs a zeroes array of length if nparray is empty.
See Also
--------
resize_array : cycles elements instead of favouring earlier ones
make_even : similar earlier-favouring behaviour for balancing 2 iterables
Examples
--------
Normal usage::
resize_preserving_order(np.array([]), 5)
# np.array([0., 0., 0., 0., 0.])
nparray = np.array([[1, 2],
[3, 4]])
resize_preserving_order(nparray, 1)
# np.array([[1, 2]])
resize_preserving_order(nparray, 3)
# np.array([[1, 2],
# [1, 2],
# [3, 4]])
"""
if len(nparray) == 0:
return np.zeros((length, *nparray.shape[1:]))
if len(nparray) == length:
return nparray
indices = np.arange(length) * len(nparray) // length
return nparray[indices]
| 5,344,748
|
def push(service, key, data):
"""Push
Called to push data to the sync cache
Args:
service (str): The name of the service using the sync
key (mixed): The key to push the data onto
data (mixed): The data to be pushed
Returns:
bool|string
"""
# Make sure the service and key are strings
if not isinstance(service, basestring): service = str(service)
if not isinstance(key, basestring): key = str(key)
# Generate the JSON
sJSON = JSON.encode({
"service": service,
"key": key,
"data": data
})
# Check if anyone is interested in the key
lSessions = _moRedis.smembers("%s%s" % (service, key))
# If there are any sessions
if lSessions:
# For each session found
for sSession in lSessions:
# Add the message to its list
p = _moRedis.pipeline()
p.lpush(sSession, sJSON)
p.expire(sSession, 21600)
p.execute()
# Now publish the message for anyone using websockets
_moRedis.publish("%s%s" % (service, key), sJSON)
# Return OK
return True
| 5,344,749
|
def load_zipcar_test(tests_folder,test_name):
"""
Loads and runs a specific strong controllability test from the Zipcar benchmarks.
"""
folder = os.path.abspath(os.path.expanduser(tests_folder))
tcs = load_pickle_file(os.path.join(folder,test_name))
counts=[0,0,0]
for tc in tcs:
if tc.type=='controllable':
counts[0]+=1
elif tc.type=='uncontrollable_bounded':
counts[1]+=1
elif tc.type=='uncontrollable_probabilistic':
counts[2]+=1
check_cc_strong_controllability(tcs,example_name='stp_'+test_name)
print('\t* Controllable durations %d'%(counts[0]))
print('\t* Uncontrollable durations %d'%(counts[1]))
print('\t* Probabilistic durations %d'%(counts[2]))
| 5,344,750
|
def cli(
config_file: str,
stmgr_type: str,
name: str,
rootpath: str,
tunnelhost: str,
hostport: str,
port: int,
verbose: bool,
) -> None:
"""
A HTTP service for serving data about clusters.
    The statemanager's config from the given config file can be overridden using
options on this executable.
"""
log_level = logging.DEBUG if verbose else logging.INFO
log.configure(log_level)
stmgr_override = {
"type": stmgr_type,
"name": name,
"rootpath": rootpath,
"tunnelhost": tunnelhost,
"hostport": hostport,
}
config = Config(create_tracker_config(config_file, stmgr_override))
state.tracker = Tracker(config)
state.tracker.sync_topologies()
# this only returns when interrupted
uvicorn.run(app, host="0.0.0.0", port=port, log_level=log_level)
state.tracker.stop_sync()
# non-daemon threads linger and stop the process for quitting, so signal
# for cleaning up
os.kill(os.getpid(), signal.SIGKILL)
| 5,344,751
|
def get_args():
"""Get all parsed arguments."""
parser = argparse.ArgumentParser(description="CLASP training loop")
# data
parser.add_argument("--id", type=str,
help="run id")
parser.add_argument("--path-data-train", type=str,
help="path preprocessed csv file for training")
parser.add_argument("--path-offsd-train", type=str,
help="path preprocessed offset dictionary json file for training")
parser.add_argument("--path-data-valid-id", type=str,
help="path preprocessed csv file for valid id")
parser.add_argument("--path-offsd-valid-id", type=str,
help="path preprocessed offset dictionary json file for valid id")
parser.add_argument("--path-data-valid-ood", type=str,
help="path preprocessed csv file for valid ood")
parser.add_argument("--path-offsd-valid-ood", type=str,
help="path preprocessed offset dictionary json file for valid ood")
parser.add_argument("--path-results", type=str, default="results",
help="path to the results data, i.e., logs, model weights, etc. (default: results)")
parser.add_argument("--path-weights", type=str, default=None,
help="path to weights for reloading (default: None)")
parser.add_argument("--numw", type=int, default=0,
help="number of workers for pytorch dataloader (default: 0)")
# training
parser.add_argument("--world-size", type=int, default=2,
help="training world size (default: 2)")
parser.add_argument("--bs", type=int, default=8,
help="batch size (default: 8)")
parser.add_argument("--epochs", type=int, default=2,
help="epochs (default: 2)")
parser.add_argument("--dryrun", action="store_true", default=False,
help="Dry run for the setup runs only 4 steps in each epoch, use to test your setup (default: False)")
# model
# text encoder
parser.add_argument("--tenc-ntok", type=int, default=49408,
help="text encoder num_tokens (default: 49408)")
parser.add_argument("--tenc-dim", type=int, default=512,
help="text encoder dim (default: 512)")
parser.add_argument("--tenc-depth", type=int, default=6,
help="text encoder depth (default: 6)")
parser.add_argument("--tenc-seq-len", type=int, default=1024,
help="text encoder seq_len (default: 1024)")
parser.add_argument("--tenc-rev", action="store_true", default=True,
help="text encoder reversibility (default: True)")
# bioseq encoder
parser.add_argument("--bsenc-ntok", type=int, default=23,
help="bioseq encoder num_tokens (default: 23)")
parser.add_argument("--bsenc-dim", type=int, default=512,
help="bioseq encoder dim (default: 512)")
parser.add_argument("--bsenc-depth", type=int, default=6,
help="bioseq encoder depth (default: 6)")
parser.add_argument("--bsenc-seq-len", type=int, default=512,
help="bioseq encoder seq_len (default: 512)")
parser.add_argument("--bsenc-rev", action="store_true", default=True,
help="bioseq encoder reversibility (default: True)")
parser.add_argument("--bsenc-sparse-attn", action="store_true", default=False,
help="bioseq encoder sparse_attn (default: False)")
# logging and saving
parser.add_argument("--save-interval-epoch", type=int, default=1,
help="save interval epoch (default: 1")
parser.add_argument("--save-interval-step", type=int, default=4_000,
help="save interval step (default: 4_000")
args = parser.parse_args()
args.cmd = " ".join("\""+arg+"\"" if " " in arg else arg for arg in sys.argv)
return args
| 5,344,752
|
def nst_list2(ctx, filter):
"""list all Network Slice Templates (NST) in the system"""
nst_list(ctx, filter)
| 5,344,753
|
def omniidlArguments(args):
"""omniidlArguments(list)
Set default omniidl arguments for importIDL() and importIDLString().
e.g. omniidlArguments(["-I/my/include", "-DMY_DEFINE"])"""
global _omniidl_args
if type(args) is not types.ListType:
raise TypeError("argument must be a list of strings")
for arg in args:
if type(arg) is not types.StringType:
raise TypeError("argument must be a list of strings")
_omniidl_args = args
| 5,344,754
|
def get_db_filenames(database_name):
""" This is used to populate the dropdown menu, so users can
only access their data if their name is in the user column"""
con = sql.connect(database_name)
c = con.cursor()
names_list = []
for row in c.execute(
"""SELECT Dataset_Name FROM master_table"""):
names_list.append(row[0])
con.close()
exists_list = []
for name in names_list:
if if_file_exists_in_db(database_name, name):
exists_list.append(name)
return exists_list
| 5,344,755
|
def _pull_live_data_from_staging():
""" Marginally different from _pull_data; uses remote variables for local
paths etc., as the local environment is presumed to be staging server.
"""
if env['host'] == PRODUCTION_HOST_2:
# No need to pull data twice
return
filename = "{}-{}.sql".format(PROJECT_NAME, uuid.uuid4())
project_path = os.getenv(ENV_PROJECT_PATH_VARIABLE)
universal_path = "{}/tmp/{}".format(project_path, filename)
staging_db_backup_path = "{}staging-{}-{}.sql".format(REMOTE_DUMP_PATH, REMOTE_DB_NAME, uuid.uuid4())
run('pg_dump -U{} -xOf {} {}'.format(REMOTE_DB_USERNAME, universal_path, REMOTE_DB_NAME))
run('gzip {}'.format(universal_path))
local('echo {}'.format(universal_path))
local('echo {}'.format(os.getenv(universal_path)))
get("{}.gz".format(universal_path), "{}.gz".format(universal_path))
run('rm {}.gz'.format(universal_path))
local('pg_dump -xOf {} {}'.format(staging_db_backup_path, REMOTE_DB_NAME))
puts('Previous local database backed up to {}'.format(staging_db_backup_path))
local("psql -c 'DROP SCHEMA public CASCADE;'")
local("psql -c 'CREATE SCHEMA public;'")
local('gunzip {}.gz'.format(universal_path))
local('psql {} -f {}'.format(REMOTE_DB_NAME, universal_path))
local('rm {}'.format(universal_path))
| 5,344,756
|
def test_empty_packages():
""" Test with empty package list """
w = TcpWrappersFacts(daemon_lists=[DaemonList(value=["ALL"])])
p = []
d = [("openssh", ["sshd"])]
packages = config_affects_daemons(w, p, d)
assert not packages
| 5,344,757
|
def Calculate(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first.
"""
if bin_function:
values = list(map(bin_function, values))
bins = {}
for val in values:
v = "%f-%f" % tuple(val)
bins[v] = bins.get(v, 0) + 1
bb = list(bins.items())
if mode:
        bb.sort(key=lambda item: item[1], reverse=True)
else:
bb.sort()
r = []
for v, n in bb:
        x, y = [float(part) for part in v.split("-")]
r.append((x, y, n))
return r
| 5,344,758
|
def getToday(format=3):
"""返回今天的日期字串"""
t = time.time()
date_ary = time.localtime(t)
if format == 1:
x = time.strftime("%Y%m%d", date_ary)
elif format == 2:
x = time.strftime("%H:%M", date_ary)
elif format == 3:
x = time.strftime("%Y/%m/%d", date_ary)
elif format == 4:
x = time.strftime("%Y/%m/%d %H:%M", date_ary)
elif format == 5:
x = time.strftime("%y%m%d", date_ary)
elif format == 6:
x = time.strftime("%Y-%m-%d", date_ary)
elif format == 7:
x = time.strftime("%Y/%m/%d %H:%M:%S", date_ary)
elif format == 8:
x = time.strftime("%Y-%m-%d %H:%M", date_ary)
elif format == 9:
x = time.strftime("%Y-%m-%d %H:%M:%S", date_ary)
elif format == 10:
x = time.strftime("%Y年%m月%d日 %H:%M", date_ary)
else:
x = time.strftime("%Y-%m-%d %H:%M:%S", date_ary)
return x
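# Illustrative outputs (actual values depend on the current date and time):
print(getToday(3))   # e.g. 2021/07/07
print(getToday(9))   # e.g. 2021-07-07 08:30:00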
| 5,344,759
|
def _get_referenced(body, start, end, no_header, clean, as_xml, as_list):
"""Retrieve data from body between some start and end."""
if body is None or start is None or end is None:
return None
content_list = body.get_between(
start, end, as_text=False, no_header=no_header, clean=clean
)
if as_list:
return content_list
referenced = Element.from_tag("office:text")
for chunk in content_list:
referenced.append(chunk)
if as_xml:
return referenced.serialize()
else:
return referenced
| 5,344,760
|
def info(filepath: str) -> AudioMetaData:
"""Get signal information of an audio file.
Args:
filepath (str): Path to audio file
Returns:
AudioMetaData: meta data of the given audio.
"""
sinfo = torch.ops.torchaudio.sox_io_get_info(filepath)
return AudioMetaData(sinfo.get_sample_rate(), sinfo.get_num_frames(), sinfo.get_num_channels())
| 5,344,761
|
def _override_regex_to_allow_long_doctest_lines():
"""Allow too-long lines for doctests.
Mostly a copy from `pylint/checkers/format.py`
Parts newly added are marked with comment, "[PYTA added]: ..."
"""
def new_check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
ignore_long_line = self.config.ignore_long_lines
def check_line(line, i, prev_line=None):
if not line.endswith('\n'):
self.add_message('missing-final-newline', line=i)
else:
# exclude \f (formfeed) from the rstrip
stripped_line = line.rstrip('\t\n\r\v ')
if not stripped_line and _EMPTY_LINE in self.config.no_space_check:
# allow empty lines
pass
elif line[len(stripped_line):] not in ('\n', '\r\n'):
self.add_message('trailing-whitespace', line=i)
# Don't count excess whitespace in the line length.
line = stripped_line
mobj = OPTION_RGX.search(line)
if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
line = line.split('#')[0].rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
self.add_message('line-too-long', line=i, args=(len(line), max_chars))
return i + 1
unsplit_ends = {
'\v', '\x0b', '\f', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029'}
unsplit = []
_split_lines = lines.splitlines(True)
# [PYTA added]: enumerate to get line_i index.
for line_i, line in enumerate(_split_lines):
if line[-1] in unsplit_ends:
unsplit.append(line)
continue
if unsplit:
unsplit.append(line)
line = ''.join(unsplit)
unsplit = []
# [PYTA added]: Skip error message for long doctest lines
doctest_tokens = compile(r'^\s*>>>.*?\n$')
if match(doctest_tokens, line):
continue
elif line_i > 0 and match(doctest_tokens, _split_lines[line_i-1]):
continue
i = check_line(line, i)
if unsplit:
check_line(''.join(unsplit), i)
FormatChecker.check_lines = new_check_lines
| 5,344,762
|
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
"""
Write out a vrt to accompany a csv of points
"""
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close()
| 5,344,763
|
def register_coco_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, **metadata
)
| 5,344,764
|
def handle(*, artifacts: oa_types.SimplePropertyArtifacts) -> types.TColumn:
"""
Handle a simple property.
Args:
artifacts: The artifacts of the simple property.
Returns:
The constructed column.
"""
return facades.sqlalchemy.simple.construct(artifacts=artifacts)
| 5,344,765
|
def rollout_representation(representation_model, steps, obs_embed, action, prev_states, done):
"""
Roll out the model with actions and observations from data.
:param steps: number of steps to roll out
:param obs_embed: size(time_steps, batch_size, n_agents, embedding_size)
:param action: size(time_steps, batch_size, n_agents, action_size)
:param prev_states: RSSM state, size(batch_size, n_agents, state_size)
:return: prior, posterior states. size(time_steps, batch_size, n_agents, state_size)
"""
priors = []
posteriors = []
for t in range(steps):
prior_states, posterior_states = representation_model(obs_embed[t], action[t], prev_states)
prev_states = posterior_states.map(lambda x: x * (1.0 - done[t]))
priors.append(prior_states)
posteriors.append(posterior_states)
prior = stack_states(priors, dim=0)
post = stack_states(posteriors, dim=0)
return prior.map(lambda x: x[:-1]), post.map(lambda x: x[:-1]), post.deter[1:]
| 5,344,766
|
def argparser():
"""parse arguments from terminal"""
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video', dest='video')
parser.add_argument('-c', '--config', dest='config', default=CONFIG_FILE)
parser.add_argument('-o', '--output', dest='output')
return parser
| 5,344,767
|
def generate_random_ast(schema, rng):
"""End-to-end simulator for AST of Core DSL."""
distributions = [schemum[1] for schemum in schema]
partition_alpha = rng.gamma(1,1)
partition = generate_random_partition(partition_alpha, len(distributions), rng)
row_dividers = [generate_random_row_divider(rng) for _i in partition]
primitives = [
[output, dist, generate_random_hyperparameters(dist, rng)]
for output, dist in enumerate(distributions)
]
return [
[row_divider, [primitives[b] for b in block]]
for row_divider, block in zip(row_dividers, partition)
]
| 5,344,768
|
def ngmlrmap_bam_in(in_fn, ref_fa, out_bam, nproc=4):
"""Call ngmlr to map in_fn to reference fasta and output to out_bam"""
cmd = indep.ngmlrmap_bam_in_cmd(
in_fn=in_fn, ref_fa=ref_fa, out_bam=out_bam, nproc=nproc)
execute_as_bash([cmd], realpath('%s.ngmlrmap.bash' % out_bam))
| 5,344,769
|
def _opcode_to_string(opcode):
"""Return the printable name for a REIL opcode.
Args:
opcode (reil.Opcode): The opcode to provide in printable form.
Returns:
A string representing the opcode.
"""
return _opcode_string_map[opcode]
| 5,344,770
|
def get_shot(shot):
"""Retrieves shot object from database and returns as dictionary.
Raises exception if shot is not found.
"""
return __get_conn().get_entity(__table_name(),
shot['PartitionKey'], shot['RowKey'])
| 5,344,771
|
def report_value_count(data_frame: pd.DataFrame, column: str, digits: int = 2) -> str:
"""
Report the number and percentage of non-empty values in the column.
Parameters
----------
data_frame : pandas.DataFrame
A data frame with one or more columns.
column : str
The name of the column to report on.
digits : int, optional
The number of digits to report in the percentage (default 2).
Returns
-------
str
The number of non-empty cells and a percentage of the total number of rows.
"""
count = data_frame[column].notnull().sum()
# The type of `count` is `numpy.int64` which when divided by zero yields `nan`.
# This is undesired and we rather raise an exception here.
if len(data_frame) == 0:
raise ZeroDivisionError("The data frame is empty!")
return f"{humanize.intcomma(count)} ({count / len(data_frame):.{digits}%})"
| 5,344,772
|
def input_fn(is_training, data_dir, batch_size, num_epochs=1,
num_parallel_calls=1, multi_gpu=False):
"""Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
num_parallel_calls: The number of records that are processed in parallel.
This can be optimized per data set but for generally homogeneous data
sets, should be approximately the number of available CPU cores.
multi_gpu: Whether this is run multi-GPU. Note that this is only required
currently to handle the batch leftovers, and can be removed
when that is handled directly by Estimator.
Returns:
A dataset that can be used for iteration.
"""
filenames = get_filenames(is_training, data_dir)
dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
  num_images = _NUM_IMAGES['train'] if is_training else _NUM_IMAGES['validation']
return resnet_run_loop.process_record_dataset(
dataset, is_training, batch_size, _NUM_IMAGES['train'],
parse_record, num_epochs, num_parallel_calls,
examples_per_epoch=num_images, multi_gpu=multi_gpu)
| 5,344,773
|
def getWinners(players, game):
"""
    Return a list of winners.
    :param players: the players in the game
    :param game: the current game state (players, scores and turn order)
    :return: list of indices of the winning players
"""
# get score for each player
for i in range(0, len(game.players)):
game.players[i].credits = scoreFor(i, game)
currentPlayer = whoseTurn(game)
# add 1 to players who had less turns
best = 0
winners = []
for i in range(0, len(game.players)):
if best < game.players[i].credits:
best = game.players[i].credits
winners.append(i)
elif best == game.players[i].credits:
if i <= currentPlayer:
winners.append(i)
else:
if currentPlayer in winners:
winners = []
winners.append(i)
return winners
| 5,344,774
|
def pprint(data):
"""Print Json in pretty human readable format.
    There's a standard module pprint that can pretty-print Python dicts and lists,
    but it doesn't support sorted keys and its indentation doesn't look good.
Usage::
>>> from dataIO import js
>>> js.pprint({"a": 1, "b": 2})
{
"a": 1,
"b": 2
}
    Print a JSON-serializable Python object in a human-readable way.
"""
print(pretty_dumps(data))
| 5,344,775
|
def read_xsf(filepath):
"""
    :param filepath: path of the xsf file
    :return: cell and atoms needed to build the pymatflow.structure.crystal object
"""
a = ase.io.read(filepath, format='xsf')
cell = a.cell.tolist()
atoms = []
for i in range(len(a.arrays['numbers'])):
for item in base.element:
if base.element[item].number == a.arrays['numbers'][i]:
symbol = item
break
atoms.append(base.Atom(
symbol,
a.arrays['positions'][i, 0],
a.arrays['positions'][i, 1],
a.arrays['positions'][i, 2]
))
return cell, atoms
| 5,344,776
|
def elina_abstract0_bound_linexpr(man, a, linexpr):
"""
Returns the ElinaInterval taken by an ElinaLinexpr0 over an ElinaAbstract0.
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
a : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
linexpr : ElinaLinexpr0Ptr
Pointer to the ElinaLinexpr0.
Returns
-------
interval : ElinaIntervalPtr
Pointer to the ElinaInterval.
"""
interval = None
try:
elina_abstract0_bound_linexpr_c = elina_auxiliary_api.elina_abstract0_bound_linexpr
elina_abstract0_bound_linexpr_c.restype = ElinaIntervalPtr
elina_abstract0_bound_linexpr_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr]
interval = elina_abstract0_bound_linexpr_c(man, a, linexpr)
except:
print('Problem with loading/calling "elina_abstract0_bound_linexpr" from "libelinaux.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr to the function')
return interval
| 5,344,777
|
def load_papertext(train_rate=0.8, dev_rate=0.1, test_rate=0.1, max_length=50, download_from_label_studio=True):
"""
    Aspect-based sentiment analysis data loader.
    :param kind: whether to load the papertext data or the dem8 data
:return:
:rtype:
"""
export_dir = "/opt/nlp/data/papertext/"
if download_from_label_studio:
json_path = export_data(hostname='http://127.0.0.1:8080/api/', dirpath=export_dir, jsonfile="0707.json")
data = collect_json(dirpath=export_dir)
valid_data = []
for one in data:
for complete in one['completions']:
if complete.get('was_cancelled'):
                # the annotation was cancelled, so skip it
continue
else:
                # only take the first annotation result; there is only one per item
if complete['result']:
result_one = complete['result'][0]
label = result_one['value']['choices'][0]
location = one['data']['location']
location = location.replace('行数','lines num').replace('段落宽度','paragraph width').replace('段落高度','paragraph height').replace('页面宽','page width').replace('页面高','page height')
text = one['data']['text']
valid_data.append([text,location,label])
    print(f'Collected {len(valid_data)} valid samples out of {len(data)} total records')
random.seed(30)
random.shuffle(valid_data)
total = len(valid_data)
train_num = int(total * train_rate)
dev_num = int(total * dev_rate)
test_num = int(total * test_rate)
train_data = valid_data[:train_num]
dev_data = valid_data[train_num:train_num+dev_num]
test_data = valid_data[train_num+dev_num:]
    # convert to the format used for saving
def change_data(kind_data, name):
cnts = collections.Counter()
rows = []
for idx, one_data in enumerate(kind_data):
content, location, label = one_data
# label_id = labels2id[label]
            assert label in ['作者','页眉','页脚','段落','标题','参考','表格','图像','公式','其它'], "label is not one of the expected keywords, so the labels in the paper_task_def.yml config file cannot be parsed and an error will occur"
sample = {'uid': idx, 'premise': content, 'hypothesis': location, 'label': label}
cnts[label] +=1
rows.append(sample)
print(f"{name}数据集的各个label的数量是: {cnts}")
return rows
papertext_train_data = change_data(train_data, name='train')
papertext_dev_data = change_data(dev_data, name='dev')
papertext_test_data = change_data(test_data, name='test')
return papertext_train_data, papertext_dev_data, papertext_test_data
| 5,344,778
|
def printhelp():
"""
print the document options for the simba3d command line utility
"""
print('Create a random initialization')
print('[options] -o <result files>')
print('\t-o or --output-files <result_file_name.csv> csv filename')
print('[Options]')
print('-n or --number-of-nodes <number of nodes>')
    print('-m or --method <gaussian> specify the method of initialization (currently only gaussian random noise is available)')
print('-s or --seed <a non-negative integer> you can specify the seed if you want to')
#print '-f or --filter <string parameter name> <minimum value> <maximum value>'
| 5,344,779
|
def set_log_level_for_all_handlers(logger, level=logging.DEBUG):
"""
Set a log level for all the handlers on the provided logger.
"""
logger.setLevel(level)
handlers = logger.handlers
for handler in handlers:
handler.setLevel(level)
return logger
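# Quick illustration (not part of the original module): force DEBUG on a logger
# that already carries a handler and confirm the handler level follows.
demo_logger = logging.getLogger("demo")
demo_logger.addHandler(logging.StreamHandler())
set_log_level_for_all_handlers(demo_logger, logging.DEBUG)
assert all(h.level == logging.DEBUG for h in demo_logger.handlers)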
| 5,344,780
|
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
| 5,344,781
|
def compare_ask_ai_question():
"""
    compare_ask_ai_question(): Ask one question about many products (GPT-3)
"""
try:
id_token = request.headers['Authorization']
claims = auth.verify_id_token(id_token)
uid = claims['uid']
data = request.json['data']
question = data['question']
product_ids = data['product_ids']
if claims['Enterprise'] is True:
product_answers = []
for product_id in product_ids:
todo = GPT3QA.document(product_id)
todo_dict = todo.get().to_dict()
if todo_dict['company_id'] == uid:
response = openai.Answer.create(
n=3,
temperature=0.35,
search_model="ada",
model="curie",
question=str(question),
file=todo_dict['gpt3_form_id'],
examples_context="In 2017, U.S. life expectancy was 78.6 years. With a 2019 population of 753,675, it is the largest city in both the state of Washington and the Pacific Northwest",
examples=[["What is human life expectancy in the United States?", "78 years."],
["what is the population of Seattle?", "Seattle's population is 724,305"]],
max_tokens=40,
stop=["\n", "<|endoftext|>"],
)
document_list = response['selected_documents']
df = pd.DataFrame(data=document_list)
text_list = df.nlargest(3, 'score')['text'].tolist()
answer_response = response['answers']
product_answers.append(
{"AI Answers": answer_response, "Reviews": text_list})
else:
return ("You are not authorized to view this page"), 403
return (jsonify(product_answers), 200)
else:
return ("You are not authorized to view this page"), 403
except Exception as e:
return f"An Error Occured: {e}"
| 5,344,782
|
def close_forecast_files(exporter):
"""Close the files associated with a forecast exporter.
Finish writing forecasts and close the output files opened by a forecast
exporter.
Parameters
----------
exporter : dict
An exporter object created with any initialization method implemented
in :py:mod:`pysteps.io.exporters`.
"""
if exporter["method"] == "geotiff":
pass # NOTE: There is no explicit "close" method in GDAL.
# The files are closed when all objects referencing to the GDAL
# datasets are deleted (i.e. when the exporter object is deleted).
if exporter["method"] == "kineros":
pass # no need to close the file
else:
exporter["ncfile"].close()
| 5,344,783
|
def read():
"""
Read temperature
:return: temperature
"""
# global ds18b20
location = '/sys/bus/w1/devices/' + ds18b20 + '/w1_slave'
tfile = open(location)
text = tfile.read()
tfile.close()
secondline = text.split("\n")[1]
temperaturedata = secondline.split(" ")[9]
temperature = float(temperaturedata[2:])
temperature = temperature / 1000
return temperature
| 5,344,784
|
def wine(root):
"""Title of Database: Wine recognition data
Updated Sept 21, 1998 by C.Blake : Added attribute information
These data are the results of a chemical analysis of
wines grown in the same region in Italy but derived from three
different cultivars.
The analysis determined the quantities of 13 constituents
found in each of the three types of wines.
Number of Instances
class 1 59
class 2 71
class 3 48
Data storage directory:
root = `/user/.../mydata`
wine data:
`root/wine/wine.txt`
`root/wine/wine.json`
Args:
        root: str, the absolute path of the data directory.
            example: if you want the data path to be `/user/.../mydata/wine`,
            root should be `/user/.../mydata`.
    Returns:
        The absolute path of the data directory, i.e. `root/wine`.
"""
start = time.time()
task_path = assert_dirs(root, 'wine')
url_introduce = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names'
url_txt = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
rq.files(url_introduce, gfile.path_join(task_path, 'introduce.txt'), verbose=0)
rq.table(url_txt, gfile.path_join(task_path, 'wine.txt'),
names=['label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'])
print('wine dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return task_path
| 5,344,785
|
def ddpg(env_fn, ac_kwargs=dict(), seed=0, cuda=True, train_interval=100, train_steps=50,
steps_per_epoch=5000, epochs=200, replay_size=int(1e6), gamma=0.99, hidden_size=64,
polyak=0.01, pi_lr=1e-4, q_lr=1e-3, batch_size=64, start_steps=1000,
act_noise=0, param_noise=0.2, max_ep_len=1000, logger_kwargs=dict(), save_freq=1):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# tf.set_random_seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
memory = Memory(limit=replay_size, action_shape=(act_dim, ), observation_shape=(obs_dim, ))
agent = core.DDPGAgent(obs_dim, act_dim, hidden_size, memory,
batch_size=batch_size, tau=polyak, gamma=gamma, action_noise_std=act_noise, cuda=cuda,
param_noise_std=param_noise, action_range=(-act_limit, act_limit))
# Count variables
# var_counts = tuple(core.count_vars(scope) for scope in ['main/pi', 'main/q', 'main'])
# print('\nNumber of parameters: \t pi: %d, \t q: %d, \t total: %d\n'%var_counts)
def test_agent(n=10):
for j in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
o, r, d, _ = test_env.step(agent.step(o, noisy=False)[0])
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy (with some noise, via act_noise).
"""
if t > start_steps:
a, q, _, _ = agent.step(o, noisy=True)
logger.store(QVals=q.mean())
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
agent.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if d or (ep_len == max_ep_len):
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
"""
Perform DDPG updates
"""
if memory.nb_entries > batch_size and t % train_interval == 0:
if param_noise > 0:
distance = agent.adapt_actor_param_noise()
for _ in range(train_steps):
# Q-learning update
value_loss, policy_loss = agent.train()
logger.store(LossQ=value_loss, LossPi=policy_loss)
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': env,}, None)
# Test the performance of the deterministic version of the agent.
test_agent(20)
            # Log info about the current epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
# logger.log_tabular('QVals', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
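# A hypothetical smoke-test invocation of the trainer above. The environment id
# "Pendulum-v1" and the tiny training budget are placeholders chosen for the
# example; they are not settings from the original experiments.
if __name__ == "__main__":
    import gym
    ddpg(lambda: gym.make("Pendulum-v1"),
         epochs=1, steps_per_epoch=1000, start_steps=100, cuda=False)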
| 5,344,786
|
def setup(bot):
"""
Mandatory function to add the Cog to the bot.
"""
bot.add_cog(GeneralDebugCog(bot))
| 5,344,787
|
def test_setter_with_models():
"""Assert that an error is raised when there are models."""
atom = ATOMClassifier(X_bin, y_bin, random_state=1)
atom.run("LR")
with pytest.raises(PermissionError, match=r".*not allowed to change the data.*"):
atom.X = X_class
| 5,344,788
|
def initialize_scenario_data():
"""Will initialize the Scenario Data.
    :return: an empty ScenarioData named tuple
    :rtype: ScenarioData
"""
actors = {}
companies = {}
scenario_data = ScenarioData(actors, companies)
return scenario_data
| 5,344,789
|
def dynamicMass(bulk_density, lat, lon, height, jd, velocity, decel, gamma=1.0, shape_factor=1.21):
""" Calculate dynamic mass at the given point on meteor's trajectory.
Either a single set of values can be given (i.e. every argument is a float number), or all arguments
must be numpy arrays.
Arguments:
bulk_density: [float] Bulk density of the meteoroid in kg/m^3.
lat: [float] Latitude of the meteor (radians).
        lon: [float] Longitude of the meteor (radians).
height: [float] Height of the meteor (meters).
jd: [float] Julian date of the meteor.
velocity: [float] Velocity of the meteor (m/s).
decel: [float] Deceleration in m/s^2.
Keyword arguments:
        gamma: [float] Drag coefficient. 1 by default.
        shape_factor: [float] Shape factor for the body. 1.21 (sphere) by default. Other values:
- sphere = 1.21
- hemisphere = 1.92
- cube = 1.0
- brick 2:3:5 = 1.55
Return:
dyn_mass: [float] Dynamic mass in kg.
"""
# Calculate the atmosphere density at the given point
atm_dens = getAtmDensity_vect(lat, lon, height, jd)
# Calculate the dynamic mass
dyn_mass = (1.0/(bulk_density**2))*((gamma*(velocity**2)*atm_dens*shape_factor)/decel)**3
return dyn_mass
| 5,344,790
|
def traverse_tuple(t:tuple,mut:Mutator)->None:
""" Traverse an arbitrary Python tuple. Forbids changing items. """
assert isinstance(t,tuple)
for i in range(len(t)):
x=mut(i,t[i])
assert x==t[i], "Can't change tuple item"
if isinstance(t[i],list):
scanref_list(t[i])
elif isinstance(t[i],dict):
traverse_dict(t[i],mut)
elif isinstance(t[i],tuple):
traverse_tuple(t[i],mut)
| 5,344,791
|
def input_fn_tfrecords(files_name_pattern, num_epochs, batch_size, mode):
"""
Input functions which parses TFRecords.
:param files_name_pattern: File name to TFRecords.
:param num_epochs: Number of epochs.
:param batch_size: Batch size.
:param mode: Input function mode.
:return: features and label.
"""
return tf.data.experimental.make_batched_features_dataset(
file_pattern=files_name_pattern,
batch_size=batch_size,
features=get_metadata().schema.as_feature_spec(),
reader=tf.data.TFRecordDataset,
num_epochs=num_epochs,
shuffle=True if mode == tf.estimator.ModeKeys.TRAIN else False,
shuffle_buffer_size=1 + (batch_size * 2),
prefetch_buffer_size=1,
)
| 5,344,792
|
def validate(number):
"""Check if the number provided is a valid RUC number. This checks the
length, formatting, check digit and check sum."""
number = compact(number)
if len(number) != 13:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
if number[:2] < '01' or number[:2] > '24':
raise InvalidComponent() # invalid province code
if number[2] < '6':
# 0..5 = natural RUC: CI plus establishment number
if number[-3:] == '000':
raise InvalidComponent() # establishment number wrong
ci.validate(number[:10])
elif number[2] == '6':
# 6 = public RUC
if number[-4:] == '0000':
raise InvalidComponent() # establishment number wrong
if _checksum(number[:9], (3, 2, 7, 6, 5, 4, 3, 2, 1)) != 0:
raise InvalidChecksum()
elif number[2] == '9':
# 9 = juridical RUC
if number[-3:] == '000':
raise InvalidComponent() # establishment number wrong
if _checksum(number[:10], (4, 3, 2, 7, 6, 5, 4, 3, 2, 1)) != 0:
raise InvalidChecksum()
else:
raise InvalidComponent() # third digit wrong
return number
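# Usage sketch: validate() returns the compacted number when it is well formed
# and raises otherwise. The sample string below is a made-up placeholder and is
# not guaranteed to satisfy the checksum.
try:
    print(validate("1790011674001"))
except (InvalidLength, InvalidFormat, InvalidComponent, InvalidChecksum) as exc:
    print(f"rejected: {exc}")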
| 5,344,793
|
def parse_raw(data: bytes) -> dict:
"""
Parse the contents of an environment retrieved from flash or memory
and provide an equivalent dictionary.
The provided *data* should being at the start of the variable definitions.
It **must not** contain the ``env_t`` metadata, such as the CRC32 word
and the ``flags`` value (only present when compiled with
"``CONFIG_SYS_REDUNDAND_ENVIRONMENT``".
A :py:exc:`ValueError` is raised if no environment variables are found.
"""
results = {}
regex = raw_var_regex()
for match in regex.finditer(data):
name = match.group('name').decode('ascii')
value = match.group('value').decode('ascii')
results[name] = value
if not results:
raise ValueError('No environment variables found')
return results
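# A minimal illustration with a hand-built environment blob. The variable names
# and values are invented, and the exact byte layout assumed here
# (NUL-terminated "name=value" pairs) is an assumption about raw_var_regex().
example_env = b"bootdelay=2\x00baudrate=115200\x00"
print(parse_raw(example_env))  # expected to resemble {'bootdelay': '2', 'baudrate': '115200'}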
| 5,344,794
|
def _list_registered_paths() -> List[str]:
"""List available paths registered to this service."""
paths = []
for rule in application.url_map.iter_rules():
rule = str(rule)
if rule.startswith("/api/v1"):
paths.append(rule)
return paths
| 5,344,795
|
def plot_signals(data_dir, save_dir, plot_type):
"""
Plot signals in the correct format to generate the bottleneck features. For the signals
database the records are plotted stacked, this is, one below the other. For the recurrence
the signals are plotted in a 3x3 grid. Note that to maintain a pattern, each type of signal
has your place in the figure. For example, the ECG signals are the first ones in each plot.
Parameters:
save_dir -- Where to get the data
type: pathlib.Path
save_dir -- Where to save the plots
type: pathlib.Path
plot_type -- Which type of plot
type: String
values: signal, recurrence
"""
PLOT_SIGNAL_LIST = ["ECG1", "ECG2", "ABP", "PLETH", "RESP"] # List of Signals
SIGNAL_TYPES = {
"ECG": ["I", "II", "III", "MCL", "V", "aVF", "aVL", "aVR"], # Possible Signals encountered in physionet
"PLETH": "PLETH",
"ABP": "ABP",
"RESP": "RESP"
}
click.secho("Plotting Data...", fg="green")
# Creates the folder if it not exists
if not save_dir.is_dir():
click.secho("Folder " + str(save_dir) + " do not exists. Creating Folder...", fg="yellow", blink=True, bold=True)
save_dir.mkdir(exist_ok=True, parents=True)
# Reading Records
gc.collect() # Cleaning memory before start plotting
for f in tqdm(os.listdir(str(data_dir))):
# To do the pattern analysis in the models, we need to have a structured way to format the data
# Since there is 4 types of signals that can be encountered (where a record have 2 ECG signals)
# it is possible to have at most 5 inputs considering all the records. This snippet of code adds
# columns for the missing signals in each record and enforce an order to the output for the plots
record_signals = pd.read_csv(str(data_dir / f) , index_col=0)
# Skipping if file already exists
image_path = save_dir / (str(f[0:5]) + ".png")
        if image_path.exists():
            click.secho("File %s already exists. Skipping..." % str(image_path), fg="yellow")
            continue
ecg_ix_control = 0
for x in record_signals:
# Check if the signals is an ECG
if x in SIGNAL_TYPES["ECG"]:
ecg_ix_control += 1
record_signals.rename(columns={x : "ECG" + str(ecg_ix_control)}, inplace=True)
# Get the signals that are missing
missList = list(set(PLOT_SIGNAL_LIST) - set(list(record_signals)))
# Add signal to the record
for m in missList:
record_signals[m] = nan
# Creating Images
data_start = minuteToSample("4:40")
data_end = minuteToSample("5:00")
if(plot_type == "signal"):
threading.Thread(
target=stacked_signal_plot(
df = record_signals[PLOT_SIGNAL_LIST][data_start : data_end],
path = str(image_path),
figure_dim = 256,
dpi = 5
)
).start()
else:
threading.Thread(
target=stacked_recurrence_plot(
df = record_signals[PLOT_SIGNAL_LIST][data_start : data_end],
path = str(image_path),
figure_dim = 256,
dpi = 5
)
).start()
| 5,344,796
|
def input_fn(request_body, request_content_type):
"""An input_fn that loads a pickled numpy array"""
if request_content_type == "application/python-pickle":
array = np.load(BytesIO(request_body), allow_pickle=True)
return array
else:
raise Exception("Please provide 'application/python-pickle' as a request content type")
| 5,344,797
|
def game(x_train, x_test, y_train, y_test, algo='rf', show_train_scores=True):
"""Standard Alogrithms fit and return scores.
* Default Random State is set as 192 when posible.
* Available models - dc, rf, gb, knn, mc_ovo_rf, mc_ova_rf
"""
    if algo == 'dc':
        clf = DummyClassifier(strategy='most_frequent', random_state=192)
    elif algo == 'rf':
        clf = RandomForestClassifier(n_jobs=-1, random_state=192)
    elif algo == 'gb':
        clf = GradientBoostingClassifier(random_state=192)
    elif algo == 'knn':
        clf = KNeighborsClassifier()
    elif algo == 'mc_ovo_rf':
        clf = OneVsOneClassifier(RandomForestClassifier(n_jobs=-1,
                                                        random_state=192))
    elif algo == 'mc_ova_rf':
        clf = OneVsRestClassifier(RandomForestClassifier(n_jobs=-1,
                                                         random_state=192))
    else:
print('improper model name, please check help')
return 0, 0
clf = clf.fit(x_train, y_train)
    # default to zero when the user does not opt to show training scores
ac_score, f1_score = 0, 0
if show_train_scores:
print('Training Scores')
ac_score, f1_score = check_metric(clf.predict(x_train), y_train)
print('\nTesting Scores')
ac_score1, f1_score1 = check_metric(clf.predict(x_test), y_test)
    ret = {'classifier': clf,
           'train_ac_score': ac_score,
           'train_f1_score': f1_score,
           'test_ac_score': ac_score1,
           'test_f1_score': f1_score1,
           }
return ret
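# A toy end-to-end run of game() on a synthetic binary problem. The sample and
# feature counts are arbitrary, and check_metric() is assumed to be defined
# elsewhere in this module (game() already depends on it).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X_toy, y_toy = make_classification(n_samples=200, n_features=10, random_state=192)
x_tr, x_te, y_tr, y_te = train_test_split(X_toy, y_toy, test_size=0.25, random_state=192)
result = game(x_tr, x_te, y_tr, y_te, algo='rf')
print(result['test_ac_score'], result['test_f1_score'])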
| 5,344,798
|
def get_convolutional_model(vocab_size: int,
input_length: int,
num_classes: int,
embedding_size: int=300,
model_size: str='small'
) -> Model:
"""Create a character convolutional model
Parameters
----------
vocab_size: the number of characters in the vocabulary
    input_length: the size of the input sequences (must be at least 160)
num_classes: the number of output classes
embedding_size: the vector size of character representations
model_size: 'large' or 'small' feature sizes
Returns
-------
tf.keras.Model: an uncompiled keras model
"""
    if model_size.lower() == 'small':
        conv_filters = 256
        dnn_size = 1024
    elif model_size.lower() == 'large':
        conv_filters = 1024
        dnn_size = 2048
    else:
        raise ValueError("model size must be either 'small' or 'large'")
    if input_length < 160:
        raise ValueError('The input sequences must be at least 160 characters long')
model = Sequential()
model.add(layers.Embedding(
vocab_size,
embedding_size,
input_length=input_length,
name='character_embedding'
))
model.add(layers.Dropout(0.2, name='input_dropout'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_1'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_1'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_2'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_2'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_3'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_4'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_5'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_6'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_3'))
model.add(layers.Flatten(name='flatten'))
model.add(layers.Dense(dnn_size,
activation='relu',
name='dense_out_1'))
model.add(layers.Dropout(0.5, name='post_dropout_1'))
model.add(layers.Dense(dnn_size,
activation='relu',
name='dense_out_2'))
model.add(layers.Dropout(0.5, name='post_dropout_2'))
model.add(layers.Dense(num_classes,
activation='softmax',
name='output'))
return model
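# Build-and-compile sketch for the model above. The vocabulary size, sequence
# length and class count are placeholder values chosen only to satisfy the
# documented constraint that input_length is at least 160.
char_cnn = get_convolutional_model(vocab_size=70, input_length=256,
                                    num_classes=4, model_size='small')
char_cnn.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                 metrics=['accuracy'])
char_cnn.summary()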
| 5,344,799
|