content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_names():
    """Return the list of band member names."""
    members = ("Ozzy", "Tony", "Geezer", "Bill")
    return list(members)
|
576d91b42b6792aed15ad5598310fe46f046b563
| 28,181
|
def twoballs_filename(data_dir, num_examples, num_feat, num_noise_feat, frac_flip):
    """Build the data CSV filename and the permutation filename for a twoballs dataset."""
    # Shared stem; only the suffix differs between the two files.
    stem = '/twoballs_n=%d_%d:%d_rcn=%1.1f' % (
        num_examples, num_feat, num_noise_feat, frac_flip)
    data_filename = data_dir + stem + '.csv'
    perm_filename = data_dir + stem + '_perm.txt'
    return (data_filename, perm_filename)
|
8b965192527088017ca5544894c1257e22222caf
| 28,182
|
def getcompanyrow(values):
    """
    :param values: mapping keyed by the form's placeholder names
    :return: list of values representing a row in the company table
    """
    # Column order of the company table.
    columns = (
        '_COMPANYNUMBER_',
        '_COMPANYNAME_',
        '_WEBADDRESS_',
        '_STREETADDRESS1_',
        '_STREETADDRESS2_',
        '_CITY_',
        '_STATE_',
        '_ZIPCODE_',
        '_NOTES_',
        '_PHONE_',
    )
    return [values[column] for column in columns]
|
ea7b96c13797cf9aeeaa6da4d25e9144e2fc4524
| 28,184
|
import json
def model_path():
    """
    Read the pretrained-model storage path from the config.json file.
    :return: the path of the model
    """
    config_file = "D:\git仓库\GraduationProject\GraduationDesign\config.json"
    with open(config_file, 'r') as cfg:
        config = json.load(cfg)
    return config['model_path']
|
86147946eff2628e13cd3297e8c3c6618759eb1c
| 28,185
|
def _get_deconv_pad_outpad(deconv_kernel):
"""Get padding and out padding for deconv layers."""
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
else:
raise ValueError(f"Not supported num_kernels ({deconv_kernel}).")
return deconv_kernel, padding, output_padding
|
3c4a161e2d67bdb81d7e60a5e65ce232a4b0d038
| 28,186
|
import requests
import shutil
def download_file(url):
    """
    Download the file at the given URL into the current directory.
    - url: url of file to be downloaded
    - return: downloaded file name (last URL path component)
    """
    filename = url.rsplit('/', 1)[-1]
    response = requests.get(url, stream=True)
    with open(filename, 'wb') as out:
        shutil.copyfileobj(response.raw, out)
    return filename
|
b153f84f6f04299e460e19b9703d2ebd30804144
| 28,187
|
def recip_to_duration(recip):
    """Convert a humdrum recip string to a wholenote duration."""
    # A leading zero denotes a breve (two whole notes).
    if recip.startswith('0'):
        base = 2.0
    else:
        base = 1.0 / float(recip.rstrip('.'))
    num_dots = recip.count('.')
    # Dotted value: base * (2 - 2**-dots).
    return 2 * base - base * 2.0 ** (-num_dots)
|
200e86809488ab90df2e9dc0dde8bf7260804bc8
| 28,192
|
import random
def weighted_sampler(pop_dict, k=1):
    """Randomly sample a dictionary's keys, weighted by its values.

    For example m = {'a': 3, 'b': 2, 'c': 5} sampled with k=1000 should
    yield roughly 300 'a', 200 'b', and 500 'c' (results vary — it is a
    random sampler). Returns a list of k sampled keys.
    """
    population = list(pop_dict)
    population_weights = [pop_dict[key] for key in population]
    return random.choices(population, weights=population_weights, k=k)
|
421cd16931a4b6695c8800cbc140aa86b9ce413a
| 28,196
|
def points_to_vector(point1, point2):
    """Return the displacement vector pointing from point1 to point2."""
    displacement = point2 - point1
    return displacement
|
b8333afc6ecf6dbc8e1a9571b64d195ba896a73e
| 28,198
|
def validate(label, function, status, initial_value):
    """Validate a string, re-prompting the user until it passes.
    --------------------------------
    label: str
        the input label
    function: fun
        re-prompting continues while function(value) is falsy
    status: str
        the status of the input; choices: optional, required, default =
        (an optional input stops re-prompting once the value is empty)
    initial_value: str
        initial user input
    -----------------------------------
    Return: str
        the new value (initial_value if it has not changed)
    """
    value = initial_value
    optional = status in ("optionel", "default=1.0.0")
    if optional:
        keep_asking = lambda v: v != ''
    else:
        keep_asking = lambda v: True
    while not function(value) and keep_asking(value):
        print(f"{label} not valid")
        value = input(f"{label}({status}): ")
    return value
|
b677128451c7fafad56b6ced3d71d1daaabc39c8
| 28,199
|
import hashlib
def hash(answer):
    """
    Return the MD5 hex digest of str(answer), for checking Project Euler
    answers per https://github.com/davidcorbin/euler-offline
    """
    digest = hashlib.md5(str(answer).encode('ascii'))
    return digest.hexdigest()
|
fdb55ba8a2bcb91832cb3b7285ba7b04cd937cbb
| 28,200
|
import torch
def vector_to_index(vector, all_zeros=-1):
    """
    Convert a binary vector to a column tensor of the indices at which the
    vector equals one. Returns a 1x1 tensor holding ``all_zeros`` when the
    vector contains no ones.

    Bug fix: ``torch.nonzero`` returns the *positions* of nonzero elements,
    not their values, so the original "build values i+1 then subtract 1"
    scheme shifted every returned index down by one (e.g. a one at index 0
    came back as -1, colliding with the ``all_zeros`` sentinel).
    """
    hits = (vector == 1).nonzero()  # shape (k, 1): positions where vector is one
    if len(hits) == 0:
        return torch.Tensor([[all_zeros]])
    return hits
|
ba052dc3ed81e188249ad5e0192d864675412807
| 28,201
|
def enforce_use_of_all_cpus(model):
    """For sklearn models which have an `n_jobs` attribute,
    set it to -1. This will force all cores on the machine to be
    used.
    Args:
        model : sklearn model
            A trainable sklearn model
    Returns:
        model : sklearn model
            Model 'as is' with `n_jobs` set to -1.
    NOTE(review): setattr is unconditional — it creates `n_jobs` even on
    models that did not have the attribute, rather than only setting it
    "if it exists". Harmless for sklearn estimators, but worth knowing.
    """
    setattr(model, 'n_jobs', -1)
    return model
|
6fb7878700ffc2fea960432ed76f6d1d90638a32
| 28,203
|
from contextlib import redirect_stdout
import io
def capture_stdout(func, *args, **kwargs):
    """Run func(*args, **kwargs) and return everything it printed to stdout."""
    buffer = io.StringIO()
    with redirect_stdout(buffer):
        func(*args, **kwargs)
    return buffer.getvalue()
|
4b9f4ed54644a28850d0f68e2dda1f484fa9644c
| 28,205
|
def try_string_to_number(string_in):
    """Attempt to convert a string to either an integer or a float.

    If both conversions fail, the input is returned unmodified.
    (Fixed: the original bare ``except:`` clauses also swallowed
    KeyboardInterrupt/SystemExit; only conversion errors are caught now.)
    """
    try:
        return int(string_in)
    except (TypeError, ValueError):
        pass
    try:
        return float(string_in)
    except (TypeError, ValueError):
        return string_in
|
2e04135a59eb11e29ee3b20c91b9cf4c55fe9b6c
| 28,206
|
def generate_transaction_subsets(transaction, k):
    """
    Get subsets of a transaction of length k, as a list of tuples.
    >>> generate_transaction_subsets(['A', 'B', 'C', 'D', 'E'], 4)
    [('A', 'B', 'C', 'D'), ('A', 'B', 'C', 'E'), ('A', 'B', 'D', 'E'), ('A', 'C', 'D', 'E'), ('B', 'C', 'D', 'E')]
    :param transaction: list
    :param k: int
    :return: list of k-tuples
    Generalized: the original only handled k == 1 and k == len(transaction) - 1
    and raised otherwise. itertools.combinations emits exactly the same tuples
    in the same order for those two cases, and now any 1 <= k <= len(transaction)
    works. Out-of-range k still raises, preserving the original error message.
    """
    from itertools import combinations
    if not 1 <= k <= len(transaction):
        raise Exception('Trying to generate length %s subset of %s' % (k, transaction))
    return list(combinations(transaction, k))
|
38b29026ad36864bd5025a30f9723e21cf2a8415
| 28,207
|
import torch
def l2_dist_close_reward_fn(achieved_goal, goal, threshold=.05):
    """Give -1/0 reward based on how close the achieved state is to the goal.
    Args:
        achieved_goal (Tensor): achieved state, of shape ``[batch_size, batch_length, ...]``
        goal (Tensor): goal state, of shape ``[batch_size, batch_length, ...]``
        threshold (float): L2 distance threshold for the reward.
    Returns:
        Tensor for -1/0 reward of shape ``[batch_size, batch_length]``.
    """
    # 1-dimensional goals get a trailing axis so the norm below has a dim=2.
    if goal.dim() == 2:
        assert achieved_goal.dim() == goal.dim()
        achieved_goal = achieved_goal.unsqueeze(2)
        goal = goal.unsqueeze(2)
    distances = torch.norm(achieved_goal - goal, dim=2)
    too_far = (distances >= threshold).to(torch.float32)
    return -too_far
|
ca65cb272f13a6caa5f88c75d4129bf15dc3c22d
| 28,208
|
def update_kvk(kvk):
    """
    Update outdated KvK-numbers.
    :param kvk: the original KvK-number (any type; coerced to str)
    :return: the updated KvK-number, if it was updated
    """
    kvk_str = str(kvk)
    # Outdated 7-character numbers get a leading zero; anything else is
    # returned unchanged.
    return '0' + kvk_str if len(kvk_str) == 7 else kvk_str
|
61b1d490de866786330a698e0370a360856c14a9
| 28,209
|
from typing import Union
def check_positive_int(input_int: Union[str, int]) -> int:
    """Check if `input_int` is a positive integer.

    If it is, return it as an `int`. Raise `ValueError` otherwise
    (both for non-positive values and, via `int()`, for strings that are
    not valid integers; the original docstring's `TypeError` claim was
    wrong — only non-numeric *types* passed to `int()` raise TypeError).
    """
    input_int = int(input_int)
    if input_int <= 0:
        raise ValueError(f"A positive integer is expected, got {input_int}")
    return input_int
|
686b062a3df929541a708fca0df10d3ae5a09088
| 28,210
|
def has_perm(user, perm):
    """True when *user* holds permission *perm*; delegates to user.has_perm()."""
    return user.has_perm(perm)
|
01f3395f45c5ef0274b4b68fc557fa5f8e7b9466
| 28,211
|
def nextGreatest(nums):
    """
    NOTE(review): despite the name, this is NOT the usual "next greater
    element / next permutation" algorithm. Observed behavior from the code:
    scan from the right; at the FIRST adjacent pair where nums[i] > nums[i-1],
    swap the two elements in place and return the (mutated) list. If no such
    pair exists (the list is non-increasing left-to-right), return a REVERSED
    COPY instead — so the caller sometimes gets the same list object mutated
    and sometimes a new list. Intent unclear — confirm against callers before
    changing.
    """
    # Start at the last index and walk left.
    i= len(nums)-1
    while i >0:
        if nums[i]> nums[i-1]:
            # Swap nums[i] and nums[i-1] in place, then stop.
            pivot = nums[i]
            nums[i] = nums[i-1]
            nums[i-1] = pivot
            return nums
        i-=1
    # No pair found: list was non-increasing; hand back an ascending copy.
    return nums[::-1]
|
cfb05679818978423881a5507f39a9183a3d55ea
| 28,212
|
def str_ellipsis(txt, length=60):
    """Truncate too-wide lines of *txt* with a trailing ' ...' marker."""
    shortened = []
    for line in str(txt).splitlines():
        if len(line) > length:
            # Keep length-4 chars so the result (with " ...") is exactly `length`.
            line = line[: length - 4] + " ..."
        shortened.append(line)
    return "\n".join(shortened)
|
7d0a109f82f6ec1537d15c121a8e867270f6b06a
| 28,213
|
def format_date_string(date_str):
    """
    :param date_str: expects the format Thu Feb 28 14:51:59 +0000 2019
    :return: a dictionary containing day,month,hour,min
    """
    # Month abbreviation -> month number.
    MONTH_VALUES = {
        'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
        'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12,
    }
    parts = date_str.split(" ")
    # parts[1] is the month abbreviation, parts[2] the day of the month,
    # parts[3] the HH:MM:SS clock time.
    clock = parts[3].split(":")
    return {
        'day': int(parts[2]),
        'month': MONTH_VALUES[parts[1]],
        'hour': int(clock[0]),
        'min': int(clock[1]),
    }
|
dd77f0b87c84c0e1be57fa40c1b6bc2fdda0ad75
| 28,215
|
import os
def read_content(*args):
    """Join *args* into a path, read that file, and return its whole content."""
    if not args:
        raise ValueError('Invalid parameter!')
    path = os.path.join(*args)
    with open(path, 'r') as handle:
        return handle.read()
|
7d8ea628fa628c1e00aa4d49b5d5fbf6fcac3dba
| 28,216
|
def get_flight_segment_details(flight_segment):
    """
    Return details for a single flight segment as a dictionary with a
    'connection' entry (departure/arrival time and airport) and an 'info'
    entry (flight time, airline, flight number, service class).
    """
    def text_at(xpath):
        # Text of the first element matching `xpath` within this segment.
        return flight_segment.find_element_by_xpath(xpath).text

    departure = {
        'time': text_at(".//strong[@class='time qa-segment-departure-time']"),
        'airport': text_at(
            ".//span[@class='airport-name qa-segment-departure-airport']/"
            "strong[@class='city-name']"),
    }
    arrival = {
        'time': text_at(".//strong[@class='time qa-segment-arrival-time']"),
        'airport': text_at(
            ".//span[@class='airport-name qa-segment-arrival-airport']/"
            "strong[@class='city-name']"),
    }
    info = {
        'flight_time': text_at(".//strong[@class='qa-segment-flight-time']"),
        'airline_name': text_at(".//strong[@class='qa-segment-airline-name']"),
        'flight_number': text_at(".//strong[@class='qa-segment-flight-number']"),
        'service_class': text_at(".//strong[@class='qa-segment-service-class']"),
    }
    return {'connection': {'departure': departure, 'arrival': arrival},
            'info': info}
|
99d7463157bd63318dc5686b531691aa7111ca30
| 28,217
|
def get_oidc_auth(token=None):
    """Return HTTP headers carrying an OIDC bearer token."""
    headers = {'Authorization': token}
    return headers
|
a951cfdf83c5a0def3128ce409ced5db4fa8d3b6
| 28,218
|
def guide_with_numbers(board_str: str) -> str:
    """Adds numbers and letters on the side of a string without them made out
    of a board.
    Parameters:
    - board_str: The string from the board object.
    Returns: The string with the numbers and letters.
    """
    # NOTE(review): the loop below inserts into `row_list` WHILE enumerating
    # it. enumerate() then sees the freshly inserted elements, and the
    # two-element insertion is sized so the `(i + 1) % 10` arithmetic stays
    # aligned. Do not refactor this into a copy-based rewrite without
    # re-deriving the index math.
    # Spaces and newlines would mess up the loop because they add extra indexes
    # between pieces. Newlines are added later by the loop and spaces are added
    # back in at the end.
    board_without_whitespace_str = board_str.replace(" ", "").replace("\n", "")
    # The first number, 8, needs to be added first because it comes before a
    # newline. From then on, numbers are inserted at newlines.
    row_list = list("8" + board_without_whitespace_str)
    for i, char in enumerate(row_list):
        # `(i + 1) % 10 == 0` if it is the end of a row, i.e., the 10th column
        # since lists are 0-indexed.
        if (i + 1) % 10 == 0:
            # Since `i + 1` is always a multiple of 10 (because index 0, 10,
            # 20, etc. is the other row letter and 1-8, 11-18, 21-28, etc. are
            # the squares), `(i + 1) // 10` is the inverted row number (1 when
            # it should be 8, 2 when it should be 7, etc.), so therefore
            # `9 - (i + 1) // 10` is the actual row number.
            row_num = 9 - (i + 1) // 10
            # The 3 separate components are split into only 2 elements so that
            # the newline isn't counted by the loop. If they were split into 3,
            # or combined into just 1 string, the counter would become off
            # because it would be counting what is really 2 rows as 3 or 1.
            row_list[i:i] = [str(row_num) + "\n", str(row_num - 1)]
    # 1 is appended to the end because it isn't created in the loop, and lines
    # that begin with spaces have their spaces removed for aesthetics.
    row_str = (" ".join(row_list) + " 1").replace("\n ", "\n")
    # a, b, c, d, e, f, g, and h are easy to add in.
    row_and_col_str = " a b c d e f g h \n" + row_str + "\n a b c d e f g h "
    return row_and_col_str
|
cc6bec2fb1549cec3413d458a0cbaf87bc01bf48
| 28,220
|
def parse_antennafile(positions_file):
    """
    Parses an antenna file from the LOFAR system software repository.
    :param positions_file: an antenna file path
    :returns: a dictionary with array name as key and a list of per-antenna
        position rows (lists of floats) as values

    Fixed: the file handle is now opened with a context manager so it is
    always closed (the original leaked it).
    """
    parsed = {}
    state = 0  # 0: expect array name, 1: array position, 2: metadata, 3: rows
    array = None
    position = None  # where the station is relative to the centre of the earth
    antennanum = 0
    positions = []
    antennacount = 0
    with open(positions_file, 'r') as file_handler:
        for line in file_handler:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if state == 0:  # array type
                array = line
                state = 1
            elif state == 1:  # array position
                position = [float(x) for x in line.split()[2:5]]
                state = 2
            elif state == 2:  # array properties meta data
                antennanum = int(line.split()[0])
                antennacount = antennanum
                state = 3
            elif state == 3:
                if antennacount > 0:
                    positions.append([float(x) for x in line.split()])
                    antennacount -= 1
                else:
                    # Closing bracket ends the per-antenna block.
                    assert (line == "]")
                    state = 0
                    parsed[array] = positions
                    positions = []
    return parsed
|
9ea0821b65febb1233cd1c03e5d0fec1cb1ee471
| 28,221
|
import os
import errno
def createdir(directory, abs=False):
    """Safely create the given directory path if it does not exist.
    Parameters
    ----------
    directory: string
        Path to directory that is being created.
    abs: boolean, optional
        Return absolute path if true. (Name shadows the builtin but is kept
        for caller compatibility.)
    Returns
    -------
    string
    """
    # Based on https://stackoverflow.com/questions/273192/
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as err:  # pragma: no cover
            # A concurrent creator may win the race; only EEXIST is benign.
            if err.errno != errno.EEXIST:
                raise
    return os.path.abspath(directory) if abs else directory
|
9a2792626cf4ab631e6354ac259cc6da8c116f20
| 28,223
|
import os
def get_log_config_path() -> str:
    """Return the config path for this agent: the LOGGERCONFIG environment
    variable when set, otherwise a fixed default path.
    """
    default_path = "/etc/intel-manageability/public/node-agent/logging.ini"
    return os.environ.get('LOGGERCONFIG', default_path)
|
6d224d74111e3affde653936134a469d520590f7
| 28,224
|
def bytes_from_hex(hexcode):
    """ Translate a whitespace-delimited string of hexadecimal numbers into
    the corresponding characters, joined into one string.
    NOTE(review): despite the name this returns a ``str`` of code points on
    Python 3, not ``bytes`` — confirm callers expect that before changing.
    """
    chars = [chr(int(token, 16)) for token in hexcode.split()]
    return ''.join(chars)
|
fbb800f0ea7f1327b42965fab48b740fad251027
| 28,225
|
import pytz
def convert_datetime_to_local_datetime(dt, tzname, output_naive=False):
    """Convert a datetime to the timezone named *tzname*.
    A naive input datetime is assumed to be UTC. When *output_naive* is
    true, the tzinfo is stripped from the result.
    """
    local = pytz.timezone(tzname)
    if dt.tzname() is None:
        # Naive datetimes are treated as UTC.
        dt = dt.replace(tzinfo=pytz.timezone('UTC'))
    local_dt = dt.astimezone(local)
    if output_naive:
        return local_dt.replace(tzinfo=None)
    return local_dt
|
c5637e975bf321b46047e7073c0b15fcc5c04487
| 28,226
|
def validate_split_durations(train_dur, val_dur, test_dur, dataset_dur):
    """Validate durations specified for dataset splits, so other functions
    can do the actual splitting.

    Invalid conditions raise ValueError: all three durations None; any
    negative value other than -1; only val_dur specified; more than one
    split set to -1; or a total exceeding `dataset_dur`. A value of -1
    means "use the remainder of the dataset for this split" and is passed
    through unmodified for the splitting functions to interpret. Durations
    left as None come back as 0.

    Parameters
    ----------
    train_dur : int, float
        Target duration for training set split, in seconds.
    val_dur : int, float
        Target duration for validation set, in seconds.
    test_dur : int, float
        Target duration for test set, in seconds.
    dataset_dur : int, float
        Total duration of dataset of vocalizations that will be split.
    Returns
    -------
    train_dur, val_dur, test_dur : int, float
    """
    if val_dur and (train_dur is None and test_dur is None):
        raise ValueError(
            "cannot specify only val_dur, unclear how to split dataset into training and test sets"
        )
    durations = {"train": train_dur, "val": val_dur, "test": test_dur}
    specified = [dur for dur in durations.values() if dur is not None]
    if not specified:
        raise ValueError(
            "train_dur, val_dur, and test_dur were all None; must specify at least train_dur or test_dur"
        )
    if any(dur < 0 and dur != -1 for dur in specified):
        raise ValueError(
            "all durations for split must be real non-negative number or "
            "set to -1 (meaning 'use the remaining dataset)"
        )
    if len([dur for dur in specified if dur == -1]) > 1:
        raise ValueError(
            "cannot specify duration of more than one split as -1, unclear how to calculate durations of splits."
        )
    # Unspecified splits get duration 0: no part of the dataset goes there.
    durations = {name: 0 if dur is None else dur for name, dur in durations.items()}
    if -1 in durations.values():
        known_total = sum(dur for dur in durations.values() if dur != -1)
        if known_total > dataset_dur:
            raise ValueError(
                "One dataset split duration was specified as -1, but the total of the other durations specified, "
                f"{known_total} s, is greater than total duration of Dataset, {dataset_dur}."
            )
    else:  # no target duration is -1
        requested_total = sum(durations.values())
        if requested_total > dataset_dur:
            raise ValueError(
                f"Total of the split durations specified, {requested_total} s, "
                f"is greater than total duration of dataset, {dataset_dur}."
            )
    return durations["train"], durations["val"], durations["test"]
|
351bcd963d68e70d434ce7939cc4d01359285d1f
| 28,229
|
import os
def get_directory_size(path: str) -> int:
    """Return folder size in bytes (recursive). Blocking function."""
    total = 0
    for dirpath, _, filenames in os.walk(path):
        total += sum(
            os.stat(os.path.join(dirpath, name)).st_size for name in filenames)
    return total
|
1aa96c4de551cce85e56ff1e9799c492e449a6ff
| 28,230
|
def telephone_subscription_msg(user, preferred_time):
    """ Returns a tuple with (subject, body) for a new telephone subscription. """
    # Subscription-type code -> human label; unknown codes fall back to '-'.
    st_labels = {
        u'DDIGM': 'digital ilimitada',
        u'DDIGMFS': 'digital recargada',
        u'PAPYDIM': 'papel lunes a viernes',
        u'PAPYLAS': 'papel lunes a sabado',
        u'LDFS': 'papel fin de semana',
        u'LENM': 'Revista Lento',
    }
    # Preferred-time code -> human label; unknown codes fall back to '-'.
    pt_labels = {
        '1': u'Cualquier hora (9:00 a 20:00)',
        '2': u'En la mañana (9:00 a 12:00)',
        '3': u'En la tarde (12:00 a 18:00)',
        '4': u'En la tarde-noche (18:00 a 20:00)',
    }
    name = user.get_full_name() or user.subscriber.name
    subject = u'Nueva suscripción telefónica para %s' % (name)
    prices = user.suscripciones.all()[0].subscription_type_prices
    subscription_type = prices.all()[0].subscription_type
    st_text = st_labels.get(subscription_type, '-')
    pt_text = pt_labels.get(preferred_time, '-')
    body = (u'Nombre: %s\nTipo suscripción: %s\nEmail: %s\n'
            u'Teléfono: %s\nHorario preferido: %s\nDirección: %s\n'
            u'Ciudad: %s\nDepartamento: %s\n') % (
        name,
        st_text,
        user.email,
        user.subscriber.phone,
        pt_text, user.subscriber.address, user.subscriber.city,
        user.subscriber.province)
    return subject, body
|
ddefead8392350de0b408c262a41d976d05c7499
| 28,232
|
from pathlib import Path
from typing import Any
import csv
def load_csv(path: Path) -> Any:
    """Load a CSV file and return its rows as a list of dicts."""
    with open(path, newline='') as handle:
        return list(csv.DictReader(handle))
|
3a6d071f34bf239fb9de5c9eaabcaf6e71021373
| 28,233
|
def has_deformables(task):
    """
    Somewhat misleading name: reports whether `task` names one of the custom
    deformable-style environments. Used to decide (a) whether to run training
    AFTER environment data collection and (b) whether to evaluate with
    periodic test-time rollouts — the --disp option needed to see cloth
    cannot run multiple Environment calls, and several of these environments
    have rewards/info better analyzed via `load.py`. Matching 'cable-'
    (with the dash) deliberately ignores plain 'cable' while catching the
    custom cable environments, keeping them in the main-then-load paradigm.
    """
    return any(token in task for token in ('cable-', 'cloth', 'bag'))
|
9e547e7bbc4716506861b579e7175e8c0fdc9333
| 28,234
|
import os
def is_file(path):
    """Return True when *path* refers to an existing regular file."""
    return os.path.isfile(path)
|
88bb47b361a2cda3d1fdc740b36abb32a2627051
| 28,235
|
def makeGray(rgb, factor, maskColor):
    """
    Make a pixel grayed-out. A pixel matching the maskColor is returned
    unchanged.
    :param tuple `rgb`: a tuple of red, green, blue integers, defining the pixel :class:`wx.Colour`;
    :param float `factor`: the amount for which we want to grey out a pixel colour;
    :param `maskColor`: the mask colour.
    :type `maskColor`: tuple or :class:`wx.Colour`.
    :rtype: tuple
    :returns: An RGB tuple with the greyed out pixel colour.
    """
    if rgb == maskColor:
        return rgb
    # Blend each channel toward 230 by `factor`.
    return tuple(int((230 - channel) * factor) + channel for channel in rgb)
|
b4ac226c76794fa4de39f4944a734c5f6a267cf2
| 28,236
|
import ast
import inspect
def function_has_return(callable):
    """
    Return True if the function contains a return statement, False otherwise.
    Does not work with functions whose code is not written in Python.
    :param callable:
    :return:
    """
    tree = ast.parse(inspect.getsource(callable))
    return any(isinstance(node, ast.Return) for node in ast.walk(tree))
|
c2da88653249239ae22e0a1cd6128ca2ffaba9ca
| 28,237
|
import requests
import json
def get_company_info(id):
    """
    Pull a company name from the PRH (Finnish business register) open-data API.
    Example: get_company_info('0912797-2') -> 'Noksu Oy'
    Returns False when the request does not succeed with HTTP 200.
    """
    url = f"https://avoindata.prh.fi/tr/v1/{id}"
    response = requests.request("GET", url, data="")
    if response.status_code != 200:
        return False
    company = json.loads(response.text)
    return company['results'][0]['name']
|
6d2b1d7d44b1e3c6cab2ada28f1022ec2ab8ca24
| 28,238
|
def overlap_time_xr(*args, time_dim='time'):
    """Return the sorted intersection of the *time_dim* values across *args*.

    Each argument is expected to expose `arg[time_dim].values`
    (e.g. an xarray object with a `time` dimension).
    """
    collected = []
    try:
        for dataset in args:
            collected.append(dataset[time_dim].values)
    except KeyError:
        print('"{}" dim should be at all args...'.format(time_dim))
    common = set.intersection(*map(set, collected))
    return sorted(common)
|
90e8fb49b23135e2f6d36b31aa78110bad2a1f00
| 28,239
|
def display_users(user):
    """Display a saved user by delegating to its display_user() method."""
    return user.display_user()
|
0d70579312b0e79ddfd95527864b7c09253c5a6d
| 28,240
|
def train_valid_split(x_train, y_train, split_index=45000):
    """Split the original training data into a new training dataset
    and a validation dataset.
    Args:
        x_train: An array of shape [50000, 3072].
        y_train: An array of shape [50000,].
        split_index: An integer.
    Returns:
        x_train_new, y_train_new: the first split_index samples.
        x_valid, y_valid: the remaining samples.
    """
    return (x_train[:split_index], y_train[:split_index],
            x_train[split_index:], y_train[split_index:])
|
fd8eb959fd67c5a5cdfca0399e8b4fae1ff654d8
| 28,241
|
def field_values(iterable, field):
    """
    Convert an iterable of models into a list of values, one per model,
    each being the model's attribute named by "field". A falsy field
    yields an empty list.
    """
    if not field:
        return []
    return [getattr(item, field) for item in iterable]
|
846dd0e74de5590610a953a7bfc7240a2c5d90cf
| 28,243
|
import struct
def structparse_ip_header_info(bytes_string: bytes):
    """Inspect the first IP header byte of a raw Ethernet frame.

    Returns a dict with the IP version, the IP header length in bytes, and a
    note on whether IP Options are present.

    References:
        https://docs.python.org/3/library/struct.html
    Args:
        bytes_string (bytes): a bytes representation of a packet.
    Returns:
        dict: {'ip_version', 'ip_header_len', 'info'}.
    """
    # '!' = network byte order (big endian); '14x' skips the 14-byte Ethernet
    # header; 'B' reads one unsigned byte. unpack always returns a tuple, so
    # take element [0].
    first_byte = struct.unpack('!14xB', bytes_string[:15])[0]
    # High nibble = IP version; low nibble = IHL in 32-bit words.
    version = first_byte >> 4
    header_len = (first_byte & 0x0F) * 4
    if header_len < 20:
        note = "IHL is < 20 bytes, something is wrong"
    elif header_len == 20:
        note = "IHL = 20 bytes, No IP Options Present"
    else:
        note = "IHL > 20 bytes, IP Options are Present"
    return {'ip_version': version, 'ip_header_len': header_len, 'info': note}
|
af8be564ec68d8ecc7f91d8483309a6312f42263
| 28,247
|
import argparse
def parse_arguments():
    """Parse and return command-line arguments for the punycode converter."""
    parser = argparse.ArgumentParser(
        description='A simple punycode to unicode and back converter.')
    parser.add_argument('domain', help='domain name to convert')
    return parser.parse_args()
|
a551e70258e97546dc72741a7666c00e3a43a14a
| 28,248
|
import torch
def get_paddings_indicator(actual_num, max_num, axis=0):
    """
    Compute a boolean padding mask.
    :param actual_num: torch.Tensor, actual number of valid points per voxel
    :param max_num: int, maximum number of points per voxel used during voxelization
    :param axis: defaults to 0
    :return: torch.Tensor of shape (batch_size, max_num) where True marks
        valid slots, e.g.
        tensor([[ True,  True, False, ..., False],
                [ True,  True,  True, ..., False]])
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    # Broadcastable shape with -1 along the new slot axis.
    slot_shape = [1] * len(counts.shape)
    slot_shape[axis + 1] = -1
    slot_ids = torch.arange(
        max_num, dtype=torch.int, device=counts.device).view(slot_shape)
    # Slot index < count -> valid point.
    return counts.int() > slot_ids
|
91d67c2b5ad49ed108b1c63fb30374ec0f4d3263
| 28,250
|
import subprocess
import logging
import sys
def call_or_fail(cmd):
    """Call subprocess.check_output and return utf-8 decoded stdout, or log
    the failure and exit(1)."""
    try:
        output = subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logging.info('Command \'{}\' failed with error: {}'.format(cmd, str(err)))
        sys.exit(1)
    return output.decode('utf-8')
|
796ae33c0deb7f61de78c3e052290a50c7368c5b
| 28,251
|
import re
import string
def get_counts(counts_str):
    """
    Return number of inclusion, exclusion and reads supporting
    both isoforms, as a (num_inc, num_exc, num_both) tuple.
    Returns None when the isoform pattern has more than two flags or no
    count fields are found.

    Fixed: ``str.translate(None, string.punctuation)`` is Python-2-only and
    raised TypeError on Python 3; replaced with a ``str.maketrans`` deletion
    table. Regex patterns are now raw strings.
    """
    num_inc = 0
    num_exc = 0
    num_both = 0
    fields = re.findall(r"(\(.{3}\):\d+)", counts_str)
    isoforms = re.findall(r"\([01,]+\)", counts_str)[0]
    # Strip punctuation, leaving only the 0/1 flags.
    isoforms = isoforms.translate(str.maketrans('', '', string.punctuation))
    if len(isoforms) > 2:
        return None
    if len(fields) == 0:
        return None
    for field in fields:
        iso_type, count = field.split(":")
        count = int(count)
        if iso_type == "(1,0)":
            num_inc = count
        elif iso_type == "(0,1)":
            num_exc = count
        elif iso_type == "(1,1)":
            num_both = count
    return num_inc, num_exc, num_both
|
412c6c459621846eba7eb562936ea3e65d343acc
| 28,253
|
def get_config(cfg):
    """
    Build the ISONet hyperparameter (architecture) dict from a config object.
    Args:
        cfg: A YACS config object.
    """
    ison = cfg.ISON
    net_params = {
        "use_dirac": ison.DIRAC_INIT,
        "use_dropout": ison.DROPOUT,
        "dropout_rate": ison.DROPOUT_RATE,
        "nc": cfg.DATASET.NUM_CLASSES,
        "depths": ison.DEPTH,
        "has_bn": ison.HAS_BN,
        "use_srelu": ison.SReLU,
        "transfun": ison.TRANS_FUN,
        "has_st": ison.HAS_ST,
    }
    return {"net_params": net_params}
|
ea871170b7d70efde0601bbce340b3960227a459
| 28,254
|
import resource
def peak_memory() -> float:
    """
    Peak resident memory of this process, in GB.
    NOTE(review): ru_maxrss is kilobytes on Linux but bytes on macOS, so the
    GB conversion below is only exact on Linux — confirm target platform.
    """
    max_rss_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return max_rss_kb / (1024 * 1024)
|
7e7600b051da58eb7a2bb478cad97c4026856c36
| 28,255
|
def num2vhdl_slv(num, width=4):
    """Format *num* as a VHDL slv (standard_logic_vector) literal.

    The value is rendered as zero-padded uppercase hex, two hex digits
    per byte of *width*.

    Examples:
        num2vhdl_slv(10, width=1)   => x"0A"
        num2vhdl_slv(0x10, width=2) => x"0010"
    """
    hex_digits = width * 2
    return 'x"%0*X"' % (hex_digits, num)
|
a9fb6ce594bdb8756d073ae268cb03dccda7592e
| 28,256
|
import jinja2
def ps_filter(val):
    """Jinja2 filter function 'ps' escapes for use in a PowerShell commandline"""
    if isinstance(val, jinja2.Undefined):
        return "[undefined]"
    # Control characters map to their named PowerShell backtick escapes;
    # shell-significant characters are simply prefixed with a backtick.
    control_escapes = {
        '\0': "`0",
        '\a': "`a",
        '\b': "`b",
        '\f': "`f",
        '\n': "`n",
        '\r': "`r",
        '\t': "`t",
        '\v': "`v",
    }
    out = []
    for ch in str(val):
        if ch in control_escapes:
            out.append(control_escapes[ch])
        elif ch in "`$#'\"":
            out.append("`" + ch)
        else:
            out.append(ch)
    return ''.join(out)
|
495cd87bfc930089aaa5f4f9b282d20b4883bfb5
| 28,257
|
import math
def calc_second(train_set_batch, test_set_batch, top_items, vad_set_batch, k):
"""[summary]
Args:
train_set_batch ([type]): [description]
test_set_batch ([type]): [description]
top_items ([type]): [description]
vad_set_batch ([type]): [description]
k ([type]): [description]
Returns:
[type]: [description]
"""
recall_k, precision_k, ndcg_k, hits_list = [], [], [], []
for train_set, test_set, ranked, vad_set in \
zip(train_set_batch, test_set_batch, top_items, vad_set_batch):
n_k = k if len(test_set) > k else len(test_set) # n_k = min(k, len(test_k))
n_idcg, n_dcg = 0, 0
for pos in range(n_k):
n_idcg += 1.0 / math.log(pos + 2, 2)
tops_sub_train = []
n_top_items = 0
for val in ranked:
if val not in train_set and val not in vad_set:
tops_sub_train.append(val)
n_top_items += 1
if n_top_items >= k: # 控制topK个item是从用户没交互过的商品中选的
break
hits_set = [(idx, item_id) for idx, item_id in \
enumerate(tops_sub_train) if item_id in test_set]
cnt_hits = len(hits_set)
for idx in range(cnt_hits):
n_dcg += 1.0 /math.log(hits_set[idx][0] + 2, 2)
precision_k.append(float(cnt_hits / k))
recall_k.append(float(cnt_hits / len(test_set)))
ndcg_k.append(float(n_dcg / n_idcg))
hits_list.append(cnt_hits)
return hits_list, precision_k, recall_k, ndcg_k
|
3a3d5e02724e499772ca730b2ca49da2bd3a2482
| 28,258
|
import operator
def multiply_round(n_data: int, cfg: dict):
    """Scale a {split: weight} configuration to absolute sizes.

    Given a configuration {split: percentage}, return a configuration
    {split: n} such that the sum of all values equals ``n_data``.
    Rounding leftovers are assigned to the largest split.

    Args:
        n_data: total number of items to distribute.
        cfg: mapping of split name to relative weight.

    Returns:
        dict: mapping of split name to integer count, summing to n_data.
    """
    # (Removed a leftover debug print of the whole config.)
    s_total = sum(cfg.values())
    sizes = {name: int(s * n_data / s_total) for name, s in cfg.items()}
    # Hand any rounding remainder to the largest split so totals match.
    max_name = max(sizes.items(), key=operator.itemgetter(1))[0]
    sizes[max_name] += n_data - sum(sizes.values())
    return sizes
|
6727de1f6f9bc70aa9d5bb3b53ccf73381e4a86d
| 28,259
|
from typing import List
def get_combinations(candidates: List[int], target: int) -> List[List[int]]:
    """Returns a list of lists representing each possible set of drops.

    Adapted from
    https://wlcoding.blogspot.com/2015/03/combination-sum-i-ii.html.
    Candidates may be reused any number of times within a combination.

    Args:
        candidates: A list of possible numbers of pieces to drop on a square,
            effectively range(1, carry_size + 1) (assumed sorted ascending).
        target: The number of stones picked up; dropped stones must sum to it.

    Returns:
        A list of lists of valid combinations (not permutations) summing
        to the target value.
    """
    results: List[List[int]] = []

    def search(start: int, remaining: int, chosen: List[int]) -> None:
        """Depth-first search extending `chosen` with candidates[start:]."""
        if remaining == 0:
            results.append(list(chosen))
        for pos in range(start, len(candidates)):
            value = candidates[pos]
            if value > remaining:
                # Candidates are scanned in ascending order; once one
                # overshoots, all later ones do too.
                break
            chosen.append(value)
            search(pos, remaining - value, chosen)
            chosen.pop()

    search(0, target, [])
    return results
|
9d68f3b69c23697d924e40d4471296223871165a
| 28,260
|
import json
def simple_aimfree_assembly_state() -> dict:
    """
    Fixture for creating the assembly system DT object for tests from a JSON file.
    - Complexity: simple
    """
    fixture_path = 'tests/assets/simple/aimfree_assembly_state.json'
    with open(fixture_path) as assembly_file:
        return json.load(assembly_file)
|
a1cdd6e85a90d39604214d2d49a52cedbbc4b165
| 28,262
|
def check_keys(test_record):
    """Check the test_record contains the required keys

    Args:
        test_record: Test record metadata dict

    Returns:
        bool: True when the record's keys are exactly the required set,
        or when any optional key is present.

    NOTE(review): a record containing only an optional key (e.g. just
    'compress') passes this check even without any required keys --
    confirm that is the intended behavior.
    """
    required = {
        'data',
        'description',
        'service',
        'source',
        'trigger'
    }
    optional = {
        'trigger_count',
        'compress'
    }
    if set(test_record) == required:
        return True
    return any(key in test_record for key in optional)
|
207a0a2c4177a35321e7944ca821a094613a9a47
| 28,263
|
import torch
def select_alias_word_sent(alias_pos_in_sent, sent_embedding):
    """Given a tensor of positions (batch x M), we subselect those embeddings
    in the sentence embeddings. Requires that max(alias_pos_in_sent) <
    sent_embedding.shape[1]

    Args:
        alias_pos_in_sent: position tensor (batch x M); a value of -1 marks
            a padding position whose output embedding is zeroed out
        sent_embedding: sentence embedding to extract the position embedding (batch x seq_len x hidden_dim)

    Returns:
        tensor (batch x M x hidden_dim) where item [b, i] is a hidden_dim
        sized vector from sent_embedding[b][alias_pos_in_sent[b][i]]
    """
    # get alias words from sent embedding
    # batch x seq_len x hidden_size -> batch x M x hidden_size
    batch_size, M = alias_pos_in_sent.shape
    _, seq_len, hidden_size = sent_embedding.shape
    # expand so we can use gather (expand creates a view, no copy)
    sent_tensor = sent_embedding.unsqueeze(1).expand(
        batch_size, M, seq_len, hidden_size
    )
    # gather can't take negative values so we set them to the first word in the sequence
    # we mask these out later
    alias_idx_sent_mask = alias_pos_in_sent == -1
    # copy the alias_pos_in_sent tensor to avoid overwrite errors and set where alias_post_in_sent == -1 to be 0;
    # gather can't handle -1 indices
    alias_pos_in_sent_cpy = torch.where(
        alias_pos_in_sent == -1, torch.zeros_like(alias_pos_in_sent), alias_pos_in_sent
    )
    alias_word_tensor = torch.gather(
        sent_tensor,
        2,
        alias_pos_in_sent_cpy.long()
        .unsqueeze(-1)
        .unsqueeze(-1)
        .expand(batch_size, M, 1, hidden_size),
    ).squeeze(2)
    # mask embedding values: padded (-1) positions receive all-zero embeddings
    alias_word_tensor[alias_idx_sent_mask] = 0
    return alias_word_tensor
|
6940f4dfbfec9c995f156658cd10430023cee9fc
| 28,268
|
def implicit_valence(a):
    """ Implicit valence of atom """
    # Thin wrapper over the atom object's accessor (RDKit-style naming --
    # presumably an rdkit.Chem.Atom; confirm at call sites).
    return a.GetImplicitValence()
|
88b0e7682e8d142d7f0e10caa37fe67bbe4fa2e2
| 28,270
|
import argparse
def parse_args():
    """
    This is called via Casa so we have to be a bit careful
    :return:
    """
    parser = argparse.ArgumentParser('Get the arguments')
    parser.add_argument('arguments', nargs='+', help='the arguments')
    # Logging-related boolean switches forwarded by the Casa wrapper.
    for flag in ('--nologger', '--log2term'):
        parser.add_argument(flag, action="store_true")
    parser.add_argument('--logfile')
    parser.add_argument('-c', '--call')
    return parser.parse_args()
|
b02dac3950aa760c5e3413d075e3560c82d2429c
| 28,271
|
import os
def secureSecretFile(filename = '', mode = 0o640):
    """This implements a secure policy for a secret file (not allowed to be
    read by thirds):
    a) If the file already exists, change it to specified mode (default 640).
    b) If does not exist, create it with the given mode
    @return True if the file actually existed, False otherwise
    """
    try:
        # O_EXCL makes creation fail atomically when the file exists,
        # avoiding a check-then-create race.
        fd = os.open(filename, os.O_CREAT | os.O_EXCL, mode)
        os.close(fd)
        return False
    except FileExistsError:
        # File already exists: only tighten its permissions.
        # (Narrowed from a bare `except Exception`, which also hid real
        # errors such as permission or bad-path failures.)
        os.chmod(filename, mode)
        return True
|
93f2348d7d9e49a886e0db1d05f3d7b6b5e5d0ae
| 28,273
|
def joint_card(c1, c2):
    """Given two cardinalities, combine them.

    Cardinality strings look like '{1:1}' or '{0:M}': index 1 holds the
    lower-bound flag and index 3 the upper-bound flag.
    """
    lower = '1' if c1[1] == '1' and c2[1] == '1' else '0'
    upper = '1' if c1[3] == '1' and c2[3] == '1' else 'M'
    return '{%s:%s}' % (lower, upper)
|
f9df53869d68cd7c48916ede0396787caeebadaf
| 28,274
|
def _is_hierachy_searchable(child_id: str) -> bool:
""" If the suffix of a child_id is numeric, the whole hierarchy is searchable to the leaf nodes.
If the suffix of a child_id is alphabetic, the whole hierarchy is not searchable. """
pieces_of_child_id_list = child_id.split('.')
suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1]
return suffix.isnumeric()
|
13146128fc8ab050323a23f07133676caeb83aaf
| 28,275
|
import re
def make_job_def_name(image_name: str, job_def_suffix: str = "-jd") -> str:
    """
    Autogenerate a job definition name from an image name.
    """
    # Strip the ECR registry prefix and image tag, flattening any
    # repository path separators into hyphens.
    if "amazonaws.com" in image_name:
        repo_path = image_name.split("/", 1)[1]
        image_name = repo_path.split(":")[0].replace("/", "-")
    # https://docs.aws.amazon.com/batch/latest/userguide/create-job-definition.html
    # Job definition names allow up to 128 chars of letters, numbers,
    # hyphens and underscores; drop anything else and leave room for the suffix.
    cleaned = re.sub("[^A-Za-z_0-9-]", "", image_name)
    prefix = cleaned[: 128 - len(job_def_suffix)]
    return prefix + job_def_suffix
|
e81e86e8df750434a1a9310d99256733ea0f8619
| 28,276
|
import os
def _this_dir():
"""
returns dirname for location of this file
py.test no longer allows fixtures to be called
directly so we provide a private function that can be
"""
return os.path.dirname(__file__)
|
7fa50ebdc766a018e3ff89fed49ca765d37ab812
| 28,277
|
import argparse
def parse_args():
    """Setup command line argument parser and return parsed args.

    Returns:
        :obj:`argparse.Namespace`: The resulting parsed arguments.
    """
    def parse_formats(value):
        # Validate a comma-separated list of output formats
        # (works around command-line parser limitations).
        formats = value.split(",")
        for fmt in formats:
            if fmt not in ("csv", "json"):
                raise argparse.ArgumentTypeError("Format must be one of csv, json")
        return formats
    parser = argparse.ArgumentParser()
    parser.add_argument("output", help="The path to output the CSV files to.")
    parser.add_argument(
        "-c",
        "--config",
        default="/etc/netbox/scripts.cfg",
        help="The path to the config file to load. Default: %(default)s.",
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Output more verbosity.")
    parser.add_argument(
        "-f",
        "--format",
        type=parse_formats,
        default="csv",
        help="Choose which format(s) to output (comma-separated) Options: csv or json Default: %(default)s",
    )
    parser.add_argument("-m", "--makedir", action="store_true", help="Create the path specified. Default: %(default)s")
    parser.add_argument(
        "-t",
        "--tables",
        type=lambda arg: arg.split(","),
        default="all",
        help=(
            "A comma separated list of tables (expressed as API path similar to `dcim.device`) to include in the dump"
            ", or the special value `all`."
        ),
    )
    return parser.parse_args()
|
6e25d97a707907c89c7b1620cb3421372ddcd412
| 28,278
|
import itertools
def generate_grid_search_trials(flat_params, nb_trials):
    """
    Standard grid search over the cartesian product of ``flat_params``.

    :param flat_params: list of per-hyperparameter option lists.
    :param nb_trials: keep only the first ``nb_trials`` combinations;
        a falsy value keeps them all.
    :return: list of hyperparameter tuples.
    """
    trials = list(itertools.product(*flat_params))
    if not nb_trials:
        return trials
    return trials[:nb_trials]
|
d35072aa26fa62b60add89f36b4343ee4e93567b
| 28,279
|
def _find_user(username, sm):
    """extracted from flask_appbuilder.security.manager.BaseSecurityManager.find_user

    Looks up (or lazily creates) the user via the security manager ``sm``.
    Usernames listed in ``sm.auth_admin_user_list`` get the admin role;
    everyone else gets the configured registration role. Existing users
    have the role appended if they don't already carry it.

    NOTE(review): new users are created with the placeholder email
    '<username>@email.notfound' -- confirm downstream code tolerates it.
    """
    user = sm.find_user(username)
    auth_admin_user_list = sm.auth_admin_user_list
    auth_role_admin = sm.auth_role_admin
    auth_user_registration_role = sm.auth_user_registration_role
    role = sm.find_role(auth_user_registration_role)
    # Admins configured by name override the default registration role.
    if auth_admin_user_list and username in auth_admin_user_list:
        role = sm.find_role(auth_role_admin)
    if not user:
        user = sm.add_user(
            username= username,
            first_name= username,
            last_name=username,
            email=username + '@email.notfound',
            role=role
        )
    else:
        # Ensure an existing user carries the computed role.
        is_role_exists = sm.is_role_exists(role.name,user.roles)
        if not is_role_exists:
            user.roles.append(role)
            sm.update_user(user)
    return user
|
6e06850c3f4fc098270e9cafb1eef6ca84799acc
| 28,280
|
import json
def process_txt_data(filepath: str):
    """Converts a txt file format to the tree format needed to construct grammar.

    Each line must look like "<text>|<action_dict JSON>".

    Args:
        filepath: path to the text file to parse.

    Returns:
        list: [text, action_dict] pairs, one per line.
    """
    # Use a context manager so the file handle is closed (the previous
    # open(...).readlines() leaked it), and split on the FIRST '|' only
    # so JSON payloads containing '|' still parse.
    with open(filepath, "r") as f:
        samples = f.readlines()
    split_lines = [line.split("|", 1) for line in samples]
    # Format of each sample is [text, action_dict]
    return [[text, json.loads(action_dict)] for text, action_dict in split_lines]
|
61fd540696d3f8026a673d7c88603fb0cedf010c
| 28,281
|
def convert_string_list(string_list):
    """
    Converts a list of strings (e.g. ["3", "5", "6"]) to a list of integers.
    In: list of strings
    Out: list of integers
    """
    return [int(item) for item in string_list]
|
b75ba67c142796af13186bc4d7f67e3061a1d829
| 28,282
|
def ksf_cast(value, arg):
    """
    Checks to see if the value is a simple cast of the arg
    and vice-versa
    """
    if type(value) == type(arg):
        return value == arg
    # Try casting in each direction; a failed cast simply means
    # "not equal via this direction".
    for source, target in ((value, arg), (arg, value)):
        try:
            if type(target)(source) == target:
                return True
        except (TypeError, ValueError):
            pass
    return False
|
ebe8679be4409a1a37d70bff74e374670c1e69a9
| 28,284
|
def aidstr(aid, ibs=None, notes=False):
    """ Helper to make a string from an aid """
    if notes:
        # Looking up name/notes requires a database handle.
        assert ibs is not None
        annot_notes = ibs.get_annot_notes(aid)
        annot_name = ibs.get_annot_names(aid)
        return 'aid%d-%r-%r' % (aid, str(annot_name), str(annot_notes))
    return 'aid%d' % (aid,)
|
6102bb1b536f234e8ab65fc728a9bab19cbf4b18
| 28,285
|
import csv
def read_csv(file_name):
    """
    Read csv file
    :param file_name: <file_path/file_name>.csv
    :return: list of lists which contains each row of the csv file
    """
    with open(file_name, 'r') as f:
        rows = [list(row) for row in csv.reader(f)]
    # The first two rows are header/metadata and are skipped.
    return rows[2:]
|
01f9aadc0bce949aa630ab050e480da24c53cc40
| 28,286
|
def inserir_webpage(conn, page):
    """Insert a new web page row into the web_page table.

    :param conn: open database connection.
    :param page: tuple (id_web_site, address, raw_content,
        article_summary, article_content).
    :return: id of the newly inserted row.
    """
    sql = ''' INSERT INTO web_page(id_web_site,address,raw_content,article_summary,article_content)
              VALUES(?,?,?,?,?) '''
    cursor = conn.cursor()
    cursor.execute(sql, page)
    return cursor.lastrowid
|
f27b98c941530b63872f5da9d265047f4296f604
| 28,287
|
def get_names_from_lines(lines, frac_len, type_function):
    """Take list of lines read from a file, keep the first frac_len
    elements, strip the trailing end-of-line character from each and
    convert it with type_function.
    """
    # Drop the final character of each kept line (assumed to be '\n').
    trimmed = (raw[:-1] for raw in lines[:frac_len])
    return list(map(type_function, trimmed))
|
c9b42ef1388c0cd09b3d7d5e6a7381411438200e
| 28,288
|
import torch
def split_distinct(arr):
"""Given an input 1D tensor, return two lists of tensors, one list giving the unique values in the
input, and the other the list of tensors each of which gives the indices where the corresponding
value occurs.
This implementation involves sorting the input, which shouldn't be required, so it should be possible
to implement this more efficiently some other way, e.g. using a custom kernel."""
assert len(arr.shape) == 1
vals, indices = torch.sort(arr)
split_points = torch.nonzero(vals[1:] != vals[:-1])[:, 0] + 1
split_sizes = [split_points[0].item(),
*(split_points[1:] - split_points[:-1]).tolist(),
(arr.shape[0] - split_points[-1]).item()]
indices_list = torch.split(indices, split_sizes)
# indices_list = torch.tensor_split(indices, list(split_points + 1)) # Use this once `tensor_split` is stable
values_list = [vals[0].item(), *vals[split_points].tolist()]
return values_list, indices_list
|
02025d6c76f01077338cd19bffb4c6fb428ef368
| 28,290
|
def check_nnurl(sockurl):
    """Pre-validate a nanomsg socket URL.

    Currently only ipc:// URLs are checked; anything else passes.
    """
    if not sockurl.startswith('ipc:///'):
        return True
    # The unix socket path is everything after the 'ipc://' scheme.
    path = sockurl[6:]
    # Unix socket paths are limited to roughly 90 characters:
    # http://pubs.opengroup.org/onlinepubs/007904975/basedefs/sys/un.h.html
    return len(path) <= 90
|
5e7434db035b713bb062153f5b43eee814319f93
| 28,291
|
def execroi(img, roi):
    """Crop a 2D image to a region of interest.

    Args:
        img(np.array): 2 dimensions
        roi(2-tuple(2-tuple)): ((row_start, row_stop), (col_start, col_stop))
    Returns:
        np.array: cropped image
    """
    (row_start, row_stop), (col_start, col_stop) = roi
    return img[row_start:row_stop, col_start:col_stop]
|
4c59dd52186888b2b1c43007a00301a43237dcb3
| 28,292
|
def selected_features_to_constraints(feats, even_not_validated=False):
    """
    Convert a set of selected features to constraints.
    Only the features that are validated are translated into constraints,
    otherwise all are translated when `even_not_validated` is set.
    :return: str
    """
    constraint_lines = []
    # Features are emitted in reverse order, matching the original behavior.
    for feature in reversed(feats):
        if not (feature['validated'] or even_not_validated):
            continue
        name = feature['other']
        if feature['mode'] == 'selected':
            constraint_lines.append("[ " + name + " ]" + "\n")
        elif feature['mode'] == 'rejected':
            constraint_lines.append("[ !" + name + " ]" + "\n")
    return "".join(constraint_lines)
|
e029c26b0e53874b4076c9a4d9065a558736f565
| 28,293
|
def iso_string_to_sql_utcdatetime_mysql(x: str) -> str:
    """
    Provides MySQL SQL to convert an ISO-8601-format string (with punctuation)
    to a ``DATETIME`` in UTC. The argument ``x`` is the SQL expression to be
    converted (such as a column name).
    """
    return (
        f"CONVERT_TZ(STR_TO_DATE(LEFT({x}, 26),"
        f" '%Y-%m-%dT%H:%i:%s.%f'),"
        f" RIGHT({x}, 6),"  # from timezone
        f" '+00:00')"  # to timezone
    )
    # In MySQL:
    # 1. STR_TO_DATE(), with the leftmost 26 characters
    #    ("YYYY-MM-DDTHH:MM:SS.ffffff"), giving microsecond precision,
    #    but not correct for timezone
    # 2. CONVERT_TZ(), converting from the timezone info in the rightmost 6
    #    characters to UTC (though losing fractional seconds)
1117b228c77f187b5884aeb014f2fb80309ea93a
| 28,294
|
def extract_lists (phrase):
    """IF <phrase> = [ITEM1,ITEM2,[LIST1,LIST2]] yields
    [ITEM1,ITEM2,LIST1,LIST2]

    Recursively hoists the elements of nested lists-of-lists into their
    containing list; mixed lists (lists containing both list and
    non-list elements) are left as-is.
    """
    def list_of_lists (phrase):
        # TRUE if every element of <phrase> is a list
        for x in phrase:
            if not isinstance(x,list):
                return False
        return True
    def some_lists (phrase):
        # True if some elements of <phrase> are lists.
        for x in phrase:
            if isinstance(x,list):
                return True
        return False
    def extract (x):
        # Recursive function to extract lists.
        returnlist = []
        if not isinstance(x,list):
            returnlist = x
        elif not some_lists(x):
            # Flat list of non-list items: nothing to hoist.
            returnlist = x
        elif not list_of_lists(x):
            # Mixed list (some list, some non-list elements): left untouched.
            returnlist = x
        else:
            # For a list composed of lists
            for y in x:
                if isinstance(y,list) and not list_of_lists(y):
                    returnlist.append(extract(y))
                else:
                    for z in y:
                        # Extracts elements of a list of lists into the containing list
                        returnlist.append((extract(z)))
        return returnlist
    return extract(phrase)
|
84c3a7d20a56506a96d458f4e6e2c2f835d1607e
| 28,296
|
def compute_rmsd(frame, ref):
    """Compute RMSD between a reference and a frame.

    Delegates to ``ref.rmsd`` -- presumably a trajectory/structure object
    from an MD analysis library; confirm at call sites.
    """
    return ref.rmsd(frame)
|
b189453a409d02279851bd492a95757d1d25bccc
| 28,297
|
def iou_score(SR, GT):
    """Compute the Intersection-over-Union (Jaccard) score.

    Args:
        SR: predicted segmentation scores, thresholded at 0.5.
        GT: ground-truth binary mask (same shape as SR).

    Returns:
        IoU = |SR ∩ GT| / |SR ∪ GT|, with a small epsilon for stability.
    """
    smooth = 1e-8
    SR = (SR > 0.5).float()
    inter = SR * GT
    # |A ∪ B| = |A| + |B| - |A ∩ B|; the previous version used SR + GT,
    # which double-counts the intersection and underestimates the IoU.
    union = SR + GT - inter
    return inter.sum() / (union.sum() + smooth)
|
05627c3b5c62422318a2968bab0a5cfe4430b3b6
| 28,298
|
def docstring_parameter(**kwargs):
    """ Decorates a function to update the docstring with a variable. This
    allows the use of (global) variables in docstrings.

    Example:
        @docstring_parameter(config_file=CONFIG_FILE)
        myfunc():
            \"\"\" The config file is {config_file} \"\"\"

    Args:
        **kwargs: Declarations of string formatters.

    Raises:
        KeyError: If formatters are not found in the docstring.
    """
    def apply_formatting(obj):
        # str.format raises KeyError for placeholders not supplied above.
        obj.__doc__ = obj.__doc__.format(**kwargs)
        return obj
    return apply_formatting
|
c69505037948f120a7c29ee500f2327001e8b80d
| 28,299
|
def arg_to_dict(arg):
    """Convert an argument that can be None, list/tuple or dict to dict

    Example::

        >>> arg_to_dict(None)
        {}
        >>> arg_to_dict(['a', 'b'])
        {'a': {}, 'b': {}}
        >>> arg_to_dict({'a': {'only': 'id'}, 'b': {'only': 'id'}})
        {'a': {'only': 'id'}, 'b': {'only': 'id'}}

    :return: dict with keys and dict arguments as value

    NOTE(review): ``dict.fromkeys(..., {})`` makes every key share the
    SAME empty dict object -- mutating one value mutates them all;
    confirm callers never mutate these defaults.
    """
    if arg is None:
        arg = []
    try:
        arg = dict(arg)
    except ValueError:
        # Not key/value pairs (e.g. a list of names): map each name to
        # an empty options dict.
        arg = dict.fromkeys(list(arg), {})
    return arg
|
adc75f811d02770b34be2552445b192e33401e76
| 28,302
|
def _triangle(m, yr, cutoff):
    """
    Define triangular filter weights

    Args:
        m: unused here -- presumably kept for a common filter signature;
            confirm against sibling filter functions.
        yr: offsets from the filter center (assumed to be a numpy-style
            array supporting boolean-mask assignment -- TODO confirm).
        cutoff: half-width of the triangle; weights are zero beyond it.

    Returns:
        Weights decreasing linearly from ``cutoff`` at zero offset to
        zero at ``cutoff`` offset, clipped at zero.
    """
    wgt = cutoff - abs(yr)
    wgt[wgt < 0] = 0
    return wgt
|
1ce115c440950ef165b416e9011ca5e352289dd3
| 28,303
|
def extract_multiple_tracts_demand_ratios(pop_norm_df, intermediate_ids):
    """
    Extract fraction of target/intermediate geometry demand based on mapping.

    Inputs list of target/intermediate geometry IDs and returns dictionary
    with keys of intersecting source geometry IDs and the demand fraction
    associated with the target/intermediate geometries.

    Example:
        Provide list of tract/county IDs and the scaled intersection matrix
        according to which demand has been allocated (e.g. population
        mapping). It will return dictionary of demand area IDs and the
        fraction of their demand associated with the list of tract/count
        IDs. Used as intermediate step to outputting time series of
        intermediate/source demands.

    Args:
        pop_norm_df (pandas.DataFrame): matrix mapping between source and
            target/intermediate IDs (usually normalized population matrix)
        intermediate_ids (list): list of tract or other intermediate IDs

    Returns:
        dict: demand area ID -> fraction of demand allocated to the given
        intermediate geometry IDs (zero-demand areas are omitted)
    """
    # Demand allocated to the selected intermediate geometries, per area.
    selected_totals = pop_norm_df.loc[intermediate_ids].sum().to_dict()
    # Total demand per area across all intermediate geometries.
    area_totals = pop_norm_df.sum(axis=0).to_dict()
    ratios = {}
    for area_id, selected in selected_totals.items():
        if selected != 0:
            ratios[area_id] = selected / area_totals[area_id]
    return ratios
|
22557011946b09e0270a75df0a615d5a834de06d
| 28,304
|
def dot(vec1, vec2):
    """Return the dot product of two equal-length vectors."""
    # Multiply element-wise and accumulate in one pass.
    return sum(a * b for a, b in zip(vec1, vec2))
|
f08464f38d6dcbd4da8f52c5a9e60160327ab014
| 28,305
|
def multiply(x, y):
    """Return the product of x and y."""
    return x*y
|
33f8b0fbda7dd1fe01c2f49681b6b91eaaa205d4
| 28,307
|
import re
def parse_time_acc_loss(time_acc_loss_path, stop_epoch=100, stop_acc=1000, stop_loss=0, time_ratio=1):
    """
    Parse a time_acc_loss.txt log file.

    Each line must contain five numbers in order: epoch, time, wall-clock,
    loss, accuracy. Parsing stops after the first line whose epoch exceeds
    ``stop_epoch``, accuracy exceeds ``stop_acc`` or loss drops below
    ``stop_loss``. Times are scaled by ``time_ratio``.

    :param time_acc_loss_path: the path of time_acc_loss.txt
    :return: epochs, times, wall_clocks, losses, accs
    """
    epochs, times, wall_clocks, losses, accs = [], [], [], [], []
    # NOTE(review): this pattern does not match negative numbers --
    # confirm the log never contains them.
    number_pattern = re.compile(r"\d+\.?\d*")
    with open(time_acc_loss_path) as log_file:
        for line in log_file:
            numbers = number_pattern.findall(line)
            epoch = int(numbers[0])
            time_val = float(numbers[1])
            wall_clock = float(numbers[2])
            loss = float(numbers[3])
            acc = float(numbers[4])
            epochs.append(epoch)
            times.append(time_val * time_ratio)
            wall_clocks.append(wall_clock)
            losses.append(loss)
            accs.append(acc)
            if epoch > stop_epoch or acc > stop_acc or loss < stop_loss:
                break
    return epochs, times, wall_clocks, losses, accs
|
a31e7f7744c325225de181c1ff65c906f4b5a674
| 28,308
|
def get_help_text(impl):
    """ get the help text based on pydoc strings """
    # First non-blank line of the docstring; empty string when undocumented.
    doc = impl.__doc__ or ""
    first_line, _sep, _rest = doc.strip().partition("\n")
    return first_line.strip()
|
aca302f5b12aef78f3d554d57ef6300db39e6d98
| 28,309
|
def enum(**named_values):
    """
    Create an enum-like class carrying the given values as attributes.

    :param named_values: name=value pairs to expose as class attributes.
    :return: a new class named 'Enum'
    :rtype: type
    """
    attributes = dict(named_values)
    return type('Enum', (), attributes)
|
794007a79e43c3ff4af2f70efa3817c224e42bd7
| 28,310
|
def fib_1_recursive(n):
    """
    Solution: Brute force recursive solution.

    Complexity:
        Description: the call tree is a binary tree of height n.
        Time: O(2^n)
    """
    if n < 0:
        raise ValueError('input must be a positive whole number')
    if n == 0 or n == 1:
        # Base cases: fib(0) = 0, fib(1) = 1.
        return n
    return fib_1_recursive(n - 1) + fib_1_recursive(n - 2)
|
beb4a726075fed152da34706394ac1bd7ef29f17
| 28,311
|
import logging
def impute_station_name(tags):
"""Make sure that a StationName is present or introduce a sensible alternative"""
if not tags.get("StationName"):
try:
tags["StationName"] = tags["DeviceSerialNumber"]
except:
try:
tags["StationName"] = tags["X-Ray Radiation Dose Report"]["Device Observer UID"]
except:
tags["StationName"] = "Unknown"
logger = logging.getLogger("DcmSimplify")
logger.warning('No station name identified')
return tags
|
a40a36971434a6a3bb201ac5856077af5145b6ef
| 28,312
|
from typing import Any
def try_catch_replacement(block: Any, get_this: str, default: Any) -> str:
    """
    Replacement for try/except lookup blocks. HELPER FUNCTION

    :param block: mapping to read from
    :param get_this: key to look up
    :param default: value returned when the key is absent
    :return: the stored value or the default
    """
    try:
        value = block[get_this]
    except KeyError:
        value = default
    return value
|
a783c26709cee83d9c3bce4ce4d5a79837a4c754
| 28,313
|
def crop_center(img, cropx, cropy, cropz):
    """
    Crops out the center of a 3D volume.
    """
    dim_x, dim_y, dim_z = img.shape
    # Center the crop window along each axis.
    x0 = dim_x // 2 - cropx // 2
    y0 = dim_y // 2 - cropy // 2
    z0 = dim_z // 2 - cropz // 2
    return img[x0:x0 + cropx, y0:y0 + cropy, z0:z0 + cropz]
|
a003fb7fdbcee5e6d4a8547a2f50fa82181bdf37
| 28,314
|
def get_text(spec):
    """Reads the contents of the given file"""
    with open(spec) as handle:
        contents = handle.read()
    return contents
|
21766304777d483af403375678389519ca1bcfe1
| 28,315
|
import os
def get_launch_env(envlist, envall, env, path):
"""Given command line arguments, build up the environment array"""
# First handle the arguments that export existing environment
if envlist:
# Copy the listed keys for -envlist
environ = {
key: os.environ[key] for key in envlist.split(",") if key in os.environ
}
elif envall:
# Copy the whole environment for -envall
environ = os.environ.copy()
else:
# Otherwise start from scratch
environ = {}
# Next add environment variables explicitly called out
# Overrides anything already set
if env:
for key, val in env:
environ[key] = val
# Add path argument if given
if path:
environ["PATH"] = path
# Format into array in the expected format
return ["%s=%s" % (key, val) for key, val in environ.items()]
|
359c9aca3de825214401e8b63833d3f0fc774e92
| 28,318
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.