def test_success(database):
""" Test that calculation works for equal values and for null """
value = Decimal('100.23')
approp = AppropriationFactory(status_of_budgetary_resour_cpe=value, total_budgetary_resources_cpe=value)
approp_null = AppropriationFactory(status_of_budgetary_resour_cpe=0, total_budgetary_resources_cpe=None)
errors = number_of_errors(_FILE, database, models=[approp, approp_null])
assert errors == 0
| 19,300
|
def get_pcap_path(name):
"""Given a pcap's name in the test directory, returns its full path."""
return os.path.join(PCAPS_DIR, name)
| 19,301
|
def text_dataset_construction(train_or_test, janossy_k, task, janossy_k2, sequence_len, all_data_size=0):
""" Data Generation """
janossy_k = 1
janossy_k2 = 1
args = parse_args()
task = str(args.task).lower()
X = np.load('../data_'+str(task)+str(sequence_len)+'.npy')
output_X = np.load('../label_'+str(task)+str(sequence_len)+'.npy')
output_X = np.reshape(output_X,(output_X.shape[0],1))
total_len = X.shape[0]
if (all_data_size > 0):
total_len = all_data_size
train_len = int(total_len*0.4)
valid_len = int(total_len*0.2)
NUM_TRAINING_EXAMPLES = train_len
NUM_VALIDATION_EXAMPLES = valid_len
NUM_TEST_EXAMPLES = total_len - train_len - valid_len
#pdb.set_trace()
if train_or_test == 1:
X = X[0:train_len]
output_X = output_X[0:train_len]
num_examples = NUM_TRAINING_EXAMPLES
elif train_or_test == 2:
X = X[train_len:train_len+valid_len]
output_X = output_X[train_len:train_len+valid_len]
num_examples = NUM_VALIDATION_EXAMPLES
elif train_or_test == 0:
X = X[train_len+valid_len:]
output_X = output_X[train_len+valid_len:]
num_examples = NUM_TEST_EXAMPLES
set_numbers = X.shape[1]
train_length = X.shape[0]
if janossy_k == 1 and janossy_k2 == 1:
return X, output_X
else:
X_janossy = janossy_text_input_construction(X, janossy_k,janossy_k2)
return X_janossy, output_X
| 19,302
|
def run_illum(args):
"""Run illumination correction.
Parameters
----------
args : argparse.Namespace
The arguments parsed by the argparse library.
"""
if args.file_list is not None:
args.images.extend([fn.rstrip() for fn in args.file_list])
il = pre.find_background_illumination(args.images, args.radius,
args.quantile, args.stretchlim,
args.use_mask, args.mask_offset,
args.mask_close, args.mask_erode)
if args.verbose:
print('illumination field:', type(il), il.dtype, il.min(), il.max())
if args.save_illumination is not None:
io.imsave(args.save_illumination, il / il.max())
base_fns = [pre.basefn(fn) for fn in args.images]
ims_out = [fn + args.output_suffix for fn in base_fns]
mask_fns = [fn + '.mask.tif' for fn in base_fns]
ims = (io.imread(fn) for fn in args.images)
for im, fout, mask_fn in zip(ims, ims_out, mask_fns):
if os.path.isfile(mask_fn):
mask = io.imread(mask_fn).astype(bool)
else:
mask = np.ones(im.shape, bool)
im = pre.correct_image_illumination(im, il,
args.stretchlim_output, mask)
io.imsave(fout, im)
| 19,303
|
def neo_vis(task_id):
"""
Args:
task_id: identifier of the task/project whose graph should be visualized.
Returns:
A redirect to the neovis page, using the project's port and password.
"""
project = get_project_detail(task_id, current_user.id)
return redirect(
url_for(
"main.neovis_page",
port=project["remark"]["port"],
pwd=project["remark"]["password"],
)
)
| 19,304
|
def getfont(
fontname=None,
fontsize=None,
sysfontname=None,
bold=None,
italic=None,
underline=None):
"""Monkey-patch for ptext.getfont().
This will use our loader and therefore obey our case validation, caching
and so on.
"""
fontname = fontname or ptext.DEFAULT_FONT_NAME
fontsize = fontsize or ptext.DEFAULT_FONT_SIZE
key = (
fontname,
fontsize,
sysfontname,
bold,
italic,
underline
)
if key in ptext._font_cache:
return ptext._font_cache[key]
if fontname is None:
font = ptext._font_cache.get(key)
if font:
return font
font = pygame.font.Font(fontname, fontsize)
else:
font = fonts.load(fontname, fontsize)
if bold is not None:
font.set_bold(bold)
if italic is not None:
font.set_italic(italic)
if underline is not None:
font.set_underline(underline)
ptext._font_cache[key] = font
return font
| 19,305
|
def append_medications(sms_message, end_date, patient):
""" Queries all of the patients current medications as determined
by the Medication.edn_date is null or after the 'end_date'
parameter.
Appends a new line to the message with the medication information
as:
name dose dose_unit times per interval interval_unit
"""
line_template = u"{displayName}: {dose} {dose_unit} {dose_times} per {interval} {interval_unit}"
medqs = Medication.objects.filter(Q(end_date__isnull=True) | Q(end_date__gt=end_date),
encounter__visit__patient=patient).select_related('category','dose_unit', 'interval_unit')
for med in medqs:
new_line = line_template.format(
displayName=med.category.displayName,
dose=med.dose,
dose_unit=med.dose_unit.displayName,
dose_times=med.times,
interval=med.interval,
interval_unit=med.interval_unit.displayName)
sms_message.add_line(new_line)
| 19,306
|
def change_box(base_image,box,change_array):
"""
Assumption 1: Contents of box are as follows
[x1, y1, width, height]
"""
height, width, _ = base_image.shape
new_box = [0,0,0,0]
for i,value in enumerate(change_array):
if value != 0:
new_box[i] = box[i] + value
else:
new_box[i] = box[i]
assert new_box[0] >= 0
assert new_box[1] >= 0
assert new_box[0]+new_box[2] <= width
assert new_box[1]+new_box[3] <= height
return new_box
| 19,307
|
def fetchRepositoryFilter(critic, filter_id):
"""Fetch a RepositoryFilter object with the given filter id"""
assert isinstance(critic, api.critic.Critic)
return api.impl.filters.fetchRepositoryFilter(critic, int(filter_id))
| 19,308
|
def test_to_gbq_w_default_project(mock_bigquery_client):
"""If no project is specified, we should be able to use project from
default credentials.
"""
import google.api_core.exceptions
from google.cloud.bigquery.table import TableReference
mock_bigquery_client.get_table.side_effect = google.api_core.exceptions.NotFound(
"my_table"
)
gbq.to_gbq(DataFrame(), "my_dataset.my_table")
mock_bigquery_client.get_table.assert_called_with(
TableReference.from_string("default-project.my_dataset.my_table")
)
mock_bigquery_client.create_table.assert_called_with(mock.ANY)
table = mock_bigquery_client.create_table.call_args[0][0]
assert table.project == "default-project"
| 19,309
|
def load_it(file_path: str, verbose: bool = False) -> object:
"""Loads from the given file path a saved object.
Args:
file_path: String file path (with extension).
verbose: Whether to print info about loading successfully or not.
Returns:
The loaded object.
Raises:
None.
"""
obj = None
with open(file_path, 'rb') as handle:
obj = pk.load(handle)
if verbose:
print('{} is successfully loaded.'.format(file_path))
return obj
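
# A minimal usage sketch (an assumption, not part of the original corpus): the
# snippet's `pk` alias is taken to be the standard-library pickle module, and
# load_it() above is assumed to be in scope.
import pickle as pk

data = {"weights": [0.1, 0.2], "epoch": 3}
with open("model_state.pkl", "wb") as handle:
    pk.dump(data, handle)

restored = load_it("model_state.pkl", verbose=True)
assert restored == data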
| 19,310
|
def test_orderstorage__Orderstorage__down__3(storage):
"""It can move the given item `delta` positions down."""
storage.down('foo2', 'bar', 2)
assert ['foo1', 'foo3', 'foo4', 'foo2'] == storage.byNamespace('bar')
| 19,311
|
def rename():
""" Rename all directories to [firstname lastname]. """
msg.info('Renaming...')
moodle_sub_specifier = '_assignsubmission_file_'
g = glob.glob(f'submission/*{moodle_sub_specifier}')
while len(g) == 0:
if msg.ask_yn('Directory names do not match, remove /submission and retry?',
msgtype='warn'):
shutil.rmtree('submission')
msg.info('Extracting...')
z = glob.glob('CSC 116*.zip')
if len(z) == 1:
unzip(z[0])
g = glob.glob(f'submission/*{moodle_sub_specifier}')
for entry in g:
print(msg.align_left(msg.name(entry.split('/')[1]), 80))
entry_new = entry.split('__')[0]
shutil.move(entry, entry_new)
print(f'\t-> {msg.name(entry_new.split("/")[1])}')
msg.info(f'Renamed to [lastname firstname]')
msg.press_continue()
| 19,312
|
def connect_contigs(contigs, align_net_file, fill_min, out_dir):
"""Connect contigs across genomes by forming a graph that includes
net format aligning regions and contigs. Compute contig components
as connected components of that graph."""
# construct align net graph and write net BEDs
if align_net_file is None:
graph_contigs_nets = nx.Graph()
else:
graph_contigs_nets = make_net_graph(align_net_file, fill_min, out_dir)
# add contig nodes
for ctg in contigs:
ctg_node = GraphSeq(ctg.genome, False, ctg.chr, ctg.start, ctg.end)
graph_contigs_nets.add_node(ctg_node)
# intersect contigs BED w/ nets BED, adding graph edges.
intersect_contigs_nets(graph_contigs_nets, 0, out_dir)
intersect_contigs_nets(graph_contigs_nets, 1, out_dir)
# find connected components
contig_components = []
for contig_net_component in nx.connected_components(graph_contigs_nets):
# extract only the contigs
cc_contigs = [contig_or_net for contig_or_net in contig_net_component if contig_or_net.net is False]
if cc_contigs:
# add to list
contig_components.append(cc_contigs)
# write summary stats
comp_out = open('%s/contig_components.txt' % out_dir, 'w')
for ctg_comp in contig_components:
ctg_comp0 = [ctg for ctg in ctg_comp if ctg.genome == 0]
ctg_comp1 = [ctg for ctg in ctg_comp if ctg.genome == 1]
ctg_comp0_nt = sum([ctg.end-ctg.start for ctg in ctg_comp0])
ctg_comp1_nt = sum([ctg.end-ctg.start for ctg in ctg_comp1])
ctg_comp_nt = ctg_comp0_nt + ctg_comp1_nt
cols = [len(ctg_comp), len(ctg_comp0), len(ctg_comp1)]
cols += [ctg_comp0_nt, ctg_comp1_nt, ctg_comp_nt]
cols = [str(c) for c in cols]
print('\t'.join(cols), file=comp_out)
comp_out.close()
return contig_components
| 19,313
|
def train(loader, model, crit, opt, epoch):
"""Training of the CNN.
Args:
loader (torch.utils.data.DataLoader): Data loader
model (nn.Module): CNN
crit (torch.nn): loss
opt (torch.optim.SGD): optimizer for every parameters with True
requires_grad in model except top layer
epoch (int)
"""
batch_time = AverageMeter()
losses = AverageMeter()
data_time = AverageMeter()
forward_time = AverageMeter()
backward_time = AverageMeter()
# switch to train mode
model.train()
# create an optimizer for the last fc layer
optimizer_tl = torch.optim.SGD(
model.top_layer.parameters(),
lr=args.lr,
weight_decay=10 ** args.wd,
)
end = time.time()
print(epoch)
for i, (input_tensor, target) in enumerate(loader):
data_time.update(time.time() - end)
# save checkpoint
n = len(loader) * epoch + i
input_var = torch.autograd.Variable(input_tensor.cuda())
target_var = torch.autograd.Variable(target.cuda())
output = model(input_var)
loss = crit(output, target_var)
# record loss
# losses.update(loss.data[0], input_tensor.size(0))
# compute gradient and do SGD step
opt.zero_grad()
optimizer_tl.zero_grad()
loss.backward()
opt.step()
optimizer_tl.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# sava_params(epoch, model, opt, r'mobilenetv1_30')
if (epoch + 1) / 10 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_10')
if (epoch + 1) / 30 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_30')
if (epoch + 1) / 60 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_60')
if (epoch + 1) / 90 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_90')
if (epoch + 1) / 100 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_100')
return losses.avg
| 19,314
|
def test_encrypt():
"""Test encrypting a paste."""
p = create_paste("test_encrypt")
key = p.encrypt()
assert key is not None
with pytest.raises(ValueError):
p.encrypt()
| 19,315
|
async def create_add_ci_failure_summary(gh, context, comment_url, ci_link,
shortId, pr_num, comment_list,
commits_url):
"""gradually find failed CI"""
hyperlink_format = '<a href="{link}">{text}</a>'
failed_header = "## 🕵️ CI failures summary\r\n"
failed_template = "🔍 Commit ID: <b>%s</b> contains failed CI.\r\n"
failed_ci_bullet = "- <b>Failed: %s</b>"
failed_ci_hyperlink = hyperlink_format.format(link=ci_link, text=context)
if len(comment_list) == 0:
if ci_link.startswith('https://xly.bce.baidu.com'):
error_message = failed_header + failed_template % str(
shortId) + failed_ci_bullet % failed_ci_hyperlink
logger.info(
"Successful trigger logic for CREATE XLY bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
await gh.post(comment_url, data={"body": error_message})
await clean_parent_comment_list(gh, commits_url, pr_num, shortId)
else:
error_message = failed_header + failed_template % str(
shortId) + failed_ci_bullet % context
logger.info(
"Successful trigger logic for CREATE TC bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
await gh.post(comment_url, data={"body": error_message})
await clean_parent_comment_list(gh, commits_url, pr_num, shortId)
else:
logger.info("comment_list: %s" % comment_list)
for i in range(len(comment_list)):
comment_sender = comment_list[i]['user']['login']
comment_body = comment_list[i]['body']
update_url = comment_list[i]['url']
if comment_sender == "paddle-bot[bot]" and comment_body.startswith(
'## 🕵️'):
split_body = comment_body.split("\r\n")
context_list = re.findall(r"\">(.+?)</a></b>", comment_body)
if ci_link.startswith('https://xly.bce.baidu.com'):
IsExit = True
for j in range(len(context_list)):
logger.info("context:%s" % context)
if context == context_list[j]:
IsExit = False
latest_body = comment_body.replace(
"\r\n" + split_body[j + 2], '')
update_message = latest_body + "\r\n" + failed_ci_bullet % failed_ci_hyperlink
logger.info(
"Successful trigger logic for REMOVING and ADDING XLY bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
await gh.patch(
update_url, data={"body": update_message})
if IsExit is True:
update_message = comment_body + "\r\n" + failed_ci_bullet % failed_ci_hyperlink
logger.info(
"Successful trigger logic for ADDING XLY bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
logger.info("update_message: %s" % update_message)
await gh.patch(
update_url, data={"body": update_message})
else:
corrected_ci = failed_ci_bullet % context
if corrected_ci in split_body:
latest_body = comment_body.replace(
"\r\n" + corrected_ci, '')
update_message = latest_body + "\r\n" + failed_ci_bullet % context
logger.info(
"Successful trigger logic for ADDING TC bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
await gh.patch(
update_url, data={"body": update_message})
else:
update_message = comment_body + "\r\n" + failed_ci_bullet % context
await gh.patch(
update_url, data={"body": update_message})
elif comment_sender == "paddle-bot[bot]" and comment_body.startswith(
'✅️'):
if ci_link.startswith('https://xly.bce.baidu.com'):
update_message = failed_header + failed_template % str(
shortId) + failed_ci_bullet % failed_ci_hyperlink
logger.info(
"Successful trigger logic for CHANGE Success Comment to XLY bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
await gh.delete(update_url)
await gh.post(comment_url, data={"body": update_message})
else:
update_message = failed_header + failed_template % str(
shortId) + failed_ci_bullet % context
logger.info(
"Successful trigger logic for CHANGE Success Comment to TC bullet. pr num: %s; sha: %s"
% (pr_num, shortId))
await gh.delete(update_url)
await gh.post(comment_url, data={"body": update_message})
| 19,316
|
def _h1_cmp_prnt_ ( h1 ,
h2 ,
head1 = '' ,
head2 = '' ,
title = '' ,
density = False ,
max_moment = 10 ,
exp_moment = True ,
prefix = '' ) :
""" Calculate and print some statistic information for two histos
>>> h1 , h2 = ...
>>> h1.cmp_prnt ( h2 )
"""
assert isinstance ( h1 , ROOT.TH1 ) and 1 == h1.dim () , \
"cmp_prnt: invalid type of h1 %s/%s" % ( h1 , type ( h1 ) )
if isinstance ( h2 , ROOT.TH1 ) :
assert 1 == h2.dim () , "cmp_prnt: invalid type of h2 %s/%s" % ( h2 , type ( h2 ) )
if density :
h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1
h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2
cmp = _h1_cmp_prnt_ ( h1_ , h2_ ,
head1 = head1 ,
head2 = head2 ,
title = title ,
density = False ,
prefix = prefix ,
max_moment = max_moment ,
exp_moment = exp_moment )
if h1_ is not h1 : del h1_
if h2_ is not h2 : del h2_
return cmp
if not head1 : head1 = h1.GetName()
if not head2 : head2 = h2.GetName()
fmt = '%+11.4g +- %-10.4g'
wid0 = 25
values = [ 'Mean' ,
'Rms' ,
'Skewness' ,
'Kurtosis' ]
numbers = []
mean = h1.mean () , h2.mean ()
rms = h1.rms () , h2.rms ()
skew = h1.skewness () , h2.skewness ()
kurt = h1.kurtosis () , h2.kurtosis ()
numbers.append ( mean )
numbers.append ( rms )
numbers.append ( skew )
numbers.append ( kurt )
if 4 < max_moment :
for i in range ( 5 , max_moment + 1 ) :
v1 = h1.stdMoment ( i , exp_moment )
v2 = h2.stdMoment ( i , exp_moment )
item = v1 , v2
numbers.append ( item )
if exp_moment : values .append ( 'ExpMom/%d' % i )
else : values .append ( 'StdMom/%d' % i )
numbers = tuple ( numbers )
values = tuple ( values )
wid1 = max ( len ( v ) for v in values )
wid1 = max ( wid1 , len ( 'Quantity' ) )
wid2 = max ( wid0 , len ( head1 ) )
wid3 = max ( wid0 , len ( head2 ) )
wid4 = max ( wid0 , len ( 'Delta' ) )
header = ( ( '{:^%d}' % wid1 ).format ( 'Quantity' ) ,
( '{:^%d}' % wid2 ).format ( head1 ) ,
( '{:^%d}' % wid3 ).format ( head2 ) ,
( '{:^%d}' % wid4 ).format ( 'Delta' ) )
table_data = [ header ]
for v , item in zip ( values , numbers ) :
v1 , v2 = item
dv = v1 - v2
row = allright ( v ) , v1.toString ( fmt ) , v2.toString( fmt ) , dv.toString ( fmt )
table_data.append ( row )
title = title if title else '%s vs %s' % ( head1 , head2 )
import ostap.logger.table as T
return T.table ( table_data , title = title , prefix = prefix )
| 19,317
|
def add_scatter(x, scatter, in_place=False):
"""
Add a Gaussian scatter to x.
Parameters
----------
x : array_like
Values to add scatter to.
scatter : float
Standard deviation (sigma) of the Gaussian.
in_place : bool, optional
Whether to add the scatter to x in place or return a
new array.
Returns
-------
x : array_like
x with the added scatter.
"""
if in_place:
x += np.random.randn(*x.shape)*float(scatter)
else:
x = np.asarray(x)
x = x + np.random.randn(*x.shape)*float(scatter)
return x
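
# Usage sketch (assumes add_scatter() above and numpy are in scope).
import numpy as np

np.random.seed(0)                           # reproducible noise for the sketch
x = np.zeros(5)
y = add_scatter(x, scatter=0.1)             # returns a new array; x is unchanged
add_scatter(x, scatter=0.1, in_place=True)  # adds the noise directly to x
print(y)
print(x)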
| 19,318
|
def validate_job_state(state):
"""
Validates whether a returned Job State has all the required fields with the right format.
If all is well, returns True,
otherwise this prints out errors to the command line and returns False.
Can be just used with assert in tests, like "assert validate_job_state(state)"
"""
required_fields = {
"job_id": str,
"user": str,
"wsid": int,
"authstrat": str,
"job_input": dict,
"updated": int,
"created": int,
"status": str,
}
optional_fields = {
"estimating": int,
"queued": int,
"running": int,
"finished": int,
"error_code": int,
"terminated_code": int,
"errormsg": str,
}
timestamp_fields = [
"created",
"updated",
"estimating",
"queued",
"running",
"completed",
]
# fields that have to be present based on the context of different statuses
valid_statuses = vars(Status)["_member_names_"]
status_context = {
"estimating": ["estimating"],
"running": ["running"],
"completed": ["completed"],
"error": ["error_code", "errormsg"],
"terminated": ["terminated_code"],
}
# 1. Make sure required fields are present and of the correct type
missing_reqs = list()
wrong_reqs = list()
for req in required_fields.keys():
if req not in state:
missing_reqs.append(req)
elif not isinstance(state[req], required_fields[req]):
wrong_reqs.append(req)
if missing_reqs or wrong_reqs:
print(f"Job state is missing required fields: {missing_reqs}.")
for req in wrong_reqs:
print(
f"Job state has faulty req - {req} should be of type {required_fields[req]}, but had value {state[req]}."
)
return False
# 2. Make sure that context-specific fields are present and the right type
status = state["status"]
if status not in valid_statuses:
print(f"Job state has invalid status {status}.")
return False
if status in status_context:
context_fields = status_context[status]
missing_context = list()
wrong_context = list()
for field in context_fields:
if field not in state:
missing_context.append(field)
elif not isinstance(state[field], optional_fields[field]):
wrong_context.append(field)
if missing_context or wrong_context:
print(f"Job state is missing status context fields: {missing_context}.")
for field in wrong_context:
print(
f"Job state has faulty context field - {field} should be of type {optional_fields[field]}, but had value {state[field]}."
)
return False
# 3. Make sure timestamps are really timestamps
bad_ts = list()
for ts_type in timestamp_fields:
if ts_type in state:
is_second_ts = is_timestamp(state[ts_type])
if not is_second_ts:
print(state[ts_type], "is not a second ts")
is_ms_ts = is_timestamp(state[ts_type] / 1000)
if not is_ms_ts:
print(state[ts_type], "is not a millisecond ts")
if not is_second_ts and not is_ms_ts:
bad_ts.append(ts_type)
if bad_ts:
for ts_type in bad_ts:
print(
f"Job state has a malformatted timestamp: {ts_type} with value {state[ts_type]}"
)
raise MalformedTimestampException()
return True
| 19,319
|
def get_urls(session):
"""
Function to get all urls of article in a table.
:param session: session establishes all conversations with the database and represents a “holding zone”.
:type session: sqlalchemy.session
:returns: integer amount of rows in table
"""
url = session.query(Article.url)
return [u[0] for u in url]
| 19,320
|
def iter_nonce_offsets(fh: BinaryIO, real_size: int = None, maxrange: int = 1024) -> Iterator[int]:
"""Returns a generator that yields nonce offset candidates based on encoded real_size.
If real_size is None it will automatically determine the size from fh.
It tries to find the `nonce offset` using the following structure.
``| nonce (dword) | encoded_size (dword) | encoded MZ + payload |``
Side effects: file handle position due to seeking
Args:
fh: file like object
real_size: encoded_size to search for, or automatically determined from fh if None.
maxrange: maximum range to search for
Yields:
nonce_offset candidates
"""
if real_size is None:
fh.seek(0, io.SEEK_END)
real_size = fh.tell()
for i in range(maxrange):
fh.seek(i)
nonce = fh.read(4)
size = fh.read(4)
if len(nonce) != 4 or len(size) != 4:
break
decoded_size = u32(xor(nonce, size))
if decoded_size + i + 8 == real_size:
logger.debug("FOUND real_size, iter_nonce_offsets -> %u", i)
yield i
| 19,321
|
def check_min_package_version(package, minimum_version, should_trunc_to_same_len=True):
"""Helper to decide if the package you are using meets minimum version requirement for some feature."""
real_version = pkg_resources.get_distribution(package).version
if should_trunc_to_same_len:
minimum_version = minimum_version[0 : len(real_version)]
logger.debug(
"package %s, version: %s, minimum version to run certain features: %s", package, real_version, minimum_version
)
return real_version >= minimum_version
| 19,322
|
def bboxes_protection(boxes, width, height):
"""
:param boxes:
:param width:
:param height:
:return:
"""
if not isinstance(boxes, np.ndarray):
boxes = np.asarray(boxes)
if len(boxes) > 0:
boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, width - 1)
boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, height - 1)
return boxes
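
# Usage sketch (assumes bboxes_protection() above is in scope); boxes are
# [x1, y1, x2, y2] and get clipped to the image bounds.
import numpy as np

boxes = [[-5, 10, 700, 300], [20, -3, 100, 900]]
print(bboxes_protection(boxes, width=640, height=480))
# x-coordinates are clipped to [0, 639], y-coordinates to [0, 479]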
| 19,323
|
def ann_to_json(file_path, save_path, bbox_type='normalize'):
"""convert .mat annotation file into .json file
Args:
file_path (str): .mat file path Ex : relative path '../data/digitStruct.mat' or complete path 'C:/usr/local/data/digitStruct.mat'
save_path (str): output .json file path; every directory in the path (other than the .json file itself) must already exist. Ex: '../data/train.json'
bbox_type (str, optional): bounding box format, either 'normalize' or 'kitti'. Defaults to 'normalize'.
Returns:
None : just save the .json file in the given dir.
"""
data_dict = mat73.loadmat(file_path)
dSName = data_dict['digitStruct']['name']
dSBbox = data_dict['digitStruct']['bbox']
if bbox_type == 'kitti':
t = True
else:
t = False
json_data = [getDigitStructure_json(
dSBbox, dSName, i, t) for i in range(len(dSBbox))]
with open(save_path, 'w', encoding='utf-8') as pf:
json.dump(json_data, pf, ensure_ascii=True, indent=4)
| 19,324
|
def mark_astroids(astroid_map):
"""
Mark all coordinates in the grid that contain an asteroid ('#' sign) and return their positions.
"""
astroids = []
for row, _ in enumerate(astroid_map):
for col, _ in enumerate(astroid_map[row]):
if astroid_map[row][col] == "#":
astroid_map[row][col] = ASTROID
astroids.append((row, col))
else:
astroid_map[row][col] = SPACE
return astroids
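
# Usage sketch (assumes mark_astroids() and its ASTROID/SPACE constants from
# the module above are in scope).
grid = [list(row) for row in (".#..", "...#", "#...")]
print(mark_astroids(grid))  # [(0, 1), (1, 3), (2, 0)]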
| 19,325
|
def ListAllBuckets():
"""Lists all buckets."""
credentials = service_account.Credentials.from_service_account_file(
'./digitalcore-poc-67645bca1a2a.json')
storage_client = storage.Client(project=credentials.project_id, credentials=credentials)
buckets = storage_client.list_buckets()
for bucket in buckets:
print(bucket.name)
blobs = storage_client.list_blobs(bucket)
for blob in blobs:
print(blob.name)
| 19,326
|
def process_player_data(
prefix, season=CURRENT_SEASON, gameweek=NEXT_GAMEWEEK, dbsession=session
):
"""
transform the player dataframe, basically giving a list (for each player)
of lists of minutes (for each match, and a list (for each player) of
lists of ["goals","assists","neither"] (for each match)
"""
df = get_player_history_df(
prefix, season=season, gameweek=gameweek, dbsession=dbsession
)
df["neither"] = df["team_goals"] - df["goals"] - df["assists"]
df.loc[(df["neither"] < 0), ["neither", "team_goals", "goals", "assists"]] = [
0.0,
0.0,
0.0,
0.0,
]
alpha = get_empirical_bayes_estimates(df)
y = df.sort_values("player_id")[["goals", "assists", "neither"]].values.reshape(
(
df["player_id"].nunique(),
df.groupby("player_id").count().iloc[0]["player_name"],
3,
)
)
minutes = df.sort_values("player_id")["minutes"].values.reshape(
(
df["player_id"].nunique(),
df.groupby("player_id").count().iloc[0]["player_name"],
)
)
nplayer = df["player_id"].nunique()
nmatch = df.groupby("player_id").count().iloc[0]["player_name"]
player_ids = np.sort(df["player_id"].unique())
return (
dict(
nplayer=nplayer,
nmatch=nmatch,
minutes=minutes.astype("int64"),
y=y.astype("int64"),
alpha=alpha,
),
player_ids,
)
| 19,327
|
def copy_to_table(_dal, _values, _field_names, _field_types, _table_name, _create_table=None, _drop_existing=None):
"""Copy a matrix of data into a table on the resource, return the table name.
:param _dal: An instance of DAL(qal.dal.DAL)
:param _values: The a list(rows) of lists(values) with values to be inserted
:param _field_names: The name of the fields(columns)
:param _field_types: The field types(qal.sql.types)
:param _table_name: The name of the destination tables
:param _create_table: Create the destination table based on _field_names, _field_types
:param _drop_existing: If a table with the same name as the destination table already exists, drop it
:return: The name of the destination table.
"""
if _drop_existing:
try:
_dal.execute(VerbDropTable(_table_name).as_sql(_dal.db_type))
_dal.commit()
except Exception as e:
print("copy_to_table - Ignoring error when dropping the table \"" + _table_name + "\": " + str(e))
if _create_table:
# Always create temporary table even if it ends up empty.
_create_table_sql = create_table_skeleton(_table_name, _field_names, _field_types).as_sql(_dal.db_type)
print("Creating " + _table_name + " table in "+ str(_dal) +"/" + str(_dal.connection) +", sql:\n" + _create_table_sql)
_dal.execute(_create_table_sql)
_dal.commit()
if len(_values) == 0:
print("copy_to_table: No source data, inserting no rows.")
else:
_insert_sql = make_insert_sql_with_parameters(_table_name, _field_names, _dal.db_type, _field_types)
print("Inserting " + str(len(_values)) + " rows (" + str(len(_values[0])) + " columns)")
_dal.executemany(_insert_sql, _values)
_dal.commit()
return _table_name
| 19,328
|
def svn(registry, xml_parent, data):
"""yaml: svn
Specifies the svn SCM repository for this job.
:arg str url: URL of the svn repository
:arg str basedir: location relative to the workspace root to checkout to
(default '.')
:arg str credentials-id: optional argument to specify the ID of credentials
to use
:arg str repo-depth: Repository depth. Can be one of 'infinity', 'empty',
'files', 'immediates' or 'unknown'. (default 'infinity')
:arg bool ignore-externals: Ignore Externals. (default false)
:arg str workspaceupdater: optional argument to specify how to update the
workspace (default wipeworkspace)
:supported values:
* **wipeworkspace** - deletes the workspace before checking out
* **revertupdate** - do an svn revert then an svn update
* **emulateclean** - delete unversioned/ignored files then update
* **update** - do an svn update as much as possible
:arg list(str) excluded-users: list of users to ignore revisions from
when polling for changes (if polling is enabled; parameter is optional)
:arg list(str) included-regions: list of file/folders to include
(optional)
:arg list(str) excluded-regions: list of file/folders to exclude (optional)
:arg list(str) excluded-commit-messages: list of commit messages to exclude
(optional)
:arg str exclusion-revprop-name: revision svn-property to ignore (optional)
:arg bool ignore-property-changes-on-directories: ignore svn-property only
changes of directories (default false)
:arg bool filter-changelog: If set Jenkins will apply the same inclusion
and exclusion patterns for displaying changelog entries as it does for
polling for changes (default false)
:arg list repos: list of repositories to checkout (optional)
:arg str viewvc-url: URL of the svn web interface (optional)
:Repo:
* **url** (`str`) -- URL for the repository
* **basedir** (`str`) -- Location relative to the workspace root
to checkout to (default '.')
* **credentials-id** - optional ID of credentials to use
* **repo-depth** - Repository depth. Can be one of 'infinity',
'empty', 'files', 'immediates' or 'unknown'. (default 'infinity')
* **ignore-externals** - Ignore Externals. (default false)
Multiple repos example:
.. literalinclude:: /../../tests/scm/fixtures/svn-multiple-repos-001.yaml
Advanced commit filtering example:
.. literalinclude:: /../../tests/scm/fixtures/svn-regions-001.yaml
"""
scm = XML.SubElement(xml_parent, 'scm', {'class':
'hudson.scm.SubversionSCM'})
if 'viewvc-url' in data:
browser = XML.SubElement(
scm, 'browser', {'class': 'hudson.scm.browsers.ViewSVN'})
XML.SubElement(browser, 'url').text = data['viewvc-url']
locations = XML.SubElement(scm, 'locations')
def populate_repo_xml(parent, data):
module = XML.SubElement(parent,
'hudson.scm.SubversionSCM_-ModuleLocation')
XML.SubElement(module, 'remote').text = data['url']
XML.SubElement(module, 'local').text = data.get('basedir', '.')
if 'credentials-id' in data:
XML.SubElement(module, 'credentialsId').text = data[
'credentials-id']
repo_depths = ['infinity', 'empty', 'files', 'immediates', 'unknown']
repo_depth = data.get('repo-depth', 'infinity')
if repo_depth not in repo_depths:
raise InvalidAttributeError('repo_depth', repo_depth, repo_depths)
XML.SubElement(module, 'depthOption').text = repo_depth
XML.SubElement(module, 'ignoreExternalsOption').text = str(
data.get('ignore-externals', False)).lower()
if 'repos' in data:
repos = data['repos']
for repo in repos:
populate_repo_xml(locations, repo)
elif 'url' in data:
populate_repo_xml(locations, data)
else:
raise JenkinsJobsException("A top level url or repos list must exist")
updater = data.get('workspaceupdater', 'wipeworkspace')
if updater == 'wipeworkspace':
updaterclass = 'CheckoutUpdater'
elif updater == 'revertupdate':
updaterclass = 'UpdateWithRevertUpdater'
elif updater == 'emulateclean':
updaterclass = 'UpdateWithCleanUpdater'
elif updater == 'update':
updaterclass = 'UpdateUpdater'
XML.SubElement(scm, 'workspaceUpdater', {'class':
'hudson.scm.subversion.' + updaterclass})
mapping = [
# option, xml name, default value
("excluded-regions", 'excludedRegions', []),
("included-regions", 'includedRegions', []),
("excluded-users", 'excludedUsers', []),
("exclusion-revprop-name", 'excludedRevprop', ''),
("excluded-commit-messages", 'excludedCommitMessages', []),
("ignore-property-changes-on-directories", 'ignoreDirPropChanges',
False),
("filter-changelog", 'filterChangelog', False),
]
for optname, xmlname, defvalue in mapping:
if isinstance(defvalue, list):
val = '\n'.join(data.get(optname, defvalue))
else:
val = data.get(optname, defvalue)
# Skip adding xml entry if default is empty and no value given
if not val and (defvalue in ['', []]):
continue
xe = XML.SubElement(scm, xmlname)
if isinstance(defvalue, bool):
xe.text = str(val).lower()
else:
xe.text = str(val)
| 19,329
|
def build_file_path(base_dir, base_name, *extensions):
"""Build a path to a file in a given directory.
The file may have an extension(s).
:returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3'
"""
file_name = os.extsep.join([base_name] + list(extensions))
return os.path.expanduser(os.path.join(base_dir, file_name))
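
# Usage sketch (assumes build_file_path() above is in scope).
path = build_file_path("~/backups", "db-dump", "sql", "gz")
print(path)  # e.g. /home/<user>/backups/db-dump.sql.gz after ~ expansion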
| 19,330
|
def lerarquivo(nome):
"""
-> Reads the file with the players' data and prints it in a format that is
more presentable to the user.
"""
titulo('LISTA DE JOGADORES')
a = open(nome, 'r')
for linha in a:
dado = linha.split(';')
dado[1] = dado[1].replace('\n', '')
print(f'{dado[0]:.<30}{dado[1]} pontos')
a.close()
| 19,331
|
def test_build_wfi_stac_item_keys():
"""test_wfi_build_stac_item_keys"""
meta = get_keys_from_cbers(
"test/fixtures/CBERS_4A_WFI_20200801_221_156_" "L4_BAND13.xml"
)
buckets = {"metadata": "cbers-meta-pds", "cog": "cbers-pds", "stac": "cbers-stac"}
smeta = build_stac_item_keys(meta, buckets)
# id
assert smeta["id"] == "CBERS_4A_WFI_20200801_221_156_L4"
# bbox
assert len(smeta["bbox"]) == 4
assert smeta["bbox"][1] == -38.033425
assert smeta["bbox"][0] == -68.887467
assert smeta["bbox"][3] == -29.919749
assert smeta["bbox"][2] == -59.245969
# geometry is built like other cameras, correct computation
# is checked in test_get_keys_from_cbers4a
# properties
assert smeta["properties"]["datetime"] == "2020-08-01T14:32:45Z"
# properties:view
assert smeta["properties"]["view:sun_elevation"] == 32.8436
assert smeta["properties"]["view:sun_azimuth"] == 29.477449999999997
assert smeta["properties"]["view:off_nadir"] == 0.000431506
# properties:proj
assert smeta["properties"]["proj:epsg"] == 32720
# properties:cbers
assert smeta["properties"]["cbers:data_type"] == "L4"
assert smeta["properties"]["cbers:path"] == 221
assert smeta["properties"]["cbers:row"] == 156
| 19,332
|
def test_default_suite(executed_docstring_source):
"""
>>> def test_default_suite_example():
... pass
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_default_suite_example",
has_parent_suite(anything()), # path to testdir
has_suite("test_default_suite"), # created file name
not_(has_sub_suite(anything()))
)
)
| 19,333
|
def Uni(A, b, x=None, maxQ=False, x0=None, tol=1e-12, maxiter=1e3):
"""
Compute the recognizing functional Uni.
If maxQ=True, the maximum of the functional is found.
Parameters:
A: Interval
The matrix of the interval linear system (ISLAE).
b: Interval
The right-hand side vector of the interval linear system.
Optional Parameters:
x: float, array_like
The point at which the recognizing functional is evaluated.
By default x is an array of zeros.
maxQ: bool
If True, the functional is maximized.
x0: float, array_like
Initial guess.
tol: float
Tolerance for stopping the optimization process.
maxiter: int
Maximum number of iterations.
Returns:
out: float, tuple
The value of the recognizing functional at the point x.
If maxQ=True, a tuple is returned where
the first element indicates whether the optimization finished successfully,
the second element is the optimum point,
the third element is the function value at that point.
"""
__uni = lambda x: min(b.rad - (b.mid - A @ x).mig)
__minus_uni = lambda x: -__uni(x)
if maxQ==False:
if x is None:
x = np.zeros(A.shape[1])
return __uni(x)
else:
from scipy.optimize import minimize
if x0 is None:
x0 = np.zeros(A.shape[1])+1
maximize = minimize(__minus_uni, x0, method='Nelder-Mead', tol=tol,
options={'maxiter': maxiter})
return maximize.success, maximize.x, -maximize.fun
| 19,334
|
def boqa(alpha, beta, query, items_stat):
"""Implementation of the BOQA algorithm.
Args:
alpha (float): False positive rate.
beta (float): False negative rate.
query (dict): Dict of query terms (standard terms). Key: term name, value: presence value
items_stat (dict): Dictionary of item statistics. Key: disease, Value: list of items
Returns:
[dict]: Dictionary of diseases and their prediction probabilities.
"""
hidden = {}
p = {}
a = {}
a_init = 0
# For each disease
for disease in items_stat:
# We initialize the hidden layer with values from the stats
for term in query:
if term in items_stat[disease]["feature"].keys():
proba = items_stat[disease]["feature"][term]
hidden[term] = np.random.choice([1, 0], p=[proba, 1 - proba])
else:
hidden[term] = 0
# Cardinality calculation of terms between H and Q
m = matrix_m(query, hidden)
a[disease] = (
pow(beta, m[0, 1])
* pow(1 - beta, m[1, 1])
* pow(1 - alpha, m[0, 0])
* pow(alpha, m[1, 0])
)
a_init += a[disease]
for disease in items_stat:
p[disease] = a[disease] / a_init
return p
| 19,335
|
def test_issue_without_title(resolved_doctree, issue):
"""
Test resolval of issues without title.
"""
pytest.assert_issue_xref(resolved_doctree, issue, '#10')
| 19,336
|
def d2c(sys,method='zoh'):
"""Continous to discrete conversion with ZOH method
Call:
sysc=c2d(sys,method='log')
Parameters
----------
sys : System in statespace or Tf form
method: 'zoh' or 'bi'
Returns
-------
sysc: continous system ss or tf
"""
flag = 0
if isinstance(sys, TransferFunction):
sys=tf2ss(sys)
flag=1
a=sys.A
b=sys.B
c=sys.C
d=sys.D
Ts=sys.dt
n=shape(a)[0]
nb=shape(b)[1]
nc=shape(c)[0]
tol=1e-12
if method=='zoh':
if n==1:
if b[0,0]==1:
A=0
B=b/sys.dt
C=c
D=d
else:
tmp1=hstack((a,b))
tmp2=hstack((zeros((nb,n)),eye(nb)))
tmp=vstack((tmp1,tmp2))
s=logm(tmp)
s=s/Ts
if norm(imag(s),ord='inf') > sqrt(sp.finfo(float).eps):
print("Warning: accuracy may be poor")
s=real(s)
A=s[0:n,0:n]
B=s[0:n,n:n+nb]
C=c
D=d
elif method=='foh':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
Id = mat(eye(n))
A = logm(a)/Ts
A = real(around(A,12))
Amat = mat(A)
B = (a-Id)**(-2)*Amat**2*b*Ts
B = real(around(B,12))
Bmat = mat(B)
C = c
D = d - C*(Amat**(-2)/Ts*(a-Id)-Amat**(-1))*Bmat
D = real(around(D,12))
elif method=='bi':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
poles=eigvals(a)
if any(abs(poles-1)<200*sp.finfo(float).eps):
print("d2c: some poles very close to one. May get bad results.")
I=mat(eye(n,n))
tk = 2 / sqrt (Ts)
A = (2/Ts)*(a-I)*inv(a+I)
iab = inv(I+a)*b
B = tk*iab
C = tk*(c*inv(I+a))
D = d- (c*iab)
else:
print("Method not supported")
return
sysc=StateSpace(A,B,C,D)
#print("Teste ", sysc)
if flag==1:
sysc=ss2tf(sysc)
return sysc
| 19,337
|
def ipv4(value):
"""
Parses the value as an IPv4 address and returns it.
"""
try:
return ipaddress.IPv4Address(value)
except ValueError:
return None
| 19,338
|
def test_from(
fork: str,
) -> Callable[
[Callable[[], StateTest]], Callable[[str], Mapping[str, Fixture]]
]:
"""
Decorator that takes a test generator and fills it for all forks after the
specified fork.
"""
fork = fork.capitalize()
def decorator(
fn: Callable[[], StateTest]
) -> Callable[[str], Mapping[str, Fixture]]:
def inner(engine) -> Mapping[str, Fixture]:
return fill_state_test(fn(), forks_from(fork), engine)
cast(Any, inner).__filler_metadata__ = {
"fork": fork,
"name": fn.__name__.lstrip("test_"),
}
return inner
return decorator
| 19,339
|
def get_hash_name(feed_id):
"""
Generate a unique identifier for a user-submitted feed from the MD5 hash of its id.
"""
return hashlib.md5(feed_id.encode('utf8')).hexdigest()
| 19,340
|
def _load_transition_probabilities(infile: TextIO) -> tuple[list, int]:
"""
For summary files with new syntax (post 2021-11-24).
Parameters
----------
infile : TextIO
The KSHELL summary file at the starting position of either of
the transition probability sections.
Returns
-------
transitions : list
List of transition data.
negative_spin_counts : int
The number of negative spin levels encountered.
Example
-------
B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4
e^2 fm^4 (W.u.)
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391
"""
negative_spin_counts = 0
transitions = []
for _ in range(2): infile.readline()
for line in infile:
line_split = line.split()
if not line_split: break
spin_initial = float(Fraction(line_split[0]))
parity_initial = _parity_string_to_integer(line_split[1])
idx_initial = int(line_split[2])
Ex_initial = float(line_split[3])
spin_final = float(Fraction(line_split[4]))
parity_final = _parity_string_to_integer(line_split[5])
idx_final = int(line_split[6])
Ex_final = float(line_split[7])
E_gamma = float(line_split[8])
reduced_transition_prob_decay = float(line_split[9])
reduced_transition_prob_excite = float(line_split[11])
if (spin_final < 0) or (spin_initial < 0):
"""
-1 spin states in the KSHELL data file indicates
bad states which should not be included.
"""
negative_spin_counts += 1 # Debug.
continue
# reduced_transition_prob_decay_list.append([
# 2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
# parity_final, Ex_final, E_gamma, reduced_transition_prob_decay,
# reduced_transition_prob_excite
# ])
transitions.append([
2*spin_initial, parity_initial, idx_initial, Ex_initial,
2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
reduced_transition_prob_decay, reduced_transition_prob_excite
])
return transitions, negative_spin_counts
| 19,341
|
def merge_lineages(counts: Dict[str, int], min_count: int) -> Dict[str, str]:
"""
Given a dict of lineage counts and a min_count, returns a mapping from all
lineages to merged lineages.
"""
assert isinstance(counts, dict)
assert isinstance(min_count, int)
assert min_count > 0
# Merge rare children into their parents.
counts: Dict[str, int] = Counter({decompress(k): v for k, v in counts.items()})
mapping = {}
for child in sorted(counts, key=lambda k: (-len(k), k)):
if counts[child] < min_count:
parent = get_parent(child)
if parent is None:
continue # at a root
counts[parent] += counts.pop(child)
mapping[child] = parent
# Transitively close.
for old, new in list(mapping.items()):
while new in mapping:
new = mapping[new]
mapping[old] = new
# Recompress.
mapping = {compress(k): compress(v) for k, v in mapping.items()}
return mapping
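
# A simplified, standalone sketch of the same merge-rare-children idea
# (assumptions: dotted lineage names, the compress/decompress step is omitted,
# and _parent() stands in for get_parent()).
from collections import Counter


def _parent(name):
    # parent of "B.1.1" is "B.1"; top-level names have no parent
    return name.rsplit(".", 1)[0] if "." in name else None


def merge_rare(counts, min_count):
    counts = Counter(counts)
    mapping = {}
    # visit the most specific (longest) names first so children merge upward
    for child in sorted(counts, key=lambda k: (-len(k), k)):
        if counts[child] < min_count:
            parent = _parent(child)
            if parent is None:
                continue
            counts[parent] += counts.pop(child)
            mapping[child] = parent
    # transitively close the mapping so every rare name points at a kept ancestor
    for old, new in list(mapping.items()):
        while new in mapping:
            new = mapping[new]
        mapping[old] = new
    return mapping


print(merge_rare({"B": 100, "B.1": 50, "B.1.1": 2, "B.1.2": 1}, min_count=5))
# {'B.1.1': 'B.1', 'B.1.2': 'B.1'}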
| 19,342
|
def test_std_remove_stereochemistry(mols):
"""Test if all stereochemistry centers (chiral and double bonds) are removed."""
# chirality
assert Chem.FindMolChiralCenters(mols['stereo_chiral'], includeUnassigned=True) == [(5, 'S')]
Chem.RemoveStereochemistry(mols['stereo_chiral']) # mol_ini is modified inplace
assert Chem.FindMolChiralCenters(mols['stereo_chiral'], includeUnassigned=True) == [(5, '?')]
# doublebond
stereo_doublebond = [b.GetStereo() for b in mols['stereo_doublebond'].GetBonds() if b.GetStereo() != Chem.rdchem.BondStereo.STEREONONE]
assert stereo_doublebond == [Chem.rdchem.BondStereo.STEREOE]
Chem.RemoveStereochemistry(mols['stereo_doublebond']) # mol_ini is modified inplace
stereo_doublebond = [b.GetStereo() for b in mols['stereo_doublebond'].GetBonds() if b.GetStereo() != Chem.rdchem.BondStereo.STEREONONE]
assert stereo_doublebond == []
| 19,343
|
def maybe_download_file(project_name, fname, dist_dir):
"""Verify the checksums."""
details = get_file_details(project_name, fname)
if details['sha1']:
sha1 = hashlib.sha1()
dist_filename = os.path.join(dist_dir, fname)
fin = open(dist_filename, 'rb')
sha1.update(fin.read())
fin.close()
hex_digest = sha1.hexdigest()
else:
hex_digest = '-'
if hex_digest != details['sha1']:
print('SHA1 checksums don\'t match, downloading.')
download_file(project_name, fname, dist_dir)
| 19,344
|
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D | Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D + Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
| 19,345
|
def test_load_tarfile():
"""
Registry can load a tar file.
"""
registry = Registry()
with NamedTemporaryFile() as fileobj:
build_tar(fileobj)
fileobj.flush()
schema_ids = registry.load(fileobj.name)
assert_that(schema_ids, has_length(3))
assert_that(schema_ids, has_item(ADDRESS_ID))
assert_that(schema_ids, has_item(NAME_ID))
assert_that(schema_ids, has_item(RECORD_ID))
assert_that(registry, has_length(3))
assert_that(registry, has_key(ADDRESS_ID))
assert_that(registry, has_key(NAME_ID))
assert_that(registry, has_key(RECORD_ID))
| 19,346
|
def str2bool(s):
"""特定の文字列をbool値にして返す。
s: bool値に変換する文字列(true, false, 1, 0など)。
"""
if isinstance(s, bool):
return s
else:
s = s.lower()
if s == "true":
return True
elif s == "false":
return False
elif s == "1":
return True
elif s == "0":
return False
else:
raise ValueError("%s is incorrect value!" % (s))
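
# Usage sketch (assumes str2bool() above is in scope).
for raw in (True, "TRUE", "false", "1", "0"):
    print(raw, "->", str2bool(raw))
# True -> True, TRUE -> True, false -> False, 1 -> True, 0 -> False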
| 19,347
|
def no_header_csv_demo():
"""
Example of writing and reading a CSV file without a header row.
"""
villains = [
["doctor", "no"],
["rosa", "tony"],
["mister", "big"],
["auric", "goldfinger"],
["sophia", "blob"]
]
# write the rows
with open("./csv_no_header_test.txt", "wt") as f:
csvout = csv.writer(f)
csvout.writerows(villains)
print("write done")
# read the rows back
with open("./csv_no_header_test.txt", "rt") as fin:
cin = csv.reader(fin)
villains = [row for row in cin]
print(villains)
| 19,348
|
def merge_aoistats(main_AOI_Stat,new_AOI_Stat,total_time,total_numfixations):
"""a helper method that updates the AOI_Stat object of this Scene with a new AOI_Stat object
Args:
main_AOI_Stat: AOI_Stat object of this Scene
new_AOI_Stat: a new AOI_Stat object
total_time:
total_numfixations:
Returns:
the updated AOI_Stat object
"""
maois = main_AOI_Stat
maois.features['numfixations'] += new_AOI_Stat.features['numfixations']
maois.features['longestfixation'] = max(maois.features['longestfixation'],new_AOI_Stat.features['longestfixation'])
maois.features['totaltimespent'] += new_AOI_Stat.features['totaltimespent']
maois.features['proportiontime'] = float(maois.features['totaltimespent'])/total_time
maois.features['proportionnum'] = float(maois.features['numfixations'])/total_numfixations
if maois.features['totaltimespent']>0:
maois.features['fixationrate'] = float(maois.features['numfixations'])/maois.features['totaltimespent']
else:
maois.features['fixationrate'] = 0.0
#calculating the transitions to and from this AOI and other active AOIs at the moment
new_AOI_Stat_transition_aois = filter(lambda x: x.startswith(('numtransto_','numtransfrom_')),new_AOI_Stat.features.keys())
if params.DEBUG:
print("segement's transition_aois",new_AOI_Stat_transition_aois)
maois.total_tans_to += new_AOI_Stat.total_tans_to #updating the total number of transition to this AOI
maois.total_tans_from += new_AOI_Stat.total_tans_from #updating the total number of transition from this AOI
for feat in new_AOI_Stat_transition_aois:
if feat in maois.features.copy():
maois.features[feat] += new_AOI_Stat.features[feat]
else:
maois.features[feat] = new_AOI_Stat.features[feat]
# if feat.startswith('numtransto_'):
# sumtransto += maois.features[feat]
# else:
# sumtransfrom += maois.features[feat]
# updating the proportion tansition features based on new transitions to and from this AOI
maois_transition_aois = list(filter(lambda x: x.startswith(('numtransto_','numtransfrom_')),maois.features.keys())) #all the transition features for this AOI should be updated even if they are not active for this segment
for feat in maois_transition_aois.copy():
if feat.startswith('numtransto_'):
aid = feat.lstrip('numtransto_')
if maois.total_tans_to > 0:
maois.features['proptransto_%s'%(aid)] = float(maois.features[feat]) / maois.total_tans_to
else:
maois.features['proptransto_%s'%(aid)] = 0
else:
aid = feat.lstrip('numtransfrom_')
if maois.total_tans_from > 0:
maois.features['proptransfrom_%s'%(aid)] = float(maois.features[feat]) / maois.total_tans_from
else:
maois.features['proptransfrom_%s'%(aid)] = 0
### end of transition calculation
return maois
| 19,349
|
def catalog_xmatch_circle(catalog, other_catalog,
radius='Association_Radius',
other_radius=Angle(0, 'deg')):
"""Find associations within a circle around each source.
This is convenience function built on `~astropy.coordinates.SkyCoord.search_around_sky`,
extending it in two ways:
1. Each source can have a different association radius.
2. Handle source catalogs (`~astropy.table.Table`) instead of `~astropy.coordinates.SkyCoord`.
Sources are associated if the sum of their radii is smaller than their separation on the sky.
Parameters
----------
catalog : `~astropy.table.Table`
Main source catalog
other_catalog : `~astropy.table.Table`
Other source catalog of potential associations
radius, other_radius : `~astropy.coordinates.Angle` or `str`
Main source catalog association radius.
For `str` this must be a column name (in degrees if without units)
Returns
-------
associations : `~astropy.table.Table`
The list of associations.
"""
if isinstance(radius, six.text_type):
radius = Angle(catalog[radius])
if isinstance(other_radius, six.text_type):
other_radius = Angle(other_catalog[other_radius])
skycoord = skycoord_from_table(catalog)
other_skycoord = skycoord_from_table(other_catalog)
association_catalog_name = other_catalog.meta.get('name', 'N/A')
# Compute associations as list of dict and store in `Table` at the end
associations = []
for source_index in range(len(catalog)):
# TODO: check if this is slower or faster than calling `SkyCoord.search_around_sky` here!?
separation = skycoord[source_index].separation(other_skycoord)
max_separation = radius[source_index] + other_radius
other_indices = np.nonzero(separation < max_separation)[0]
for other_index in other_indices:
association = OrderedDict(
Source_Index=source_index,
Source_Name=catalog['Source_Name'][source_index],
Association_Index=other_index,
Association_Name=other_catalog['Source_Name'][other_index],
Association_Catalog=association_catalog_name,
# There's an issue with scalar `Quantity` objects to init the `Table`
# https://github.com/astropy/astropy/issues/3378
# For now I'll just store the values without unit
Separation=separation[other_index].degree,
)
associations.append(association)
# Need to define columns if there's not a single association
if len(associations) == 0:
log.debug('No associations found.')
table = Table()
table.add_column(Column([], name='Source_Index', dtype=int))
table.add_column(Column([], name='Source_Name', dtype=str))
table.add_column(Column([], name='Association_Index', dtype=int))
table.add_column(Column([], name='Association_Name', dtype=str))
table.add_column(Column([], name='Association_Catalog', dtype=str))
table.add_column(Column([], name='Separation', dtype=float))
else:
log.debug('Found {} associations.'.format(len(associations)))
table = Table(associations, names=associations[0].keys())
return table
| 19,350
|
def show_map_room(room_id=None):
"""Display a room on a map."""
return get_map_information(room_id=room_id)
| 19,351
|
def color_enabled():
"""Check for whether color output is enabled
If the configuration value ``datalad.ui.color`` is ``'on'`` or ``'off'``,
that takes precedence.
If ``datalad.ui.color`` is ``'auto'``, and the environment variable
``NO_COLOR`` is defined (see https://no-color.org), then color is disabled.
Otherwise, enable colors if a TTY is detected by ``datalad.ui.ui.is_interactive``.
Returns
-------
bool
"""
ui_color = cfg.obtain('datalad.ui.color')
return (ui_color == 'on' or
ui_color == 'auto' and os.getenv('NO_COLOR') is None and ui.is_interactive)
| 19,352
|
def parse_line_regex(line):
"""Parse raw data line into list of floats using regex.
This regex approach works, but is very slow!! It also requires two helper functions to clean up
malformed data written by ls-dyna (done on purpose, probably to save space).
Args:
line (str): raw data line from nodout
Returns:
raw_data (list of floats): [nodeID, xdisp, ydisp, zdisp]
"""
try:
raw_data = line.split()
raw_data = [float(x) for x in raw_data]
except ValueError:
line = correct_neg(line)
line = correct_Enot(line)
raw_data = line.split()
raw_data = [float(x) for x in raw_data[0:4]]
return raw_data
| 19,353
|
def create_page_metadata(image_dir,
image_dir_path,
font_files,
text_dataset,
speech_bubble_files,
speech_bubble_tags):
"""
This function creates page metadata for a single page. It includes
transforms, background addition, random panel removal,
panel shrinking, and the populating of panels with
images and speech bubbles.
:param image_dir: List of images to pick from
:type image_dir: list
:param image_dir_path: Path of images dir to add to
panels
:type image_dir_path: str
:param font_files: list of font files for speech bubble
text
:type font_files: list
:param text_dataset: A dask dataframe of text to
pick to render within speech bubble
:type text_dataset: pandas.dataframe
:param speech_bubble_files: list of base speech bubble
template files
:type speech_bubble_files: list
:param speech_bubble_tags: a list of speech bubble
writing area tags by filename
:type speech_bubble_tags: list
:return: Created Page with all the bells and whistles
:rtype: Page
"""
# Select page type
page_type = np.random.choice(
list(cfg.vertical_horizontal_ratios.keys()),
p=list(cfg.vertical_horizontal_ratios.values())
)
# Select number of panels on the page
# between 1 and 8
number_of_panels = np.random.choice(
list(cfg.num_pages_ratios.keys()),
p=list(cfg.num_pages_ratios.values())
)
page = get_base_panels(number_of_panels, page_type)
if np.random.random() < cfg.panel_transform_chance:
page = add_transforms(page)
page = shrink_panels(page)
page = populate_panels(page,
image_dir,
image_dir_path,
font_files,
text_dataset,
speech_bubble_files,
speech_bubble_tags
)
if np.random.random() < cfg.panel_removal_chance:
page = remove_panel(page)
if number_of_panels == 1:
page = add_background(page, image_dir, image_dir_path)
else:
if np.random.random() < cfg.background_add_chance:
page = add_background(page, image_dir, image_dir_path)
return page
| 19,354
|
def __merge_results(
result_list: tp.List[tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[CVE, CWE]]]]]
) -> tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[CVE, CWE]]]]:
"""
Merge a list of results into one dictionary.
Args:
result_list: a list of ``commit -> cve`` maps to be merged
Return:
the merged dictionary with line number as key and commit hash, a list of
unique CVE's and a list of unique CWE's as values
"""
results: tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[
CVE, CWE]]]] = defaultdict(lambda: defaultdict(set))
for unmerged in result_list:
for entry in unmerged.keys():
results[entry]['cve'].update(unmerged[entry]['cve'])
results[entry]['cwe'].update(unmerged[entry]['cwe'])
return results
| 19,355
|
def compactness(xyz):
"""
Input: xyz
Output: compactness (V^2/SA^3) of convex hull of 3D points.
"""
xyz = np.array(xyz)
ch = ConvexHull(xyz, qhull_options="QJ")
return ch.volume**2/ch.area**3
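
# Sanity-check sketch (assumes compactness() above is in scope): for points on
# a unit sphere the hull's V^2/SA^3 should approach the sphere value 1/(36*pi).
import numpy as np

rng = np.random.default_rng(0)
pts = rng.normal(size=(2000, 3))
pts /= np.linalg.norm(pts, axis=1, keepdims=True)  # project onto the unit sphere

print(compactness(pts))        # ~0.0088
print(1.0 / (36.0 * np.pi))    # 0.00884..., the exact sphere value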
| 19,356
|
def get_things_saved(figa, store, store_short, file_name, suggested_name):
""" get filename """
idir = file_name.rsplit('/', maxsplit=1)[0]
sname = suggested_name.rsplit('.', maxsplit=1)[0]
window_save = tk.Tk()
window_save.withdraw()
save_fn = asksaveasfilename(initialfile=sname, filetypes=[('All files', '*')], title='Suggest file name',
initialdir=idir)
window_save.destroy()
store.to_csv(save_fn + '.csv', index_label="index")
store_short.to_csv(save_fn + '_short.csv', index_label="index")
""" resets instructions to nothing before save"""
figa.savefig(save_fn + '.pdf')
| 19,357
|
def is_posix():
"""Convenience function that tests different information sources to verify
whether the operating system is POSIX compliant.
.. note::
No assumption is made regarding the POSIX compliance level.
:return: True if the operating system is POSIX compliant, False otherwise.
:rtype: bool
"""
return os.name in OS_POSIX_NAMES
| 19,358
|
def guide(batch, z_dim, hidden_dim, out_dim=None, num_obs_total=None):
"""Defines the probabilistic guide for z (variational approximation to posterior): q(z) ~ p(z|q)
:param batch: a batch of observations
:return: (named) sampled z from the variational (guide) distribution q(z)
"""
assert(jnp.ndim(batch) == 3)
batch_size = jnp.shape(batch)[0]
batch = jnp.reshape(batch, (batch_size, -1)) # squash each data item into a one-dimensional array (preserving only the batch size on the first axis)
out_dim = jnp.shape(batch)[1]
num_obs_total = batch_size if num_obs_total is None else num_obs_total
encode = numpyro.module('encoder', encoder(hidden_dim, z_dim), (batch_size, out_dim))
with plate('batch', num_obs_total, batch_size):
z_loc, z_std = encode(batch) # obtain mean and variance for q(z) ~ p(z|x) from encoder
z = sample('z', dist.Normal(z_loc, z_std).to_event(1)) # z follows q(z)
return z
| 19,359
|
def trip2str(trip):
""" Pretty-printing. """
header = "{} {} {} - {}:".format(trip['departureTime'],
trip['departureDate'], trip['origin'],
trip['destination'])
output = [header]
for subtrip in trip['trip']:
originstr = u'{}....{}'.format(subtrip['departureTime'],
subtrip['origin'])
output.append(originstr)
for subsubtrip in subtrip['trip']:
t = subsubtrip['arrivalTime']
d = subsubtrip['stop']
intermediatestr = t+u'.'*8+d
output.append(intermediatestr)
destinationstr = u'{}....{}'.format(subtrip['arrivalTime'],
subtrip['destination'])
output.append(destinationstr)
return "\n".join(output)
| 19,360
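A minimal trip dict in the shape trip2str expects; the field names are inferred from the function body, so treat them as assumptions rather than a documented schema.

trip = {
    'departureTime': '08:15', 'departureDate': '2023-05-01',
    'origin': 'Stockholm C', 'destination': 'Uppsala C',
    'trip': [{
        'departureTime': '08:15', 'origin': 'Stockholm C',
        'arrivalTime': '08:53', 'destination': 'Uppsala C',
        'trip': [{'arrivalTime': '08:40', 'stop': 'Knivsta'}],
    }],
}
print(trip2str(trip))
# 08:15 2023-05-01 Stockholm C - Uppsala C:
# 08:15....Stockholm C
# 08:40........Knivsta
# 08:53....Uppsala C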
|
def station_suffix(station_type):
""" Simple switch, map specific types on to single letter. """
suffix = ' (No Dock)'
if 'Planetary' in station_type and station_type != 'Planetary Settlement':
suffix = ' (P)'
elif 'Starport' in station_type:
suffix = ' (L)'
elif 'Asteroid' in station_type:
suffix = ' (AB)'
elif 'Outpost' in station_type:
suffix = ' (M)'
elif 'Carrier' in station_type:
suffix = ' (C)'
return suffix
| 19,361
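Expected mappings for a few representative type strings; the exact station-type names are assumptions based on the substring checks above.

assert station_suffix('Coriolis Starport') == ' (L)'
assert station_suffix('Planetary Outpost') == ' (P)'
assert station_suffix('Planetary Settlement') == ' (No Dock)'
assert station_suffix('Fleet Carrier') == ' (C)'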
|
def AddKmsKeyResourceArg(parser, resource, region_fallthrough=False,
boot_disk_prefix=False):
"""Add a resource argument for a KMS key.
Args:
parser: the parser for the command.
resource: str, the name of the resource that the cryptokey will be used to
protect.
region_fallthrough: bool, True if the command has a region flag that should
be used as a fallthrough for the kms location.
boot_disk_prefix: If the key flags have the 'boot-disk' prefix.
"""
flag_name_overrides = None
if boot_disk_prefix:
kms_flags = ['kms-key', 'kms-keyring', 'kms-location', 'kms-project']
flag_name_overrides = dict(
[(flag, '--boot-disk-' + flag) for flag in kms_flags])
concept_parsers.ConceptParser.ForResource(
'--kms-key',
GetKmsKeyResourceSpec(region_fallthrough=region_fallthrough),
'The Cloud KMS (Key Management Service) cryptokey that will be used to '
'protect the {}.'.format(resource),
flag_name_overrides=flag_name_overrides).AddToParser(parser)
| 19,362
|
def addprefixed(unitname, prefixrange='full'):
""" Add prefixes to already defined unit
Parameters
----------
unitname: str
        Name of unit to be prefixed, e.g. 'm' -> 'mm','cm','dm','km'
prefixrange: str
Range: 'engineering' -> 1e-18 to 1e12 or 'full' -> 1e-24 to 1e24
"""
if prefixrange == 'engineering':
_prefixes = _engineering_prefixes
else:
_prefixes = _full_prefixes
unit = unit_table[unitname]
for prefix in _prefixes:
prefixedname = prefix[0] + unitname
if prefixedname not in unit_table:
add_composite_unit(prefixedname, prefix[1], unitname, prefixed=True, baseunit=unit, verbosename=unit.verbosename,
url=unit.url)
| 19,363
|
def temperatures_equal(t1, t2):
"""Handle 'off' reported as 126.5, but must be set as 0."""
if t1 == settings.TEMPERATURE_OFF:
t1 = 0
if t2 == settings.TEMPERATURE_OFF:
t2 = 0
return t1 == t2
| 19,364
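A short check of the intended behaviour, assuming the module-level settings object with TEMPERATURE_OFF as described in the docstring:

assert temperatures_equal(settings.TEMPERATURE_OFF, 0)   # 'off' reported vs. 'off' set
assert temperatures_equal(21.5, 21.5)
assert not temperatures_equal(21.5, 22.0)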
|
def calc_cf(fname, standard='GC',thickness=1.0,plot=False,xmin=None,xmax=None,interpolation_type='linear'):
"""
    Calculates the calibration factor from a measurement of a chosen standard sample.
    fname : filename containing the experimental data measured on the standard sample
    standard : 'GC' or 'Water' for Glassy Carbon (0.1055 cm thick, from NIST) or Water (with the same known thickness as the samples)
thickness : Thickness of Standard sample in cm. It should be 0.1055 for NIST GC standard.
interpolation_type : 'linear','quadratic'or 'cubic'
plot : True or False for plotting or not plotting to view the goodness of fit
xmin,xmax : minimum and maximum Q-value between which the experimental data will be fitted with the standard data available
"""
if os.path.exists(fname):
if standard=='GC':
std_dat=np.loadtxt('./SetupData/glassy_carbon_saxs_std.txt')
elif standard=='Water':
qst=np.linspace(0.003,1.0,1000)
std_dat=np.vstack((qst,np.ones_like(qst)*1.68e-2)).T
tmp_dat=np.loadtxt(fname)
fh=open(fname,'r')
lines=fh.readlines()
for line in lines:
if line[0]=='#':
try:
header,value=line[1:].split('=')
if header=='Energy':
energy=float(value)
except:
pass
#Checking the data for zeros
exp_dat=[]
for i in range(tmp_dat.shape[0]):
if tmp_dat[i,1]>1e-20:
exp_dat.append(tmp_dat[i,:])
exp_dat=np.array(exp_dat)
if xmin is None:
xmin=np.max([np.min(std_dat[:,0]),np.min(exp_dat[:,0])])
if xmax is None:
xmax=np.min([np.max(std_dat[:,0]),np.max(exp_dat[:,0])])
istdmin=np.argwhere(std_dat[:,0]>=xmin)[0][0]
istdmax=np.argwhere(std_dat[:,0]<=xmax)[-1][0]
expdmin=np.argwhere(exp_dat[:,0]>=xmin)[0][0]
expdmax=np.argwhere(exp_dat[:,0]<=xmax)[-1][0]
xmin=np.max([std_dat[istdmin,0],exp_dat[expdmin,0]])
xmax=np.min([std_dat[istdmax,0],exp_dat[expdmax,0]])
x=np.linspace(1.05*xmin,0.95*xmax,100)
istdf=interp1d(std_dat[:,0],std_dat[:,1],kind=interpolation_type)
expdf=interp1d(exp_dat[:,0],exp_dat[:,1],kind=interpolation_type)
param=Parameters()
param.add('cf',value=1.0,vary=True)
res=minimize(fun,param,args=(x,istdf,expdf,thickness))
cf=res.params['cf'].value
#print(cf,qoff)
#cf=np.mean(istdf(x)/expdf(x))
if plot:
pylab.loglog(std_dat[:,0],std_dat[:,1],'r-',lw=3,label='NIST std')
pylab.loglog(x,istdf(x)-res.residual,'g-',lw=3,label='15IDD data')
pylab.xlabel(u'Q, \u212B$^{-1}$',fontsize=fs)
pylab.ylabel(u'I, cm$^{-1}$',fontsize=fs)
pylab.legend(loc='best',prop={'size':fs*0.6})
pylab.xticks(fontsize=fs)
pylab.yticks(fontsize=fs)
pylab.tight_layout()
pylab.show()
return energy,cf,x,istdf(x)
else:
        print('%s does not exist!' % fname)
| 19,365
|
def import_capitals_from_csv(path):
"""Imports a dictionary that maps country names to capital names.
@param string path: The path of the CSV file to import this data from.
@return dict: A dictionary of the format {"Germany": "Berlin", "Finland": "Helsinki", ...}
"""
capitals = {}
with open(path) as capitals_file:
reader = csv.reader(capitals_file)
for row in reader:
country, capital = row[0], row[1]
capitals[country] = capital
return capitals
| 19,366
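A self-contained round trip through a temporary file; the file name and rows are illustrative only.

import csv
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False, newline='') as f:
    csv.writer(f).writerows([["Germany", "Berlin"], ["Finland", "Helsinki"]])
capitals = import_capitals_from_csv(f.name)
assert capitals == {"Germany": "Berlin", "Finland": "Helsinki"}
os.remove(f.name)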
|
def read_config(config_file='config.ini'):
"""
Read the configuration file.
:param str config_file: Path to the configuration file.
    :return: the parsed ``configparser.ConfigParser`` object.
"""
if os.path.isfile(config_file) is False:
raise NameError(config_file, 'not found')
config = configparser.ConfigParser()
config.read(config_file)
return config
| 19,367
|
def deg_to_qcm2(p, deg):
"""Return the center-of-momentum momentum transfer q squared, in MeV^2.
Parameters
----------
    p : float
        relative momentum, given in MeV.
    deg : number
        angle, given in degrees.
"""
return (p * np.sqrt( 2 * (1 - np.cos(np.radians(deg))) ))**(2)
| 19,368
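Worked check of the formula: at 180 degrees cos(theta) = -1, so q = 2p and q^2 = 4p^2; at 0 degrees the momentum transfer vanishes.

p = 100.0                      # relative momentum in MeV
print(deg_to_qcm2(p, 180.0))   # ~40000.0 MeV^2  (= 4 * p**2)
print(deg_to_qcm2(p, 0.0))     # 0.0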
|
def i18n(request):
"""
Set client language preference, lasts for one month
"""
from django.conf import settings
next = request.META.get('HTTP_REFERER', None)
if not next:
next = settings.SITE_ROOT
lang = request.GET.get('lang', 'en')
res = HttpResponseRedirect(next)
res.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang, max_age=30*24*60*60)
return res
| 19,369
|
def plot_initial_density_profile(job):
"""Plot the initial plasma density profile."""
plot_density_profile(
make_gaussian_dens_func, job.fn("initial_density_profile.png"), job
)
| 19,370
|
def replace_suffix(input_filepath, input_suffix, output_suffix, suffix_delimiter=None):
""" Replaces an input_suffix in a filename with an output_suffix. Can be used
to generate or remove suffixes by leaving one or the other option blank.
TODO: Make suffixes accept regexes. Can likely replace suffix_delimiter after this.
TODO: Decide whether suffixes should extend across multiple directory levels.
Parameters
----------
input_filepath: str
The filename to be transformed.
input_suffix: str
The suffix to be replaced
output_suffix: str
The suffix to replace with.
suffix_delimiter: str
Optional, overrides input_suffix. Replaces whatever
comes after suffix_delimiter with output_suffix.
Returns
-------
output_filepath: str
The transformed filename
"""
split_filename = nifti_splitext(input_filepath)
if suffix_delimiter is not None:
input_suffix = str.split(split_filename[0], suffix_delimiter)[-1]
if input_suffix not in os.path.basename(input_filepath):
print 'ERROR!', input_suffix, 'not in input_filepath.'
return []
else:
if input_suffix == '':
prefix = split_filename[0]
else:
prefix = input_suffix.join(str.split(split_filename[0], input_suffix)[0:-1])
prefix = prefix + output_suffix
output_filepath = prefix + split_filename[1]
return output_filepath
| 19,371
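Illustrative calls (the file names are hypothetical, and nifti_splitext is assumed to split off the '.nii.gz'-style extension as the snippet implies):

replace_suffix('scan_raw.nii.gz', '_raw', '_normalized')   # -> 'scan_normalized.nii.gz'
replace_suffix('scan.nii.gz', '', '_mask')                 # -> 'scan_mask.nii.gz'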
|
def normal222(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
"""
for source and destination id generation
"""
"""
for type of banking work,label of fraud and type of fraud
"""
idvariz=random.choice(bb3)
idgirande=random.choice(zz2)
first.append("transfer")
second.append(idvariz)
third.append(idgirande)
sixth.append("0")
seventh.append("none")
"""
for amount of money generation
"""
numberofmoney=random.randrange(50000,money2)
forth.append(numberofmoney)
"""
for date and time generation randomly between two dates
"""
final=randomDate(startt,endt, random.random())
fifth.append(final)
return (first,second,third,forth,fifth,sixth,seventh)
| 19,372
|
def _convert_to_coreml(tf_model_path, mlmodel_path, input_name_shape_dict,
output_names):
""" Convert and return the coreml model from the Tensorflow
"""
model = tf_converter.convert(tf_model_path=tf_model_path,
mlmodel_path=mlmodel_path,
output_feature_names=output_names,
input_name_shape_dict=input_name_shape_dict)
return model
| 19,373
|
def do_push_button(self, url, params, commit, req_body_params=None):
"""Push a button by following its url
Args:
self: Required for getting a cookiejar and pagetext.
url (string): Target route that pushing the button would trigger.
params (dict): Request queries that go after the url.
commit (string): Name of the button (i.e. value in HTML).
req_body_params (dict): Dict of other vars to be
added to the request body.
"""
from urllib.parse import urlencode
cookie_string = ''
cookie_dict = dict(self.browser.mechsoup.get_cookiejar())
for key in cookie_dict:
if key != 'dash_auth':
cookie_string += urlencode({key: cookie_dict[key]}) + '; '
cookie_string += 'dash_auth=' + cookie_dict['dash_auth']
# Commit message cannot be url-encoded.
encoding = 'utf8=%E2%9C%93&'
mkiconf_vars = get_pagetext_mkiconf(self.browser.get_page().text)
authenticity_token = urlencode({
'authenticity_token': mkiconf_vars['authenticity_token']})
more_params = ''
if req_body_params:
more_params = '&' + urlencode(req_body_params)
request_body = encoding + authenticity_token + commit + more_params
header_dict = {
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': cookie_string,
'Upgrade-Insecure-Requests': '1',
'DNT': '1',
}
requests.request('POST', url=url, json=request_body,
headers=header_dict, params=params)
self.request_body = request_body
self.header_dict = header_dict
| 19,374
|
def xcode_select(xcode_app_path):
"""Switch the default Xcode system-wide to `xcode_app_path`.
Raises subprocess.CalledProcessError on failure.
To be mocked in tests.
"""
subprocess.check_call([
'sudo',
'xcode-select',
'-switch',
xcode_app_path,
])
| 19,375
|
def _tavella_randell_nonuniform_grid(x_min, x_max, x_star, num_grid_points,
alpha, dtype):
"""Creates non-uniform grid clustered around a specified point.
Args:
x_min: A real `Tensor` of shape `(dim,)` specifying the lower limit of the
grid.
x_max: A real `Tensor` of same shape and dtype as `x_min` specifying the
upper limit of the grid.
x_star: A real `Tensor` of same shape and dtype as `x_min` specifying the
location on the grid around which higher grid density is desired.
num_grid_points: A scalar integer `Tensor` specifying the number of points
on the grid.
alpha: A scalar parameter which controls the degree of non-uniformity of the
grid. The smaller values of `alpha` correspond to greater degree of
clustering around `x_star`.
dtype: The default dtype to use when converting values to `Tensor`s.
Returns:
A real `Tensor` of shape `(dim, num_grid_points+1)` containing the
non-uniform grid.
"""
c1 = tf.math.asinh((x_min - x_star) / alpha)
c2 = tf.math.asinh((x_max - x_star) / alpha)
i = tf.expand_dims(tf.range(0, num_grid_points + 1, 1, dtype=dtype), axis=-1)
grid = x_star + alpha * tf.math.sinh(c2 * i / num_grid_points + c1 *
(1 - i / num_grid_points))
# reshape from (num_grid_points+1, dim) to (dim, num_grid_points+1)
return tf.transpose(grid)
| 19,376
|
def node(func):
"""Decorator for functions which should get currentIndex node if no arg is passed"""
@functools.wraps(func)
def node_wrapper(self, *a, **k):
n = False
keyword = True
# Get node from named parameter
if 'node' in k:
n = k['node']
# Or from the first unnamed argument
elif len(a) >= 1:
n = a[0]
keyword = False
# If node was not specified, get from currentIndex
if n in (None, False):
n = self.model().data(self.currentIndex(), role=QtUserRole)
logging.debug('@node not specified: got selected', n)
elif isDataset(n):
n = docname(n)
logging.debug('@node was a dataset: found path', n)
# If node was expressed as/converted to string, get its corresponding
# tree entry
if isinstance(n, str) or isinstance(n, unicode):
logging.debug('traversing node', n)
n = str(n)
n = self.model().tree.traverse(n)
if keyword:
k['node'] = n
else:
a = list(a)
a[0] = n
a = tuple(a)
logging.debug('@node returning', n, type(n), isinstance(n, unicode))
return func(self, *a, **k)
return node_wrapper
| 19,377
|
def tourme_details():
""" Display Guides loan-details """
return render_template('tourme_details.html', id=str(uuid.uuid4()))
| 19,378
|
def gen_dot_ok(notebook_path, endpoint):
"""
    Generates the .ok file and returns its name
Args:
notebook_path (``pathlib.Path``): the path to the notebook
endpoint (``str``): an endpoint specification for https://okpy.org
Returns:
``str``: the name of the .ok file
"""
assert notebook_path.suffix == '.ipynb', notebook_path
ok_path = notebook_path.with_suffix('.ok')
name = notebook_path.stem
src = [notebook_path.name]
with open(ok_path, 'w') as out:
json.dump({
"name": name,
"endpoint": endpoint,
"src": src,
"tests": {
"tests/q*.py": "ok_test"
},
"protocols": [
"file_contents",
"grading",
"backup"
]
}, out)
return ok_path.name
| 19,379
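A hedged usage sketch; the endpoint string is illustrative, and only the notebook's path is inspected (the .ok file is written next to it), so the notebook itself does not have to exist:

from pathlib import Path

ok_name = gen_dot_ok(Path("hw01.ipynb"), "cal/data8/sp21/hw01")
print(ok_name)  # "hw01.ok"; the JSON config now lists "src": ["hw01.ipynb"]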
|
def sarig_methods_wide(
df: pd.DataFrame, sample_id: str, element_id: str,
) -> pd.DataFrame:
"""Create a corresponding methods table to match the pivoted wide form data.
.. note::
This requires the input dataframe to already have had methods mapping applied
by running ``pygeochemtools.geochem.create_dataset.add_sarig_chem_method``
function.
Args:
df (pd.DataFrame): Dataframe containing long form data.
sample_id (str): Name of column containing sample ID's.
element_id (str): Name of column containing geochemical element names.
Returns:
pd.DataFrame: Dataframe with mapped geochemical methods converted to wide form
with one method per sample.
"""
# grab duplicate values
duplicate_df = df[df.duplicated(subset=[sample_id, element_id], keep="last")]
df = df.drop_duplicates(subset=[sample_id, element_id])
method_code = (
df.pivot(index=[sample_id], columns=element_id, values=["CHEM_METHOD_CODE"],)
.add_suffix("_METHOD_CODE")
.droplevel(0, axis=1)
)
determination = (
df.pivot(index=[sample_id], columns=element_id, values=["DETERMINATION"],)
.add_suffix("_DETERMINATION")
.droplevel(0, axis=1)
)
digestion = (
df.pivot(index=[sample_id], columns=element_id, values=["DIGESTION"],)
.add_suffix("_DIGESTION")
.droplevel(0, axis=1)
)
fusion = (
df.pivot(index=[sample_id], columns=element_id, values=["FUSION"],)
.add_suffix("_FUSION")
.droplevel(0, axis=1)
)
assert (
method_code.columns.size
== determination.columns.size # noqa: W503
== digestion.columns.size # noqa: W503
== fusion.columns.size # noqa: W503
), "pivoted column lengths aren't equal"
c = np.empty(
(
method_code.columns.size
+ determination.columns.size # noqa: W503
+ digestion.columns.size # noqa: W503
+ fusion.columns.size, # noqa: W503
),
dtype=object,
)
c[0::4], c[1::4], c[2::4], c[3::4] = (
method_code.columns,
determination.columns,
digestion.columns,
fusion.columns,
)
df_wide = pd.concat([method_code, determination, digestion, fusion], axis=1)[c]
if not duplicate_df.empty:
try:
dup_method_code = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["CHEM_METHOD_CODE"],
)
.add_suffix("_METHOD_CODE")
.droplevel(0, axis=1)
)
dup_determination = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["DETERMINATION"],
)
.add_suffix("_DETERMINATION")
.droplevel(0, axis=1)
)
dup_digestion = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["DIGESTION"],
)
.add_suffix("_DIGESTION")
.droplevel(0, axis=1)
)
dup_fusion = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["FUSION"],
)
.add_suffix("_FUSION")
.droplevel(0, axis=1)
)
except ValueError as e:
print(
"There were duplicate duplicates in the method list. \
So no duplicates have been included in the output",
e,
)
else:
assert (
dup_method_code.columns.size
== dup_determination.columns.size # noqa: W503
== dup_digestion.columns.size # noqa: W503
== dup_fusion.columns.size # noqa: W503
), "pivoted column lengths aren't equal"
d = np.empty(
(
dup_method_code.columns.size
+ dup_determination.columns.size # noqa: W503
+ dup_digestion.columns.size # noqa: W503
+ dup_fusion.columns.size, # noqa: W503
),
dtype=object,
)
d[0::4], d[1::4], d[2::4], d[3::4] = (
dup_method_code.columns,
dup_determination.columns,
dup_digestion.columns,
dup_fusion.columns,
)
dup_df_wide = pd.concat(
[dup_method_code, dup_determination, dup_digestion, dup_fusion], axis=1
)[d]
df_wide = df_wide.append(dup_df_wide).sort_values(by=sample_id)
return df_wide
| 19,380
|
def scan_torsion(resolution, unique_conformers=[]):
"""
"""
# Load the molecule
sdf_filename = "sdf/pentane.sdf"
suppl = Chem.SDMolSupplier(sdf_filename, removeHs=False, sanitize=True)
mol = next(suppl)
# Get molecule information
# n_atoms = mol.GetNumAtoms()
atoms = mol.GetAtoms()
# atoms = [atom.GetAtomicNum() for atom in atoms]
atoms = [atom.GetSymbol() for atom in atoms]
# Origin
conformer = mol.GetConformer()
origin = conformer.GetPositions()
origin -= rmsd.centroid(origin)
# Dihedral angle
a = 0
b = 1
c = 5
d = 8
# Origin angle
origin_angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, a, b, c, d)
# Setup forcefield
mp = ChemicalForceFields.MMFFGetMoleculeProperties(mol)
ff = ChemicalForceFields.MMFFGetMoleculeForceField(mol, mp)
# Define all angles to scan
angles = get_angles(resolution)
debug_file = "test.xyz"
debug_file = open(debug_file, 'a+')
if len(unique_conformers) == 0:
xyz = rmsd.calculate_rmsd.set_coordinates(atoms, origin)
debug_file.write(xyz)
debug_file.write("\n")
unique_conformers = [origin]
for angle in angles:
# Reset position
for i, pos in enumerate(origin):
conformer.SetAtomPosition(i, pos)
# Set clockwork angle
Chem.rdMolTransforms.SetDihedralDeg(conformer, a, b, c, d, origin_angle + angle)
# Setup constrained ff
ffc = ChemicalForceFields.MMFFGetMoleculeForceField(mol, mp)
ffc.MMFFAddTorsionConstraint(a, b, c, d, False,
origin_angle+angle, origin_angle + angle, 1.0e10)
ffc.Minimize(maxIts=1000, energyTol=1e-2, forceTol=1e-3)
# angle1 = Chem.rdMolTransforms.GetDihedralDeg(conformer, a, b, c, d)
ff.Minimize(maxIts=1000, energyTol=1e-2, forceTol=1e-4)
# angle2 = Chem.rdMolTransforms.GetDihedralDeg(conformer, a, b, c, d)
pos = conformer.GetPositions()
pos -= rmsd.centroid(pos)
print("debug", len(unique_conformers))
unique = compare_positions(pos, unique_conformers)
if not unique:
continue
pos = align(pos, origin)
unique_conformers.append(pos)
xyz = rmsd.calculate_rmsd.set_coordinates(atoms, pos)
debug_file.write(xyz)
debug_file.write("\n")
print(angle, unique)
debug_file.close()
return unique_conformers
| 19,381
|
def get_and_update_versions ():
""" Gets current version information for each component,
updates the version files, creates changelog entries,
and commit the changes into the repository."""
try:
get_comp_versions ("ACE")
get_comp_versions ("TAO")
get_comp_versions ("CIAO")
get_comp_versions ("DAnCE")
files = list ()
files += update_version_files ("ACE")
files += update_version_files ("TAO")
files += update_version_files ("CIAO")
files += update_version_files ("DAnCE")
files += create_changelog ("ACE")
files += create_changelog ("TAO")
files += create_changelog ("CIAO")
files += create_changelog ("DAnCE")
files += update_spec_file ()
files += update_debianbuild ()
print "Committing " + str(files)
commit (files)
except:
print "Fatal error in get_and_update_versions."
raise
| 19,382
|
def vflip_box(box: TensorOrArray, image_center: TensorOrArray) -> TensorOrArray:
"""Flip boxes vertically, which are specified by their (cx, cy, w, h) norm
coordinates.
Reference:
https://blog.paperspace.com/data-augmentation-for-bounding-boxes/
Args:
box (TensorOrArray[B, 4]):
Boxes to be flipped.
image_center (TensorOrArray[4]):
Center of the image.
Returns:
box (TensorOrArray[B, 4]):
Flipped boxes.
"""
box[:, [1, 3]] += 2 * (image_center[[0, 2]] - box[:, [1, 3]])
box_h = abs(box[:, 1] - box[:, 3])
box[:, 1] -= box_h
box[:, 3] += box_h
return box
| 19,383
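A numeric sketch: the arithmetic mirrors columns 1 and 3 (the two y-values of corner-style boxes), so the example uses (x1, y1, x2, y2) boxes and a square image whose centre components all coincide, sidestepping the (cx, cy, w, h) wording in the docstring.

import numpy as np

boxes = np.array([[10.0, 20.0, 30.0, 40.0]])   # one box in a 100x100 image
image_center = np.array([50.0, 50.0, 50.0, 50.0])
print(vflip_box(boxes.copy(), image_center))   # [[10. 60. 30. 80.]]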
|
def example_data():
"""Example data setup"""
tdata = (
pathlib.Path(__file__).parent.absolute() / "data" / "ident-example-support.txt"
)
return tdata
| 19,384
|
def add_ending_slash(directory: str) -> str:
"""add_ending_slash function
Args:
directory (str): directory that you want to add ending slash
Returns:
str: directory name with slash at the end
Examples:
>>> add_ending_slash("./data")
"./data/"
"""
if directory[-1] != "/":
directory = directory + "/"
return directory
| 19,385
|
def init_logging():
""" Initialize logging """
logger = logging.getLogger("")
# Make sure the logging path exists, create if not
logdir = os.path.dirname(CES_SETTINGS['logFile'])
if logdir and not os.path.exists(logdir):
print "Logging directory '%s' doesn't exist, creating it now." % logdir
os.makedirs(logdir)
# Heck, let's create the oauth storage here as well
oauthdir = os.path.dirname(CES_SETTINGS['oauthStorage'])
if oauthdir and not os.path.exists(oauthdir):
print "Oauth2 token storage directory '%s' doesn't exist, creating it now." % oauthdir
os.makedirs(oauthdir)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
filehandler = logging.handlers.RotatingFileHandler(filename=CES_SETTINGS['logFile'],
maxBytes=(5 * 1024 * 1024), backupCount=10)
handler.setLevel(logging.INFO)
filehandler.setLevel(logging.INFO)
formatter = logging.Formatter(_LOG_FORMATTER_STRING)
handler.setFormatter(formatter)
filehandler.setFormatter(formatter)
if DEBUG:
debugfilehandler = logging.handlers.RotatingFileHandler(filename=CES_SETTINGS['logFile'] + ".debug",
maxBytes=(5 * 1024 * 1024), backupCount=10)
debugfilehandler.setLevel(logging.DEBUG)
debugfilehandler.setFormatter(formatter)
logger.addHandler(debugfilehandler)
logger.addHandler(handler)
logger.addHandler(filehandler)
logger.debug("Logging started.")
| 19,386
|
def antenna_positions():
"""
Generate antenna positions for a regular rectangular array, then return
baseline lengths.
    Uses the module-level settings:
    - Nx, Ny : No. of antennas in the x and y directions
    - Dmin : Separation between neighbouring antennas
"""
# Generate antenna positions on a regular grid
x = np.arange(Nx) * Dmin
y = np.arange(Ny) * Dmin
xx, yy = np.meshgrid(x, y)
# Calculate baseline separations
xy = np.column_stack( (xx.flatten(), yy.flatten()) )
d = scipy.spatial.distance.pdist(xy)
return d
| 19,387
|
def main():
"""
A simple program that plots phonon output.
"""
from numpy import zeros, log10
infile=raw_input('Infile name: ')
fin=open(infile,'r')
hdr=fin.readline().strip('\n').split()
x1=float(hdr[0])
x2=float(hdr[1])
nxdim=int(hdr[2])
t1=float(hdr[3])
t2=float(hdr[4])
ntdim=int(hdr[5])
rbin=zeros(ntdim*nxdim).reshape(ntdim,nxdim)
for ix in range(nxdim):
rbin[:,ix]=fin.readline().strip('\n').split()
lbin=log10(rbin)
import matplotlib
matplotlib.use('TkAGG')
from matplotlib import pylab as plt
plt.imshow(lbin,extent=[x1,x2,t1,t2],aspect='auto',origin='lower')
cbar=plt.colorbar()
cbar.set_label('$\log_{10}$[ Amplitude ]')
plt.xlabel('Distance (degrees)')
plt.ylabel('Time (s)')
plt.savefig('mypost.eps')
| 19,388
|
def folds_to_list(folds: Union[list, str, pd.Series]) -> List[int]:
"""
This function formats string or either list of numbers
into a list of unique int
Args:
folds (Union[list, str, pd.Series]): Either list of numbers or
one string with numbers separated by commas or
pandas series
Returns:
List[int]: list of unique ints
Examples:
>>> folds_to_list("1,2,1,3,4,2,4,6")
[1, 2, 3, 4, 6]
>>> folds_to_list([1, 2, 3.0, 5])
[1, 2, 3, 5]
Raises:
ValueError: if value in string or array cannot be casted to int
"""
if isinstance(folds, str):
folds = folds.split(",")
elif isinstance(folds, pd.Series):
folds = list(sorted(folds.unique()))
return list({int(x) for x in folds})
| 19,389
|
def predict_all(x, model, config, spline):
"""
Predict full scene using average predictions.
Args:
x (numpy.array): image array
        model (tf h5): trained model used for prediction
config (Config):
spline (numpy.array):
Return:
prediction scene array average probabilities
----------
Example
----------
predict_all(x, model, config, spline)
"""
for i in range(8):
if i == 0: # reverse first dimension
x_seg = predict_windowing(
x[::-1, :, :], model, config, spline=spline
).transpose([2, 0, 1])
elif i == 1: # reverse second dimension
temp = predict_windowing(
x[:, ::-1, :], model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp[:, ::-1, :] + x_seg
elif i == 2: # transpose(interchange) first and second dimensions
temp = predict_windowing(
x.transpose([1, 0, 2]), model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp.transpose(0, 2, 1) + x_seg
gc.collect()
elif i == 3:
temp = predict_windowing(
np.rot90(x, 1), model, config, spline=spline
)
x_seg = np.rot90(temp, -1).transpose([2, 0, 1]) + x_seg
gc.collect()
elif i == 4:
temp = predict_windowing(
np.rot90(x, 2), model, config, spline=spline
)
x_seg = np.rot90(temp, -2).transpose([2, 0, 1]) + x_seg
elif i == 5:
temp = predict_windowing(
np.rot90(x, 3), model, config, spline=spline
)
x_seg = np.rot90(temp, -3).transpose(2, 0, 1) + x_seg
elif i == 6:
temp = predict_windowing(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
elif i == 7:
temp = predict_sliding(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
gc.collect()
del x, temp # delete arrays
x_seg /= 8.0
return x_seg.argmax(axis=0)
| 19,390
|
def pre_arrange_cols(dataframe):
"""
    Move the original column header into the data as the first row and rename
    the single column to 'all'.
    :param dataframe: single-column DataFrame whose header is actually a data value
    :return: the re-arranged DataFrame
"""
col_name = dataframe.columns.values[0]
dataframe.loc[-1] = col_name
dataframe.index = dataframe.index + 1
dataframe = dataframe.sort_index()
dataframe = dataframe.rename(index=str, columns={col_name: 'all'})
return dataframe
| 19,391
|
def wDot(x,y,h):
"""
Compute the parallel weighted dot product of vectors x and y using
weight vector h.
The weighted dot product is defined for a weight vector
:math:`\mathbf{h}` as
.. math::
(\mathbf{x},\mathbf{y})_h = \sum_{i} h_{i} x_{i} y_{i}
All weight vector components should be positive.
:param x,y,h: numpy arrays for vectors and weight
:return: the weighted dot product
"""
return globalSum(numpy.sum(x*y*h))
| 19,392
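Serial sanity check: with unit weights the weighted dot product reduces to the ordinary dot product. This assumes globalSum degenerates to a plain sum when running on a single process.

import numpy

x = numpy.array([1.0, 2.0, 3.0])
y = numpy.array([4.0, 5.0, 6.0])
h = numpy.ones_like(x)
# wDot(x, y, h) == 32.0 == numpy.dot(x, y)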
|
def dataframify(transform):
"""
    Decorator that transforms the output of scikit-learn feature normalizers from an array to a dataframe,
    preserving the original column names and index.
Args:
transform: (function), a scikit-learn feature selector that has a transform method
Returns:
new_transform: (function), an amended version of the transform method that returns a dataframe
"""
@wraps(transform)
def new_transform(self, df):
arr = transform(self, df.values)
return pd.DataFrame(arr, columns=df.columns, index=df.index)
return new_transform
| 19,393
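A minimal sketch of applying the decorator to a scikit-learn-style normalizer; MeanCenterer is a stand-in class invented for this example, not part of the original codebase.

import pandas as pd

class MeanCenterer:
    @dataframify
    def transform(self, X):
        return X - X.mean(axis=0)   # receives df.values, returns an ndarray

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
centered = MeanCenterer().transform(df)
print(list(centered.columns))        # ['a', 'b'] -- column names preserved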
|
def add_corrected_pages_summary_panel(request, items):
"""Replaces the Pages summary panel to hide variants."""
for index, item in enumerate(items):
if item.__class__ is PagesSummaryItem:
items[index] = CorrectedPagesSummaryItem(request)
| 19,394
|
def plot_map(self, map, update=False):
"""
map plotting
Parameters
----------
map : ndarray
map to plot
update : Bool
updating the map or plotting from scratch
"""
import matplotlib.pyplot as plt
if update:
empty=np.empty(np.shape(self.diagnostics[self.diagnostic]))
empty[:]=np.nan
self.map.set_data(empty)
return self.map_window.imshow(map, origin='lower', interpolation='nearest',cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
else:
return self.map_window.imshow(map, origin='lower', interpolation='nearest',cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
| 19,395
|
def definition_activate(connection, args):
"""Activate Business Service Definition"""
activator = sap.cli.wb.ObjectActivationWorker()
activated_items = ((name, sap.adt.ServiceDefinition(connection, name)) for name in args.name)
return sap.cli.object.activate_object_list(activator, activated_items, count=len(args.name))
| 19,396
|
def extractCompositeFigureStrings(latexString):
"""
Returns a list of latex figures as strings stripping out captions.
"""
# extract figures
figureStrings = re.findall(r"\\begin{figure}.*?\\end{figure}", latexString, re.S)
# filter composite figures only and remove captions (preserving captions in subfigures)
figureStrings = [
re.findall(r"\\begin{figure}.*(?=\n.*\\caption)", figureString, re.S)[0] + "\n\\end{figure}"
for figureString in figureStrings if "\\begin{subfigure}" in figureString
]
return figureStrings
| 19,397
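A small check string (the LaTeX content is illustrative): the top-level caption is stripped while the subfigure caption survives.

latex = r"""
\begin{figure}
  \begin{subfigure}{0.5\textwidth}
    \caption{Left panel.}
  \end{subfigure}
  \caption{A composite figure.}
\end{figure}
"""
print(extractCompositeFigureStrings(latex)[0])
# \begin{figure}
#   \begin{subfigure}{0.5\textwidth}
#     \caption{Left panel.}
#   \end{subfigure}
# \end{figure}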
|
def osm_net_download(
polygon=None,
north=None,
south=None,
east=None,
west=None,
network_type="all_private",
timeout=180,
memory=None,
date="",
max_query_area_size=50 * 1000 * 50 * 1000,
infrastructure='way["highway"]',
):
"""
Download OSM ways and nodes within some bounding box from the Overpass API.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
geographic shape to fetch the street network within
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
network_type : string
{'walk', 'bike', 'drive', 'drive_service', 'all', 'all_private'} what
type of street network to get
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
date : string
query the database at a certain timestamp
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is in:
any polygon bigger will get divided up for multiple queries to API
(default is 50,000 * 50,000 units [ie, 50km x 50km in area, if units are
meters])
infrastructure : string
download infrastructure of given type. default is streets, ie,
'way["highway"]') but other infrastructures may be selected like power
grids, ie, 'way["power"~"line"]'
Returns
-------
response_jsons : list
"""
# check if we're querying by polygon or by bounding box based on which
# argument(s) where passed into this function
by_poly = polygon is not None
by_bbox = not (
north is None or south is None or east is None or west is None
)
if not (by_poly or by_bbox):
raise ValueError(
"You must pass a polygon or north, south, east, and west"
)
# create a filter to exclude certain kinds of ways based on the requested
# network_type
osm_filter = ox.get_osm_filter(network_type)
response_jsons = []
# pass server memory allocation in bytes for the query to the API
# if None, pass nothing so the server will use its default allocation size
# otherwise, define the query's maxsize parameter value as whatever the
# caller passed in
if memory is None:
maxsize = ""
else:
maxsize = "[maxsize:{}]".format(memory)
# define the query to send the API
# specifying way["highway"] means that all ways returned must have a highway
# key. the {filters} then remove ways by key/value. the '>' makes it recurse
# so we get ways and way nodes. maxsize is in bytes.
if by_bbox:
# turn bbox into a polygon and project to local UTM
polygon = Polygon(
[(west, south), (east, south), (east, north), (west, north)]
)
geometry_proj, crs_proj = ox.project_geometry(polygon)
# subdivide it if it exceeds the max area size (in meters), then project
# back to lat-long
geometry_proj_consolidated_subdivided = ox.consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size
)
geometry, _ = ox.project_geometry(
geometry_proj_consolidated_subdivided,
crs=crs_proj,
to_latlong=True,
)
log(
"Requesting network data within bounding box from API in {:,} request(s)".format(
len(geometry)
)
)
start_time = time.time()
# loop through each polygon rectangle in the geometry (there will only
# be one if original bbox didn't exceed max area size)
for poly in geometry:
# represent bbox as south,west,north,east and round lat-longs to 8
# decimal places (ie, within 1 mm) so URL strings aren't different
# due to float rounding issues (for consistent caching)
west, south, east, north = poly.bounds
query_template = (
date
+ "[out:json][timeout:{timeout}]{maxsize};"
+ "({infrastructure}{filters}"
+ "({south:.8f},{west:.8f},{north:.8f},{east:.8f});>;);out;"
)
query_str = query_template.format(
north=north,
south=south,
east=east,
west=west,
infrastructure=infrastructure,
filters=osm_filter,
timeout=timeout,
maxsize=maxsize,
)
response_json = ox.overpass_request(
data={"data": query_str}, timeout=timeout
)
response_jsons.append(response_json)
log(
"Got all network data within bounding box from API in {:,} request(s) and {:,.2f} seconds".format(
len(geometry), time.time() - start_time
)
)
elif by_poly:
# project to utm, divide polygon up into sub-polygons if area exceeds a
# max size (in meters), project back to lat-long, then get a list of
# polygon(s) exterior coordinates
geometry_proj, crs_proj = ox.project_geometry(polygon)
geometry_proj_consolidated_subdivided = ox.consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size
)
geometry, _ = ox.project_geometry(
geometry_proj_consolidated_subdivided,
crs=crs_proj,
to_latlong=True,
)
polygon_coord_strs = ox.get_polygons_coordinates(geometry)
log(
"Requesting network data within polygon from API in {:,} request(s)".format(
len(polygon_coord_strs)
)
)
start_time = time.time()
# pass each polygon exterior coordinates in the list to the API, one at
# a time
for polygon_coord_str in polygon_coord_strs:
query_template = (
date
+ '[out:json][timeout:{timeout}]{maxsize};({infrastructure}{filters}(poly:"{polygon}");>;);out;'
)
query_str = query_template.format(
polygon=polygon_coord_str,
infrastructure=infrastructure,
filters=osm_filter,
timeout=timeout,
maxsize=maxsize,
)
response_json = ox.overpass_request(
data={"data": query_str}, timeout=timeout
)
response_jsons.append(response_json)
log(
"Got all network data within polygon from API in {:,} request(s) and {:,.2f} seconds".format(
len(polygon_coord_strs), time.time() - start_time
)
)
return response_jsons
| 19,398
|
def _find_results_files(source_path: str, search_depth: int = 2) -> list:
"""Looks for results.json files in the path specified
Arguments:
source_path: the path to use when looking for result files
search_depth: the maximum folder depth to search
Return:
Returns a list containing found files
Notes:
A search depth of less than 2 will not recurse into sub-folders; a search depth of 2 will only recurse into
immediate sub-folders and no deeper; a search depth of 3 will recurse into the sub-folders of sub-folders; and
so on
"""
res_name = 'results.json'
res_name_len = len(res_name)
# Common expression declared once outside of recursion
    # Checks that the path ends with res_name and that the enclosing path is an existing directory
name_check_passes = lambda name: name.endswith(res_name) and os.path.isdir(name[:-res_name_len])
if not source_path:
return []
# Declare embedded function to do the work
def perform_recursive_find(path: str, depth: int) -> list:
"""Recursively finds results files
Arguments:
path: the path to check for results files
depth: the maximum folder depth to recurse (starting at 1)
Return:
Returns a list of found files
"""
return_list = []
# Basic checks
if os.path.isfile(path):
if name_check_passes(path):
logging.debug("Result file check specified result file: '%s'", path)
return [path]
logging.debug("Result file check name is not valid: '%s'", path)
return return_list
# We only process folders after the above checks
if not os.path.isdir(path):
logging.debug("Error: result file check path is not a file or folder: '%s'", path)
return return_list
# Loop over current folder looking for other folders and for a results file
for one_name in os.listdir(path):
check_name = os.path.join(path, one_name)
if name_check_passes(check_name):
logging.debug("Found result file: '%s'", check_name)
return_list.append(check_name)
elif depth > 1 and os.path.isdir(check_name):
logging.debug("Searching folder for result files: '%s'", check_name)
found_results = perform_recursive_find(check_name, depth - 1)
if found_results:
return_list.extend(found_results)
return return_list
# Find those files!
return perform_recursive_find(source_path, search_depth)
| 19,399
|