| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
import os
import numpy as np
def Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape_lsc,ClipLandsat,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn,dr,Landsat_nr, proyDEM_fileName):
"""
This function calculates and returns the reflectance and spectral radiance from the Landsat image.
"""
Spec_Rad = np.zeros((shape_lsc[1], shape_lsc[0], 7))
Reflect = np.zeros((shape_lsc[1], shape_lsc[0], 7))
for band in Bands[:-(len(Bands)-6)]:
# Open original Landsat image for the band number
src_FileName = os.path.join(input_folder, '%s_B%1d.TIF'
% (Name_Landsat_Image, band))
ls_data=Open_landsat(src_FileName, proyDEM_fileName)
ls_data = ls_data*ClipLandsat
# stats = band_data.GetStatistics(0, 1)
index = np.where(Bands[:-(len(Bands)-6)] == band)[0][0]
if Landsat_nr == 8:
# Spectral radiance for each band:
L_lambda = Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
# Reflectivity for each band:
rho_lambda = Landsat_rho_lambda(L_lambda, ESUN_L8, index, cos_zn, dr)
elif Landsat_nr == 7:
# Spectral radiance for each band:
L_lambda=Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
# Reflectivity for each band:
rho_lambda = Landsat_rho_lambda(L_lambda, ESUN_L7, index, cos_zn, dr)
elif Landsat_nr == 5:
# Spectral radiance for each band:
L_lambda=Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
# Reflectivity for each band:
rho_lambda =Landsat_rho_lambda(L_lambda, ESUN_L5, index, cos_zn, dr)
else:
print('Landsat image not supported, use Landsat 5, 7 or 8')
Spec_Rad[:, :, index] = L_lambda
Reflect[:, :, index] = rho_lambda
Reflect = Reflect.clip(0.0, 1.0)
return(Reflect,Spec_Rad)
|
4c6e49e22ac2b4b12dece71c3c3e73afdc72d0ca
| 3,639,300
|
def second(lst):
"""Same as first(nxt(lst)).
"""
return first(nxt(lst))
|
aa49e089a06a4b3e7d781966d8b4f98b7fe15841
| 3,639,301
|
import cv2
import numpy as np
from PIL import Image
def gaussian_noise(height, width):
"""
Create a background with Gaussian noise (to mimic paper)
"""
# We create an all white image
image = np.ones((height, width)) * 255
# We add gaussian noise
cv2.randn(image, 235, 10)
return Image.fromarray(image).convert("RGBA")
|
6243fde57b3e7415edc2024eebbe10f059b93a55
| 3,639,302
|
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 1, x1:x2] = color
image[y2:y2 + 1, x1:(x2+1)] = color
image[y1:y2, x1:x1 + 1] = color
image[y1:y2, x2:x2 + 1] = color
return image
|
4d1e713c6cb6a3297b4f7d8ab9682205947770da
| 3,639,303
|
def get_statuses_one_page(weibo_client, max_id=None):
"""获取一页发布的微博
"""
if max_id:
statuses = weibo_client.statuses.user_timeline.get(max_id=max_id)
else:
statuses = weibo_client.statuses.user_timeline.get()
return statuses
|
4a214489aa5696c9683c9cfa96d79ee169135eb5
| 3,639,304
|
def do_nothing(ax):
"""Do not add any watermark."""
return ax
|
6fbe32dc45ca1a945e1c45bf0319770c4d683397
| 3,639,305
|
def exec_lm_pipe(taskstr):
"""
Input: taskstr contains LM calls separated by ;
Used for execute config callback parameters (IRQs and BootHook)
"""
try:
# Handle config default empty value (do nothing)
if taskstr.startswith('n/a'):
return True
# Execute individual commands - msgobj->"/dev/null"
for cmd in (cmd.strip().split() for cmd in taskstr.split(';')):
if not exec_lm_core_schedule(cmd):
console_write("|-[LM-PIPE] task error: {}".format(cmd))
except Exception as e:
console_write("[IRQ-PIPE] error: {}\n{}".format(taskstr, e))
errlog_add('exec_lm_pipe error: {}'.format(e))
return False
return True
|
8854b5de0f408caf9292aecbcfa261744166e744
| 3,639,306
|
def term_size():
"""Print out a sequence of ANSI escape code which will report back the
size of the window.
"""
# ESC 7 - Save cursor position
# ESC 8 - Restore cursor position
# ESC [r - Enable scrolling for entire display
# ESC [row;colH - Move to cursor position
# ESC [6n - Device Status Report - send ESC [row;colR
repl= None
if 'repl_source' in dir(pyb):
repl = pyb.repl_source()
if repl is None:
repl = pyb.USB_VCP()
repl.send(b'\x1b7\x1b[r\x1b[999;999H\x1b[6n')
pos = b''
while True:
char = repl.recv(1)
if char == b'R':
break
if char != b'\x1b' and char != b'[':
pos += char
repl.send(b'\x1b8')
(height, width) = [int(i, 10) for i in pos.split(b';')]
return height, width
|
bc0b09163b48f821315f52c52b0a58b6b5fb977a
| 3,639,307
|
def get_dashboard(request, project_id):
"""
Load Project Dashboard to display Latest Cost Estimate and List of Changes
"""
project = get_object_or_404(Project, id=project_id)
# required to determine permission of user,
# if not a project user then project owner
try:
project_user = ProjectUser.objects.get(
project=project, project_user=request.user)
except ProjectUser.DoesNotExist:
project_user = None
form = ChangeForm()
attachmentsForm = ChangeAttachmentsForm()
changes = Change.objects.filter(project_id=project_id)
# Calculations to display on dashboard
original_estimate = project.original_estimate
accepted_changes = Change.objects.filter(
project_id=project_id, change_status="A").aggregate(
Sum('change_cost'))['change_cost__sum']
if accepted_changes is None:
accepted_changes = 0
pending_changes = Change.objects.filter(
project_id=project_id, change_status="P").aggregate(
Sum('change_cost'))['change_cost__sum']
if pending_changes is None:
pending_changes = 0
wip_changes = Change.objects.filter(
project_id=project_id, change_status="WiP").aggregate(
Sum('change_cost'))['change_cost__sum']
if wip_changes is None:
wip_changes = 0
rejected_changes = Change.objects.filter(
project_id=project_id, change_status="R").aggregate(
Sum('change_cost'))['change_cost__sum']
if rejected_changes is None:
rejected_changes = 0
subtotal = original_estimate + accepted_changes
total = subtotal + pending_changes + wip_changes
context = {
'project': project,
'project_user': project_user,
'form': form,
'attachmentsForm': attachmentsForm,
'changes': changes,
'original_estimate': original_estimate,
'accepted_changes': accepted_changes,
'pending_changes': pending_changes,
'wip_changes': wip_changes,
'rejected_changes': rejected_changes,
'subtotal': subtotal,
'total': total,
}
return render(request, 'dashboard/project.html', context)
|
36257741b2ef220d35e4593bd080a82b4cc743a0
| 3,639,308
|
def _scan_real_end_loop(bytecode, setuploop_inst):
"""Find the end of loop.
Return the instruction offset.
"""
start = setuploop_inst.next
end = start + setuploop_inst.arg
offset = start
depth = 0
while offset < end:
inst = bytecode[offset]
depth += inst.block_effect
if depth < 0:
return inst.next
offset = inst.next
|
9cff8ab77563a871b86cdbb14236603ec58e04b6
| 3,639,309
|
def six_node_range_5_to_0_bst():
"""Six nodes covering range five to zero."""
b = BST([5, 4, 3, 2, 1, 0])
return b
|
1afe6c613b03def6dc9d8aed41624e40180e5ae5
| 3,639,310
|
def IndividualsInAlphabeticOrder(filename):
"""Checks if the names are in alphabetic order"""
with open(filename, 'r') as f:
lines = f.readlines()
individual_header = '# Individuals:\n'
if individual_header in lines:
individual_authors = lines[lines.index(individual_header) + 1:]
sorted_authors = sorted(individual_authors, key=str.casefold)
if sorted_authors == individual_authors:
print("Individual authors are sorted alphabetically.")
return True
else:
print("Individual authors are not sorted alphabetically."
" The expected order is:")
print(''.join(sorted_authors))
return False
else:
print("Cannot find line '# Individuals:' in file.")
return False
|
4753bbf41498373695f921555c8f01183dbb58dc
| 3,639,311
|
import mxnet
from mxnet.gluon.data.vision import transforms
from PIL import Image
def preprocess_img_imagenet(img_path):
"""Preprocessing required for ImageNet classification.
Reference:
https://github.com/onnx/models/tree/master/vision/classification/vgg
"""
img = Image.open(img_path)
img = mxnet.ndarray.array(img)
transform_fn = transforms.Compose(
[
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
img = transform_fn(img)
img = img.expand_dims(axis=0) # Batchify.
return img.asnumpy()
|
f181e3376f26ee14c6314a8a730e796eefb09e2e
| 3,639,312
|
import bpy
def create_lambertian(color):
"""
create a Lambertian material
"""
material = bpy.data.materials.new(name="Lambertian")
material.use_nodes = True
nodes = material.node_tree.nodes
# remove principled
material.node_tree.nodes.remove(
material.node_tree.nodes.get('Principled BSDF'))
# get material output
material_output = material.node_tree.nodes.get('Material Output')
# Add a diffuse shader and set its location:
diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.inputs['Color'].default_value = color
# link diffuse shader to material
material.node_tree.links.new(
material_output.inputs[0], diffuse_node.outputs[0])
return material
|
e291817853ec26d6767d8fd496ee5ced15ff87f2
| 3,639,313
|
def submission_view(request, locker_id, submission_id):
"""Displays an individual submission"""
submission = get_object_or_404(Submission, pk=submission_id)
newer = submission.newer()
newest = Submission.objects.newest(submission.locker)
if not newest:
newest = submission
oldest = Submission.objects.oldest(submission.locker)
if not oldest:
oldest = submission
older = submission.older()
discussion_enabled = submission.locker.discussion_enabled()
is_owner = submission.locker.owner == request.user
users_discussion = submission.locker.discussion_users_have_access()
users_workflow = submission.locker.workflow_users_can_edit()
workflow_enabled = submission.locker.workflow_enabled()
# generate a message to the user if the submission is deleted
if submission.deleted:
messages.warning(request,
u'<strong>Heads up!</strong> This submission has '
u'been deleted and <strong>will be permanently '
u'removed</strong> from the locker '
u'<strong>{}</strong>.'
u''.format(naturaltime(submission.purge_date)))
return render(request, 'datalocker/submission_view.html', {
'data': submission.data_dict(with_types=True),
'discussion_enabled': discussion_enabled,
'discussion_users_have_access': users_discussion or is_owner,
'newer': newer,
'newer_disabled': True if submission.id == newer.id else False,
'newest': newest,
'newest_disabled': True if submission.id == newest.id else False,
'older': older,
'older_disabled': True if submission.id == older.id else False,
'oldest': oldest,
'oldest_disabled': True if submission.id == oldest.id else False,
'sidebar_enabled': workflow_enabled or discussion_enabled,
'submission': submission,
'workflow_enabled': workflow_enabled,
'workflow_states': submission.locker.workflow_states(),
'workflow_state': submission.workflow_state,
'workflow_users_can_edit': users_workflow or is_owner,
})
|
f473c7ad2c59dfd27a96fa4478f6b9652e740296
| 3,639,314
|
from pathlib import Path
def add_filename_suffix(file_path: str, suffix: str) -> str:
"""
Append a suffix to the filename (before the extension).
Args:
file_path: The path (as a string) to whose filename we would like to add a suffix
suffix: The suffix to add
Returns: path with the suffix appended at the end of the filename and before the extension
"""
path = Path(file_path)
return str(path.parent.joinpath(path.stem + suffix).with_suffix(path.suffix))
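# Illustrative usage (assumption: not part of the original module): the suffix lands
# between the stem and the extension, with the parent directory preserved.
assert add_filename_suffix("data/report.csv", "_v2") == str(Path("data/report_v2.csv"))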
|
546bb95f694ee5d5cb26873428fcac8453df6a54
| 3,639,315
|
def list_dropdownTS(dic_df):
"""
Input: a dictionary specifying which variables to use and how to clean them.
It outputs a list with the possible pair solutions.
This function will populate a dropdown menu in the eventHandler function.
"""
l_choice = []
for key_cat, value_cat in dic_df['var_continuous'].items():
l_choice.append(value_cat['name'])
l_choice = ['-'] + l_choice
return l_choice
|
fcd0474fa6941438cb39c63aa7605f1b776fd538
| 3,639,316
|
import itertools
import random
import numpy as np
def get_voice_combinations(**kwargs):
"""
Gets k possible combinations of voices from a list of voice indexes. If k is None, it will return all possible
combinations. The combinations have a minimum size of min_n_voices_to_remove and a maximum size of
max_n_voices_to_remove. When choosing k combinations from all possible combinations, the probability
of choosing a combination with one number of voices over another can be passed with the prob list, where for a range
of voices to remove from 1 to 3, [1, 1, 1] indicates equal probability, [1, 1, 2] indicates that combinations with
3 voices have double the probability of being chosen, etc.
@param kwargs: see below
@return voice_idx_comb: combinations of voice indexes
"""
# list of voices to remove
voice_idx = kwargs.get("voice_idx", [0, 1, 2, 3, 4])
min_n_voices_to_remove = kwargs.get(
"min_n_voices_to_remove", 1) # min size of the combination
max_n_voices_to_remove = kwargs.get(
"max_n_voices_to_remove", 3) # max size of the combination
# prob of each n_voices_to_remove set in ascending order
prob = kwargs.get("prob", [1, 1, 1])
k = kwargs.get("k", 5) # max number of combinations to return
if len(voice_idx) < max_n_voices_to_remove:
max_n_voices_to_remove = len(voice_idx)
range_items = range(min_n_voices_to_remove, max_n_voices_to_remove + 1)
assert (len(prob) == len(
range_items)), "The prob list must be the same length as the range(min_n_voices_to_remove, max_n_voices_to_remove)"
voice_idx_comb = []
weights = []
for i, n_voices_to_remove in enumerate(range_items):
_voice_idx_comb = list(itertools.combinations(
voice_idx, n_voices_to_remove))
voice_idx_comb.extend(_voice_idx_comb)
_weights = list(np.repeat(prob[i], len(_voice_idx_comb)))
weights.extend(_weights)
if k is not None: # if there is no k, return all possible combinations
voice_idx_comb = random.choices(voice_idx_comb, weights=weights, k=k)
return list(voice_idx_comb)
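# Illustrative usage (assumption: not part of the original module). With three voices and
# combinations of size 1 or 2 drawn with equal weights, the pool has 6 combinations and
# random.choices samples k=4 of them with replacement:
example_combinations = get_voice_combinations(
    voice_idx=[0, 1, 2],
    min_n_voices_to_remove=1,
    max_n_voices_to_remove=2,
    prob=[1, 1],
    k=4,
)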
|
d3addbfe5023b5ee6e25f190c53b469593bb9ff4
| 3,639,317
|
def data(request):
"""This is a the main entry point to the Data tab."""
context = cache.get("data_tab_context")
if context is None:
context = data_context(request)
cache.set("data_tab_context", context, 29)
return render(request, "rundb/data/data.html", context)
|
2763617afc7d865acaf3f0dcbf9190bd084ad5ae
| 3,639,318
|
def setup_root(name: str) -> DLogger:
"""Create the root logger."""
logger = get_logger(name)
msg_format = "%(message)s"
level_style = {
"critical": {"color": "magenta", "bright": True, "bold": True},
"debug": {"color": "green", "bright": True, "bold": True},
"error": {"color": "red", "bright": True, "bold": True},
"info": {"color": 4, "bright": True, "bold": True},
"notice": {"color": "magenta", "bright": True, "bold": True},
"spam": {"color": "green", "faint": True},
"success": {"color": "green", "bright": True, "bold": True},
"verbose": {"color": "blue", "bright": True, "bold": True},
"warning": {"color": "yellow", "bright": True, "bold": True},
}
coloredlogs.install(fmt=msg_format, level_styles=level_style, level="INFO")
return logger
|
9cad79c254fcb8f075d549c457d7e09dacc9bb33
| 3,639,319
|
import typing
import pathlib
import pickle
def from_pickle(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> typing.Union[Categorization, HierarchicalCategorization]:
"""De-serialize Categorization or HierarchicalCategorization from a file written by
to_pickle.
Note that this uses the pickle module, which executes arbitrary code in the
provided file. Only load from pickle files that you trust."""
try:
spec = pickle.load(filepath)
except TypeError:
with open(filepath, "rb") as fd:
spec = pickle.load(fd)
return from_spec(spec)
|
e268f8c1467965bbba47c65ebba5f021171fc6ce
| 3,639,320
|
def recostruct(encoded, weights, bias):
"""
Reconstructor : Encoded -> Original
Not Functional
"""
weights.reverse()
for i,item in enumerate(weights):
encoded = encoded @ item.eval() + bias[i].eval()
return encoded
|
e17aeb6a819a6eec745c5dd811460049fa4a92cd
| 3,639,321
|
import math
def get_file_dataset_from_trixel_id(CatName,index,NfilesinHDF,Verbose=True):#get_file_var_from_htmid in Eran's library
"""Description: given a catalog basename and the index of a trixel and the number of trixels in an HDF5 file,
create the trixel dataset name
Input :- CatName
- index
- NfilesinHDF: number of datasets in an HDF5 file (default is 100)
Output :- Filename: name of the HDF5 file where the trixel_dataset is stored
- Datasetname: name of the trixel_dataset
example:
By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018"""
if Verbose==True:
print('index is',index)
num_file=math.floor(index/NfilesinHDF)*NfilesinHDF #equivalent to index//Nfiles*Nfiles
Filename='%s_htm_%06d.hdf5' % (CatName, num_file)
DatasetName='htm_%06d' % index
return Filename,DatasetName
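# Illustrative usage (assumption: not part of the original module; 'GAIA' is a hypothetical
# catalog name). For trixel index 1234 and 100 datasets per HDF5 file, the dataset lives
# in the file numbered 1200:
assert get_file_dataset_from_trixel_id('GAIA', 1234, 100, Verbose=False) == \
    ('GAIA_htm_001200.hdf5', 'htm_001234')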
|
b9d0482780ae2a191175f1549513f46c047bb1cf
| 3,639,322
|
def calc_element_column(NH, fmineral, atom, mineral, d2g=0.009):
"""
Calculate the column density of an element for a particular NH value,
assuming a dust-to-gas ratio (d2g) and the fraction of dust in that
particular mineral species (fmineral)
"""
dust_mass = NH * mp * d2g * fmineral # g cm^{-2}
print('Dust mass = %.3e g cm^-2' % (dust_mass))
return calc_mass_conversion(atom, mineral) * dust_mass
|
d1e24602e6d329132d59f300543f306502867fc1
| 3,639,323
|
def output_dot(sieve, column_labels=None, max_edges=None, filename='structure.dot'):
""" A network representation of the structure in Graphviz format. Units in the produced file
are in bits. Weight is the mutual information and tc is the total correlation.
"""
print """Compile by installing graphviz and running a command like:
sfdp %s -Tpdf -Earrowhead=none -Nfontsize=12 \\
-GK=2 -Gmaxiter=1000 -Goverlap=False -Gpack=True \\
-Gpackmode=clust -Gsep=0.02 -Gratio=0.7 -Gsplines=True -o structure.pdf""" % filename
if column_labels is None:
column_labels = map(unicode, range(sieve.n_variables))
else:
column_labels = map(unicode, column_labels)
f = open(filename, 'w')
f.write('strict digraph {\n'.encode('utf-8'))
for i, column_label in enumerate(column_labels):
line = '%s [label="%s", shape=none]\n' % ('X_' + column_label, column_label)
f.write(line.encode('utf-8'))
for j, layer in enumerate(sieve.layers):
this_tc = 0.6 * sieve.tcs[j] / np.max(sieve.tcs)
line = 'Y_%d [shape=circle,margin="0,0",style=filled,fillcolor=black,' \
'fontcolor=white,height=%0.3f,label=Y%d,tc=%0.3f]\n' % (j, this_tc, j+1, sieve.tcs[j] / np.log(2))
f.write(line.encode('utf-8'))
mis = sieve.mis
print 'mis', mis
if max_edges is None or max_edges > mis.size:
w_threshold = 0.
else:
w_threshold = -np.sort(-np.ravel(mis))[max_edges]
for j, layer in enumerate(sieve.layers):
for i in range(sieve.n_variables):
w = mis[j, i] / np.log(2)
if w > w_threshold:
line = '%s -> %s [penwidth=%0.3f, weight=%0.3f];\n' % ('X_'+str(i), 'Y_'+str(j), 2 * w, w)
f.write(line.encode('utf-8'))
for j2 in range(0, j):
w = mis[j, sieve.n_variables + j2] / np.log(2)
if w > w_threshold:
line = '%s -> %s [penwidth=%0.3f, weight=%0.3f];\n' % ('Y_'+str(j2), 'Y_'+str(j), 2 * w, w)
f.write(line.encode('utf-8'))
f.write('}'.encode('utf-8'))
f.close()
return True
|
aa63e5ffb0bd1544f29391821db9ac49e690e3fe
| 3,639,324
|
def projectSimplex_vec(v):
""" project vector v onto the probability simplex
Parameter
---------
v: shape(nVars,)
input vector
Returns
-------
w: shape(nVars,)
projection of v onto the probability simplex
"""
nVars = v.shape[0]
mu = np.sort(v,kind='quicksort')[::-1]
sm_hist = np.cumsum(mu)
flag = (mu - 1./np.arange(1,nVars+1)*(sm_hist-1) > 0)
lastTrue = len(flag) - 1 - flag[::-1].argmax()
sm_row = sm_hist[lastTrue]
theta = 1./(lastTrue+1) * (sm_row - 1)
w = np.maximum(v-theta, 0.)
return w
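import numpy as np  # assumed available; the function above already relies on it
# Worked example (assumption: not part of the original module). For v = [0.5, 0.8, 0.2],
# theta works out to (1.5 - 1)/3, so w is roughly [0.3333, 0.6333, 0.0333]:
example_w = projectSimplex_vec(np.array([0.5, 0.8, 0.2]))
assert abs(example_w.sum() - 1.0) < 1e-9 and (example_w >= 0).all()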
|
ace378ed84c61e05e04fdad23e3d97127e63df3a
| 3,639,325
|
from typing import Collection
from typing import List
from typing import Sized
def render_list(something: Collection, threshold: int, tab: str) -> List[str]:
"""
Break down a list or something similar
"""
i = 1
sub_storage = []
order = '{:0' + str(len(str(len(something)))) + 'd}'
for element in something:
if isinstance(element, Sized) and len(element) > threshold:
add = []
render(element, threshold, add, tab + '\t')
sub_storage.extend(add)
else:
sub_storage.append(f'{tab}{order.format(i)}| {element!r}')
i += 1
return sub_storage
|
a7eb47df956fc4404bae6e29e75b280cd2b70cba
| 3,639,326
|
from typing import Optional
from typing import List
from typing import Tuple
def combine_result(
intent_metrics: IntentMetrics,
entity_metrics: EntityMetrics,
response_selection_metrics: ResponseSelectionMetrics,
interpreter: Interpreter,
data: TrainingData,
intent_results: Optional[List[IntentEvaluationResult]] = None,
entity_results: Optional[List[EntityEvaluationResult]] = None,
response_selection_results: Optional[
List[ResponseSelectionEvaluationResult]
] = None,
) -> Tuple[IntentMetrics, EntityMetrics, ResponseSelectionMetrics]:
"""Collects intent, response selection and entity metrics for cross validation
folds.
If `intent_results`, `response_selection_results` or `entity_results` is provided
as a list, prediction results are also collected.
Args:
intent_metrics: intent metrics
entity_metrics: entity metrics
response_selection_metrics: response selection metrics
interpreter: the interpreter
data: training data
intent_results: intent evaluation results
entity_results: entity evaluation results
response_selection_results: response selection evaluation results
Returns: intent, entity, and response selection metrics
"""
(
intent_current_metrics,
entity_current_metrics,
response_selection_current_metrics,
current_intent_results,
current_entity_results,
current_response_selection_results,
) = compute_metrics(interpreter, data)
if intent_results is not None:
intent_results += current_intent_results
if entity_results is not None:
entity_results += current_entity_results
if response_selection_results is not None:
response_selection_results += current_response_selection_results
for k, v in intent_current_metrics.items():
intent_metrics[k] = v + intent_metrics[k]
for k, v in response_selection_current_metrics.items():
response_selection_metrics[k] = v + response_selection_metrics[k]
for extractor, extractor_metric in entity_current_metrics.items():
entity_metrics[extractor] = {
k: v + entity_metrics[extractor][k] for k, v in extractor_metric.items()
}
return intent_metrics, entity_metrics, response_selection_metrics
|
86942bbb30fe86fcd8e3453e7ac661b97832ec1a
| 3,639,327
|
import jobtracker
def get_fns_for_jobid(jobid):
"""Given a job ID number, return a list of that job's data files.
Input:
jobid: The ID number from the job-tracker DB to get files for.
Output:
fns: A list of data files associated with the job ID.
"""
query = "SELECT filename " \
"FROM files, job_files " \
"WHERE job_files.file_id=files.id " \
"AND job_files.job_id=%d" % jobid
rows = jobtracker.query(query)
fns = [str(row['filename']) for row in rows]
return fns
|
ab867ec7b86981bfd06caf219b77fbb9410277ad
| 3,639,328
|
def linear_schedule(initial_value: float):
"""
Linear learning rate schedule.
:param initial_value: Initial learning rate.
:return: schedule that computes
current learning rate depending on remaining progress
"""
def func(progress_remaining: float) -> float:
"""
Progress will decrease from 1 (beginning) to 0.
:param progress_remaining:
:return: current learning rate
"""
return progress_remaining * initial_value
return func
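# Illustrative usage (assumption: not part of the original module): with an initial
# learning rate of 3e-4 the schedule decays linearly to zero as training progresses.
schedule = linear_schedule(3e-4)
assert schedule(1.0) == 3e-4  # start of training: full learning rate
assert schedule(0.0) == 0.0   # end of training: decayed to zero
# Halfway through, schedule(0.5) returns 1.5e-4.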
|
afb0c9f050081f7e84728051535a899d9ece43f3
| 3,639,329
|
def download(os_list, software_list, dst):
"""
按软件列表下载其他部分
"""
if os_list is None:
os_list = []
arch = get_arch(os_list)
LOG.info('software arch is {0}'.format(arch))
results = {'ok': [], 'failed': []}
no_mindspore_list = [software for software in software_list if "MindSpore" not in software]
for software in no_mindspore_list:
res = download_software(software, dst, arch)
if res:
results['ok'].append(software)
continue
results['failed'].append(software)
return results
|
9def81d5c1f127cab08add62a16df35c2a9dbc80
| 3,639,330
|
import hashlib
def get_hash_bin(shard, salt=b"", size=0, offset=0):
"""Get the hash of the shard.
Args:
shard: A file like object representing the shard.
salt: Optional salt to add as a prefix before hashing.
Returns: Binary digest of ripemd160(sha256(salt + shard)).
"""
shard.seek(0)
digest = partialhash.compute(shard, offset=offset, length=size, seed=salt,
hash_algorithm=hashlib.sha256)
shard.seek(0)
return ripemd160(digest).digest()
|
94c399d41b56598e4ecac3f0c2d917a226e9e9db
| 3,639,331
|
def boltzmann_statistic(
properties: ArrayLike1D,
energies: ArrayLike1D,
temperature: float = 298.15,
statistic: str = "avg",
) -> float:
"""Compute Boltzmann statistic.
Args:
properties: Conformer properties
energies: Conformer energies (a.u.)
temperature: Temperature (K)
statistic: Statistic to compute: 'avg', 'var' or 'std'
Returns:
result: Boltzmann statistic
"""
properties = np.array(properties)
# Get conformer weights
weights = boltzmann_weights(energies, temperature)
# Compute Boltzmann weighted statistic
result: float
if statistic == "avg":
result = np.average(properties, weights=weights)
elif statistic == "var":
avg = np.average(properties, weights=weights)
result = np.sum(weights * (properties - avg) ** 2)
elif statistic == "std":
avg = np.average(properties, weights=weights)
var = np.sum(weights * (properties - avg) ** 2)
result = np.sqrt(var)
return result
|
5c5ea2d9ff43e9e068856d73f1e6bdc1f53c42b0
| 3,639,332
|
def _check_n_pca_components(ica, _n_pca_comp, verbose=None):
"""Aux function"""
if isinstance(_n_pca_comp, float):
_n_pca_comp = ((ica.pca_explained_variance_ /
ica.pca_explained_variance_.sum()).cumsum()
<= _n_pca_comp).sum()
logger.info('Selected %i PCA components by explained '
'variance' % _n_pca_comp)
elif _n_pca_comp is None or _n_pca_comp < ica.n_components_:
_n_pca_comp = ica.n_components_
return _n_pca_comp
|
1295de84f6054cac3072e2ba861c291cf71fdb72
| 3,639,333
|
def parse(text):
"""
This is what amounts to a simple lisp parser for turning the server's
returned messages into an intermediate format that's easier to deal
with than the raw (often poorly formatted) text.
This parses generally, taking any lisp-like string and turning it into a
list of nested lists, where each nesting indicates a parenthesized
expression. Ex: "(baz 0 (foo 1.5))" becomes ['baz', 0, ['foo', 1.5]].
"""
# make sure all of our parentheses match
if text.count(b"(") != text.count(b")"):
raise ValueError("Message text has unmatched parentheses!")
# result acts as a stack that holds the strings grouped by nested parens.
# result will only ever contain one item, the first level of indenting
# encountered. this is because the server (hopefully!) only ever sends one
# message at a time.
# TODO: make sure that the server only ever sends one message at a time!
result = []
# the current level of indentation, used to append chars to correct level
indent = 0
# the non-indenting characters we find. these are kept in a buffer until
# we indent or dedent, and then are added to the current indent level all
# at once, for efficiency.
s = []
# whether we're currently in the middle of parsing a string
in_string = False
# the last character seen, None to begin with
prev_c = None
for c in text.decode():
# prevent parsing parens when inside a string (also ignores escaped
# '"'s as well). doesn't add the quotes so we don't have to recognize
# that value as a string via a regex.
if c == '"' and prev_c != "\\":
in_string = not in_string
# we only indent/dedent if not in the middle of parsing a string
elif c == "(" and not in_string:
# recurse into current level of nesting
cur = result
for i in range(indent):
cur = cur[-1]
# add our buffered string onto the previous level, then clear it
# for the next.
if len(s) > 0:
val = ''.join(s)
# try to convert our string into a value and append it to our
# list. failing that, simply append it as an attribute name.
if pattern_int.match(val):
cur.append(int(val))
elif pattern_float.match(val):
cur.append(float(val))
else:
cur.append(val)
s = []
# append a new level of nesting to our list
cur.append([])
# increase the indent level so we can get back to this level later
indent += 1
elif c == ")" and not in_string:
# append remaining string buffer before dedenting
if len(s) > 0:
cur = result
for i in range(indent):
cur = cur[-1]
val = ''.join(s)
# try to convert our string into a value and append it to our
# list. failing that, simply append it as an attribute name.
if pattern_int.match(val):
cur.append(int(val))
elif pattern_float.match(val):
cur.append(float(val))
else:
cur.append(val)
s = []
# we finished with one level, so dedent back to the previous one
indent -= 1
# append non-space characters to the buffer list. spaces are delimiters
# for expressions, hence are special.
elif c != " ":
# append the current string character to the buffer list.
s.append(c)
# we separate expressions by spaces
elif c == " " and len(s) > 0:
cur = result
for i in range(indent):
cur = cur[-1]
val = ''.join(s)
# try to convert our string into a value and append it to our
# list. failing that, simply append it as an attribute name.
if pattern_int.match(val):
cur.append(int(val))
elif pattern_float.match(val):
cur.append(float(val))
else:
cur.append(val)
s = []
# save the previous character. used to determine if c is escaped
prev_c = c
# this returns the first and only message found. result is a list simply
# because it makes adding new levels of indentation simpler as it avoids
# the 'if result is None' corner case that would come up when trying to
# append the first '('.
return result[0]
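import re
# Illustrative setup (assumption: not part of the original module). The parser above relies
# on module-level regexes pattern_int and pattern_float, which are not shown here; plausible
# definitions that match integer and float literals would be:
pattern_int = re.compile(r"^-?\d+$")
pattern_float = re.compile(r"^-?\d+\.\d+$")
# With those in place, a server message parses into nested lists, e.g.
# parse(b"(init (version 15))") returns ['init', ['version', 15]].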
|
a608d50a7425c6bd6420433aff673cddd8aa612f
| 3,639,334
|
def model_fn():
"""
Return an Inception3 model with the top layer removed and the broadcast ImageNet pre-trained weights applied.
"""
model = InceptionV3(
include_top=False, # Classification softmax layer removed
weights='imagenet', # Weights pre-trained on ImageNet
# input_shape=(100,100,3), # 100x100 color image (3 channels)
pooling='max' # Use max pooling
)
model.set_weights(bc_model_weights.value)
return model
|
3ee68e9874025d94cc1d73cf4857fecf6241e415
| 3,639,335
|
def find_correspondance_date(index, csv_file):
"""
The method returns the date reported in the csv_file for the i-th subject
:param index: index corresponding to the subject analysed
:param csv_file: csv file where all the information is listed
:return: date
"""
return csv_file.EXAMDATE[index]
|
915b9a493247f04fc1f62e614bc26b6c342783c8
| 3,639,336
|
def get_config(object_config_id):
"""
Returns current and previous config
:param object_config_id:
:type object_config_id: int
:return: Current and previous config in dictionary format
:rtype: dict
"""
fields = ('config', 'attr', 'date', 'description')
try:
object_config = ObjectConfig.objects.get(id=object_config_id)
except ObjectConfig.DoesNotExist:
return None
config = {}
for name in ['current', 'previous']:
_id = getattr(object_config, name)
if _id:
config[name] = get_object(ConfigLog, _id, fields, ['date'])
else:
config[name] = None
return config
|
5eb31025494dbcf17890f3ed9e7165232db9e087
| 3,639,337
|
import unicodedata
def normalize_to_ascii(char):
"""Strip a character from its accent and encode it to ASCII"""
return unicodedata.normalize("NFKD", char).encode("ascii", "ignore").lower()
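# Illustrative usage (assumption: not part of the original module): accents are decomposed
# by NFKD, dropped by the ASCII encoding, and the result is returned as lowercased bytes.
assert normalize_to_ascii("É") == b"e"
assert normalize_to_ascii("ü") == b"u"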
|
592e59ae10bb8f9a04dffc55bcc2a1a3cefb5e7e
| 3,639,338
|
from OpenSSL import crypto
def verify_certificate_chain(certificate, intermediates, trusted_certs, logger):
"""
:param certificate: cryptography.x509.Certificate
:param intermediates: list of cryptography.x509.Certificate
:param trusted_certs: list of cryptography.x509.Certificate
Verify that the certificate is valid, according to the list of intermediates and trusted_certs.
Uses legacy crypto.X509 functions as no current equivalent in https://cryptography.io/en/latest/
See:
https://gist.github.com/uilianries/0459f59287bd63e49b1b8ef03b30d421#file-cert-check-py
:return: bool
"""
try:
#Create a certificate store and add your trusted certs
store = crypto.X509Store()
for tc in trusted_certs:
store.add_cert(crypto.X509.from_cryptography(tc))
# Create a certificate context using the store, to check any intermediate certificates
for i in intermediates:
logger.info('| verifying intermediate certificates')
i_X509 = crypto.X509.from_cryptography(i)
store_ctx = crypto.X509StoreContext(store, i_X509)
store_ctx.verify_certificate()
# no exception, so Intermediate verified - add the intermediate to the store
store.add_cert(i_X509)
# Validate certificate against (trusted + intermediate)
logger.info('| intermediates passed, verifying user certificate')
store_ctx = crypto.X509StoreContext(store, crypto.X509.from_cryptography(certificate))
# Verify the certificate, returns None if it can validate the certificate
store_ctx.verify_certificate()
logger.info('| user certificate passed')
return True
except crypto.X509StoreContextError as e:
logger.warning(e)
return False
|
5d96fa38f22a74ae270af3ab35fc90274ed487e0
| 3,639,339
|
import json
def update_strip_chart_data(_n_intervals, acq_state, chart_data_json_str,
samples_to_display_val, active_channels):
"""
A callback function to update the chart data stored in the chartData HTML
div element. The chartData element is used to store the existing data
values, which allows sharing of data between callback functions. Global
variables cannot be used to share data between callbacks (see
https://dash.plot.ly/sharing-data-between-callbacks).
Args:
_n_intervals (int): Number of timer intervals - triggers the callback.
acq_state (str): The application state of "idle", "configured",
"running" or "error" - triggers the callback.
chart_data_json_str (str): A string representation of a JSON object
containing the current chart data.
samples_to_display_val (float): The number of samples to be displayed.
active_channels ([int]): A list of integers corresponding to the user
selected active channel checkboxes.
Returns:
str: A string representation of a JSON object containing the updated
chart data.
"""
updated_chart_data = chart_data_json_str
samples_to_display = int(samples_to_display_val)
num_channels = len(active_channels)
if acq_state == 'running':
hat = globals()['_HAT']
if hat is not None:
chart_data = json.loads(chart_data_json_str)
# By specifying -1 for the samples_per_channel parameter, the
# timeout is ignored and all available data is read.
read_result = hat.a_in_scan_read(ALL_AVAILABLE, RETURN_IMMEDIATELY)
if ('hardware_overrun' not in chart_data.keys()
or not chart_data['hardware_overrun']):
chart_data['hardware_overrun'] = read_result.hardware_overrun
if ('buffer_overrun' not in chart_data.keys()
or not chart_data['buffer_overrun']):
chart_data['buffer_overrun'] = read_result.buffer_overrun
# Add the samples read to the chart_data object.
sample_count = add_samples_to_data(samples_to_display, num_channels,
chart_data, read_result)
# Update the total sample count.
chart_data['sample_count'] = sample_count
updated_chart_data = json.dumps(chart_data)
elif acq_state == 'configured':
# Clear the data in the strip chart when Configure is clicked.
updated_chart_data = init_chart_data(num_channels, samples_to_display)
return updated_chart_data
|
67902561bc4d0cec2a1ac2f8d385a2accf4c03e9
| 3,639,340
|
import uuid
def genuuid():
"""Generate a random UUID4 string."""
return str(uuid.uuid4())
|
c664a9bd45f0c00dedf196bb09a09c6cfaf0d54b
| 3,639,341
|
def watsons_f(DI1, DI2):
"""
calculates Watson's F statistic (equation 11.16 in Essentials text book).
Parameters
_________
DI1 : nested array of [Dec,Inc] pairs
DI2 : nested array of [Dec,Inc] pairs
Returns
_______
F : Watson's F
Fcrit : critical value from F table
"""
# first calculate R for the combined data set, then R1 and R2 for each individually.
# create a new array from two smaller ones
DI = np.concatenate((DI1, DI2), axis=0)
fpars = fisher_mean(DI) # re-use our function from problem 1b
fpars1 = fisher_mean(DI1)
fpars2 = fisher_mean(DI2)
N = fpars['n']
R = fpars['r']
R1 = fpars1['r']
R2 = fpars2['r']
F = (N-2.)*((R1+R2-R)/(N-R1-R2))
Fcrit = fcalc(2, 2*(N-2))
return F, Fcrit
|
db1f6be50657f4721aac4f800b7896afcbd71db7
| 3,639,342
|
def encode(integer_symbol, bit_count):
""" Returns an updated version of the given symbol list with the given symbol encoded into binary.
- `symbol_list` - the list onto which to encode the value.
- `integer_symbol` - the integer value to be encoded.
- `bit_count` - the number of bits from the end of the symbol list to decode.
"""
assert type(integer_symbol) == int and integer_symbol >= 0, "The given symbol must be an integer greater than or equal to zero."
# Convert the symbol into a bit string.
bit_string = bin(integer_symbol)
# Strip off any '0b' prefix.
if bit_string.startswith('0b'):
bit_string = bit_string[2:]
# end if
# Convert the string into a list of integers.
bits = [int(bit) for bit in list(bit_string)]
# Check that the number of bits is not bigger than the given bit count.
bits_length = len(bits)
assert bit_count >= bits_length, \
"The given %d bits to encode with is not enough to encode %d bits." % \
(bit_count, bits_length)
# Calculate how many bits we need to pad the bit string with, if any, and pad with zeros.
pad_list = [0 for i in range(0, bit_count - bits_length)]
# Return the newly created bit list, with the zero padding first.
symbol_list = pad_list + bits
return symbol_list
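# Worked example (assumption: not part of the original module): encoding 5 into 8 bits.
# bin(5) is '0b101', so the bits are [1, 0, 1] and five leading zeros pad the list out:
assert encode(5, 8) == [0, 0, 0, 0, 0, 1, 0, 1]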
|
fe8fb04245c053bb4387b0ac594a778df5bce22c
| 3,639,343
|
def superkick(update, context):
"""Superkick a member from all rooms by replying to one of their messages with the /superkick command."""
bot = context.bot
user_id = update.message.from_user.id
boot_id = update.message.reply_to_message.from_user.id
username = update.message.reply_to_message.from_user.name
admin = _admin(user_id)
if not admin:
return _for_admin_only_message(bot, user_id, username)
in_crab_wap = _in_group(context, user_id, config["GROUPS"]["crab_wiv_a_plan"])
in_tutorial = _in_group(context, user_id, config["GROUPS"]["tutorial"])
in_video_stars = _in_group(context, user_id, config["GROUPS"]["video_stars"])
if in_crab_wap:
bot.kick_chat_member(chat_id=config["GROUPS"]["crab_wiv_a_plan"], user_id=boot_id)
bot.restrict_chat_member(chat_id=config["GROUPS"]["crab_wiv_a_plan"], user_id=boot_id,
can_send_messages=True,
can_send_media_messages=True,
can_add_web_page_previews=True,
can_send_other_messages=True)
if in_tutorial:
bot.kick_chat_member(chat_id=config["GROUPS"]["tutorial"], user_id=boot_id)
bot.restrict_chat_member(chat_id=config["GROUPS"]["tutorial"], user_id=boot_id,
can_send_messages=True,
can_send_media_messages=True,
can_add_web_page_previews=True,
can_send_other_messages=True)
if in_video_stars:
bot.kick_chat_member(chat_id=config["GROUPS"]["video_stars"], user_id=boot_id)
bot.restrict_chat_member(chat_id=config["GROUPS"]["video_stars"], user_id=boot_id,
can_send_messages=True,
can_send_media_messages=True,
can_add_web_page_previews=True,
can_send_other_messages=True)
remove_member(boot_id)
the_message = '{} has been *SUPER KICKED* from Crab Wiv A Plan, Tutorial Group, and VideoStars.' \
.format(escape_markdown(username))
bot.send_message(chat_id=config["GROUPS"]["boot_channel"],
text=the_message,
parse_mode='MARKDOWN')
bot.delete_message(chat_id=update.message.chat_id,
message_id=update.message.message_id)
|
2a6550bb533a51cc8ebb79ca7f5cdbd214af4a5a
| 3,639,344
|
import typing
import pendulum
from cryptography.fernet import Fernet
from datetime import datetime
def encrypt_session(
signer: Fernet,
session_id: str,
current_time: typing.Optional[typing.Union[int, datetime]] = None,
) -> str:
"""An utility for generating a token from the passed session id.
:param signer: an instance of a fernet object
:param session_id: a user session id
:param current_time: a datetime object or timestamp indicating the time of the session id encryption. By default, it is now
"""
if current_time is None:
current_time = pendulum.now()
if isinstance(current_time, datetime):
current_time = current_time.timestamp()
return signer.encrypt_at_time(session_id.encode("utf-8"), int(current_time)).decode(
"utf-8"
)
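# Illustrative usage (assumption: not part of the original module). A Fernet signer encrypts
# the session id at an explicit timestamp, so the pendulum default is not needed:
example_signer = Fernet(Fernet.generate_key())
example_token = encrypt_session(example_signer, "session-123", current_time=1700000000)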
|
9d924dcbc0abdf8facb31e256c5c67ccca3850be
| 3,639,345
|
def construct_chargelst(nsingle):
"""
Makes list of lists containing Lin indices of the states for given charge.
Parameters
----------
nsingle : int
Number of single particle states.
Returns
-------
chargelst : list of lists
chargelst[charge] gives a list of state indices for given charge,
chargelst[charge][ind] gives state index.
"""
nmany = np.power(2, nsingle)
chargelst = [[] for _ in range(nsingle+1)]
# Iterate over many-body states
for j1 in range(nmany):
state = integer_to_binarylist(j1, nsingle)
chargelst[sum(state)].append(j1)
return chargelst
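# Worked example (assumption: not part of the original module; integer_to_binarylist is
# assumed to return the binary occupation list of state j1). For nsingle=2 there are 4
# many-body states, grouped by total charge as:
# construct_chargelst(2) -> [[0], [1, 2], [3]]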
|
e94044566d0acc7106d34d142ed3579226706a65
| 3,639,346
|
import json
def parse(json_string):
"""Constructs the Protocol from the JSON text."""
try:
json_data = json.loads(json_string)
except:
raise ProtocolParseException('Error parsing JSON: %s' % json_string)
# construct the Avro Protocol object
return make_avpr_object(json_data)
|
f95854e8c0b8e49ec71e03ee8487f88f4687ebf0
| 3,639,347
|
import os
def create_script_dict(allpacks, path, file, skip_lines):
"""Create script dict or skips file if resources cannot be made"""
allpacks["name"] = "FILL"
allpacks["title"] = "FILL"
allpacks["description"] = "FILL"
allpacks["citation"] = "FILL"
allpacks["licenses"] = [{"name": "FILL"}]
allpacks["keywords"] = []
allpacks["homepage"] = "FILL"
allpacks["version"] = "1.0.0"
try:
resources = create_resources(os.path.join(path, file), skip_lines)
except:
print("Skipped file: " + file)
return
allpacks.setdefault("resources", []).append(resources)
allpacks["retriever"] = "True"
allpacks["retriever_minimum_version"] = "2.1.0"
return allpacks
|
8bc229a3343676cf8b70c524119994e1ca49e054
| 3,639,348
|
def get_architecture(model_config: dict, feature_config: FeatureConfig, file_io):
"""
Return the architecture operation based on the model_config YAML specified
"""
architecture_key = model_config.get("architecture_key")
if architecture_key == ArchitectureKey.DNN:
return DNN(model_config, feature_config, file_io).get_architecture_op()
elif architecture_key == ArchitectureKey.LINEAR:
# Validate the model config
num_dense_layers = len([l for l in model_config["layers"] if l["type"] == "dense"])
if num_dense_layers == 0:
raise ValueError("No dense layers were specified in the ModelConfig")
elif num_dense_layers > 1:
raise ValueError("Linear model used with more than 1 dense layer")
else:
return DNN(model_config, feature_config, file_io).get_architecture_op()
elif architecture_key == ArchitectureKey.RNN:
raise NotImplementedError
else:
raise NotImplementedError
|
a7c58770a07c225ae79a03699639e19498d3a0c6
| 3,639,349
|
def get_properties_dict(serialized_file: str, sparql_file: str, repository: str, endpoint: str, endpoint_type: str,
limit: int = 1000) -> ResourceDictionary:
"""
Return a ResourceDictionary with the list of properties in the ontology
:param serialized_file: The file where the properties ResourceDictionary is serialized
:param sparql_file: The file containing the SPARQL query
:param repository: The repository containing the ontology
:param endpoint: The SPARQL endpoint
:param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
:param limit: The sparql query limit
:return: A ResourceDictionary with the list of properties in the ontology
"""
global_properties_dict = deserialize(serialized_file)
if global_properties_dict:
return global_properties_dict
global_properties_dict = ResourceDictionary()
global_properties_dict.add(RDF.type)
properties_sparql_query = open(sparql_file).read()
properties_sparql_query_template = Template(properties_sparql_query + " limit $limit offset $offset ")
for rdf_property in get_sparql_results(properties_sparql_query_template, ["property"], endpoint, repository,
endpoint_type, limit):
global_properties_dict.add(rdf_property[0])
serialize(global_properties_dict, serialized_file)
return global_properties_dict
|
3a31bd8b23cb7a940c6386225dd39a302f3d3f3a
| 3,639,350
|
def get_duplicate_sample_ids(taxonomy_ids):
"""Get duplicate sample IDs from the taxonomy table.
It happens that some sample IDs are associated with more than taxon. Which
means that the same sample is two different species. This is a data entry
error and should be removed. Conversely, having more than one sample for
a taxon is fine; it's just oversampling and will be handled later.
"""
taxonomy_ids['times'] = 0
errors = taxonomy_ids.groupby('sample_id').agg(
{'times': 'count', 'sci_name': ', '.join})
errors = errors.loc[errors.times > 1, :].drop(['times'], axis='columns')
sci_names = errors.sci_name.str.split(r'\s*[;,]\s*', expand=True)
id_cols = {i: f'sci_name_{i + 1}' for i in sci_names.columns}
sci_names = sci_names.rename(columns=id_cols)
errors = pd.concat([errors, sci_names], axis='columns').drop(
['sci_name'], axis=1)
return errors
|
c01315d6d51ec8e62a0f510944d724a18949aeb8
| 3,639,351
|
def get_settings_text(poll):
"""Compile the options text for this poll."""
text = []
locale = poll.user.locale
text.append(i18n.t('settings.poll_type',
locale=locale,
poll_type=translate_poll_type(poll.poll_type, locale)))
text.append(i18n.t('settings.language', locale=locale, language=poll.locale))
if poll.anonymous:
text.append(i18n.t('settings.anonymous', locale=locale))
else:
text.append(i18n.t('settings.not_anonymous', locale=locale))
if poll.due_date:
text.append(i18n.t('settings.due_date', locale=locale,
date=poll.get_formatted_due_date()))
else:
text.append(i18n.t('settings.no_due_date', locale=locale))
if poll.results_visible:
text.append(i18n.t('settings.results_visible', locale=locale))
else:
text.append(i18n.t('settings.results_not_visible', locale=locale))
text.append('')
if poll.allow_new_options:
text.append(i18n.t('settings.user_options', locale=locale))
else:
text.append(i18n.t('settings.no_user_options', locale=locale))
if poll.results_visible:
if poll.show_percentage:
text.append(i18n.t('settings.percentage', locale=locale))
else:
text.append(i18n.t('settings.no_percentage', locale=locale))
if poll.has_date_option():
if poll.european_date_format:
text.append(i18n.t('settings.euro_date_format', locale=locale))
else:
text.append(i18n.t('settings.us_date_format', locale=locale))
text.append('')
# Sorting of user names
if poll.poll_type == PollType.doodle.name:
sorting_name = i18n.t(f'sorting.doodle_sorting', locale=locale)
text.append(i18n.t('settings.user_sorting', locale=locale, name=sorting_name))
elif not poll.anonymous:
sorting_name = i18n.t(f'sorting.{poll.user_sorting}', locale=locale)
text.append(i18n.t('settings.user_sorting', locale=locale, name=sorting_name))
sorting_name = i18n.t(f'sorting.{poll.option_sorting}', locale=locale)
text.append(i18n.t('settings.option_sorting', locale=locale, name=sorting_name))
return '\n'.join(text)
|
24ef467070324dac6a8c698b791a1fe577a5d928
| 3,639,352
|
import ffmpeg
import datetime
import time
import os
import math
def acd(strymobj= None, window_size=30, plot_iteration = False, every_iteration = 200, plot_timespace = True, save_timespace = False, wave_threshold = 50.0, animation = False, title = 'Average Centroid Distance', **kwargs):
"""
Average Centroid Distance Algorithm for calculating stop-and-go wavestrength from speed and acceleration data.
`acd` implements average centroid distance algorithm to find out the stop-and-go distance traveled based on the given
threshold.
Parameters
-------------
strymobj: `strymread`
A valid stymread object
window_size: `int`
Window size over which to form the cluster of data points on speed-acceleration phasespace
plot_iteration: `bool`
If `True` plots the intermediate phase-space plots of speed-acceleration phasespace for the `window_size` and distribution of centroid distances
every_iteration: `int`
If `plot_iteration` is true, then plot the intermediate figures every `every_iteration` iteration
plot_timespace: `bool`
If `True`, plots the timespace diagram of wavestrength for the given drive.
save_timespace: `bool`
If `True` save the timespace diagram to the disk
wave_threshold: `double`
The value of threshold of wavestrength above which classify the driving mode as stop-and-go. It defaults to the value of 50.
animation: `bool`
If `True` produces animation of phasespace evolving with the time
title: `str`
Desire plot title for phasespace animation
image_path: `str`
Path on the disk where to store phasespace animation
Returns
----------
`pandas.DataFrame`
Returns Pandas Data frame consisting of WaveStrength column as a timeseries
`double`
Returns stop-and-go distance measured based on the `wave_threshold` in meters
"""
# Check whether the strymread object was able to successfully read the data file
if strymobj is not None:
if not strymobj.success:
print("Invalid/Errored strymread object supplied. Check if supplied datafile to strymread is valid.")
return None
file_name = strymobj.csvfile
file_name = file_name.split('/')[-1][0:-4]
## Get the speed
speed = strymobj.speed()
if speed.shape[0] == 0:
print("No speed data found\n")
return None
elif speed.shape[0] < 10:
print("Speed data too low. Skipping ...\n")
return None
### Convert speed to m/s
speed['Message'] = speed['Message']*0.277778
position = strymread.integrate(speed)
# Get the position
## Get the longitudinal acceleration
accelx = strymobj.accelx()
if accelx.shape[0] == 0:
print("No Acceleration data found\n")
return None
elif accelx.shape[0] < 10:
print("Acceleration data too low. Skipping ...\n")
return None
else:
file_name = ''
speed = kwargs.get("speed", None)
if speed is None:
print("No speed data provided. Skipping ...\n")
return None
accelx = kwargs.get("accelx", None)
if accelx is None:
print("No longitudinal data provided. Skipping ...\n")
return None
speed_unit = kwargs.get("speed_unit", "km/h")
if speed_unit.lower() not in ["km/h", "m/s"]:
print("Unrecognized speed unit '{}'. Provide speed unit in km/h or m/s\n".format(speed_unit))
return None
if speed_unit.lower() == "km/h":
### Convert speed to m/s
speed['Message'] = speed['Message']*0.277778
elif speed_unit.lower() == "m/s":
print("INFO: Speed unit is m/s")
position = kwargs.get("position", None)
if position is None:
position = strymread.integrate(speed)
# strymread.plt_ts(speed, title="Original Speed (m/s)")
# strymread.plt_ts(position, title="Original Position (m)")
# strymread.plt_ts(accelx, title="Original Accel (m/s^2)")
# Synchronize speed and acceleration for common time points with a rate of 20 Hz
rate = kwargs.get("rate", 20)
speed_resampled, accel_resampled = strymread.ts_sync(speed, accelx, rate=rate, method = "nearest")
position_resampled, _ = strymread.ts_sync(position, accelx, rate=rate, method = "nearest")
# strymread.plt_ts(speed_resampled, title="Resampled Speed (m/s)")
# strymread.plt_ts(position_resampled, title="Resampled position (m)")
# strymread.plt_ts(accel_resampled, title="Resampled Accel (m/s^2)")
assert ((speed_resampled.shape[0] == accel_resampled.shape[0]) and (position_resampled.shape[0]==accel_resampled.shape[0])), "Synchronization Error"
df = speed_resampled.copy(deep=True)
df["Speed"] = speed_resampled["Message"]
df["Accelx"] = accel_resampled["Message"]
df["Position"] = position_resampled["Message"]
df.drop(columns=["Message"], inplace=True)
if df.shape[0] < 3:
print("Extremely low sample points in synced-resampled data to obtain any meaningful measure. Skipping ...")
return None
DeltaT = np.mean(df['Time'].diff())
#print(1./DeltaT)
n_Sample_WS = int((1/DeltaT)*window_size) # Number of samples for window_size
print("Number of samples for {} seconds: {}".format(window_size, n_Sample_WS))
df.index = np.arange(0, df.shape[0])
#print(n_Sample_WS)
df['wavestrength'] = 0
df['EllipseFit_semimajor_axis_len'] = 0
df['EllipseFit_semiminor_axis_len'] = 0
df['Goodness_of_Ellipse_Fit'] = 0
count = 0
# Save images in /tmp folder dy default
dt_object = datetime.datetime.fromtimestamp(time.time())
dt = dt_object.strftime('%Y-%m-%d-%H-%M-%S-%f')
image_path = kwargs.get("image_path", "/tmp")
image_path = image_path + '/WaveStrength_' + dt
if animation:
if not os.path.exists(image_path):
try:
os.mkdir(image_path)
except OSError:
print("[ERROR] Failed to create the image folder {0}.".format(image_path))
figure_count = 1
for r, row in df.iterrows():
if r <=n_Sample_WS:
continue
df_tempWS = df[r-n_Sample_WS-1:r-1]
velocity_tempWS = pd.DataFrame()
velocity_tempWS['Time'] = df_tempWS['Time']
velocity_tempWS['Message'] = df_tempWS['Speed']
accel_tempWS = pd.DataFrame()
accel_tempWS['Time'] = df_tempWS['Time']
accel_tempWS['Message'] = df_tempWS['Accelx']
ps = phasespace(dfx=velocity_tempWS, dfy=accel_tempWS, resample_type="first", verbose=False)
if np.all(velocity_tempWS['Message'] == 0) or np.all(accel_tempWS['Message'] == 0):
z1 = 0
z2 = 0
r1 = 0
r2 = 0
phi = 0
residual = 0
else:
z1, z2, r1, r2, phi, residual = ellipse_fit(x = velocity_tempWS['Message'].to_numpy(), y = accel_tempWS['Message'].to_numpy())
count = count + 1
if plot_iteration or animation:
if count % every_iteration == 0:
count = 0
print("--------------------------------------------------------------")
print('Time Range: {} to {}'.format(accel_tempWS['Time'].iloc[0], accel_tempWS['Time'].iloc[-1]))
#fig, ax = strymread.create_fig()
fig, ax = _acdplots()
strymread.plt_ts(speed_resampled, ax = ax[0], show = False, title = "Speed")
strymread.plt_ts(accel_resampled, ax = ax[1], show = False, title="Acceleration")
# Create a Rectangle patch that represents window of the iteration
rect = patches.Rectangle((velocity_tempWS['Time'].iloc[0], np.min(speed_resampled['Message'])),\
np.abs(velocity_tempWS['Time'].iloc[-1] - velocity_tempWS['Time'].iloc[0]),\
np.max(speed_resampled['Message']) - np.min(speed_resampled['Message']),\
linewidth=4,edgecolor='g',facecolor='none')
ax[0].add_patch(rect)
rect = patches.Rectangle((accel_tempWS['Time'].iloc[0], np.min(accel_resampled['Message'])),\
np.abs(accel_tempWS['Time'].iloc[-1] - accel_tempWS['Time'].iloc[0]),\
np.max(accel_resampled['Message']) - np.min(accel_resampled['Message']),\
linewidth=4,edgecolor='g',facecolor='none')
ax[1].add_patch(rect)
ax1 = ps.phaseplot(title='Phase-space plot',\
xlabel='Speed', ylabel='Acceleration', plot_each = True, ax = [ax[2], ax[3], ax[4]], show = False, fig = fig)
subtext = 'Time Window: ['+\
str(accel_tempWS['Time'].iloc[0]) + ', ' + str(accel_tempWS['Time'].iloc[-1])+']\n' + file_name +'\n'
ax[2].xaxis.label.set_size(35)
ax[3].xaxis.label.set_size(35)
ax[4].xaxis.label.set_size(35)
ax[2].yaxis.label.set_size(35)
ax[3].yaxis.label.set_size(35)
ax[4].yaxis.label.set_size(35)
ax[2].title.set_fontsize(40)
ax[3].title.set_fontsize(40)
ax[4].title.set_fontsize(40)
ax[4].set_xlim(np.min(speed_resampled['Message'])-2.0, np.max(speed_resampled['Message'])+ 2.0)
ax[4].set_ylim(np.min(accel_resampled['Message'])-2.0, np.max(accel_resampled['Message'])+ 2.0)
ax[4].set_aspect('equal', adjustable='box')
c1= patches.Ellipse((z1, z2), r1*2,r2*2, angle = math.degrees(phi), color='g', fill=False, linewidth = 5)
ax[4].add_artist(c1)
ax2 = ps.centroidplot( xlabel='Centroid Distance', ylabel='Counts', ax = ax[5], show = False)
plt.subplots_adjust(wspace=0, hspace=0)
plt.tight_layout()
my_suptitle = fig.suptitle(title + '\n' + subtext, y = 1.06)
if animation:
figure_file_name = image_path + '/' + "wave_strength_{:06d}.png".format(figure_count)
fig.savefig(figure_file_name, dpi = 100,bbox_inches='tight',bbox_extra_artists=[my_suptitle])
figure_count = figure_count + 1
if plot_iteration:
plt.show()
else:
fig.clear()
plt.close(fig)
print("Average Centroid Distane of cluster is {}".format(ps.acd))
#df.iloc[df_tempWS.index[-1], df.columns.get_loc('wavestrength') ] = ps.acd
df['wavestrength'].iloc[df_tempWS.index[-1]] = ps.acd
#df.iloc[df_tempWS.index[-1], df.columns.get_loc('EllipseFit_semimajor_axis_len') ] = r1
#df.iloc[df_tempWS.index[-1], df.columns.get_loc('EllipseFit_semiminor_axis_len') ] = r2
#df.iloc[df_tempWS.index[-1], df.columns.get_loc('Goodness_of_Ellipse_Fit') ] = residual
df['EllipseFit_semimajor_axis_len'].iloc[df_tempWS.index[-1]] = r1
df['EllipseFit_semiminor_axis_len'].iloc[df_tempWS.index[-1]] = r2
df['Goodness_of_Ellipse_Fit'].iloc[df_tempWS.index[-1]] = residual
if animation:
figdirs = os.listdir(image_path)
figdirs.sort()
video_name = 'wave_strength' + dt + '.mp4'
(
ffmpeg
.input(image_path + '/*.png', pattern_type='glob', framerate=5)
.output(video_name)
.run()
)
# Filter out data for which strong wave was detected
high_wave = df[df['wavestrength'] > wave_threshold]
# high_wave now is discontinuous in Time column, use this information to create separate
# continuous chunks and over which we calculate the total distance
high_wave_chunk = strymread.create_chunks(high_wave, continuous_threshold=0.1, \
column_of_interest = 'Time', plot = False)
# stop_ang_go_distance = 0.0
# for c in high_wave_chunk:
# d = c['Position'][-1] - c['Position'][0]
# stop_ang_go_distance = stop_ang_go_distance + d
stop_ang_go_distance = 0.0
for c in high_wave_chunk:
pos_temp = strymread.integrate(c, msg_axis="Speed")
stop_ang_go_distance = stop_ang_go_distance + pos_temp['Message'][-1]
if plot_timespace or save_timespace:
fig, ax = strymread.create_fig(nrows = 4, ncols=1)
im = ax[0].scatter(df['Time'], df['Position'], c=np.log(df['wavestrength']+1), cmap=strymread.sunset, s=3)
im2 = ax[1].scatter(df['Time'], df['Position'], c=df['Speed'], cmap=strymread.sunset, s=3)
im3 = ax[2].scatter(df['Time'], df['Speed'], c=df['Time'], cmap=strymread.sunset, s=3)
im4 = ax[3].scatter(df['Time'], df['wavestrength'], c=df['Time'], cmap=strymread.sunset, s=3)
cbr= strymread.set_colorbar(fig = fig, ax = ax[0], im = im, label = "log(wavestrength+1)")
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Position')
ax[0].set_title('Time-Space Diagram with log(wavestrength+1) as color map')
cbr= strymread.set_colorbar(fig = fig, ax = ax[1], im = im2, label = "speed")
ax[1].set_xlabel('Time')
ax[1].set_ylabel('Position')
ax[1].set_title('Time-Space Diagram with speed as color map')
cbr= strymread.set_colorbar(fig = fig, ax = ax[2], im = im3, label = "Time")
ax[2].set_xlabel('Time')
ax[2].set_ylabel('Speed')
ax[2].set_title('Time-Speed Diagram with Time as color map')
cbr= strymread.set_colorbar(fig = fig, ax = ax[3], im = im4, label = "Time")
ax[3].set_xlabel('Time')
ax[3].set_ylabel('wavestrength')
ax[3].set_title('Time-WaveStrength Diagram with Time as color map')
dt_object = datetime.datetime.fromtimestamp(time.time())
dt = dt_object.strftime('%Y-%m-%d-%H-%M-%S-%f')
if save_timespace:
file_to_save = "ACD_"+ file_name + "_time_space_diagram_" + dt
fig.savefig(file_to_save + ".png", dpi = 100)
fig.savefig(file_to_save + ".pdf", dpi = 100)
if plot_timespace:
plt.show()
else:
plt.close(fig)
return df, stop_ang_go_distance
|
ffd239f28b3abc801e4a0755e97133e409a058cc
| 3,639,353
|
import functools
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
|
2264ca5978485d8fc13377d17eb84ee522a040b9
| 3,639,354
|
def create_values_key(key):
"""Creates secondary key representing sparse values associated with key."""
return '_'.join([key, VALUES_SUFFIX])
|
e8a70bc4ef84a7a62a9d8b8d915b9ddbc0990429
| 3,639,355
|
def make_mask(variable, **flags):
"""
Return a mask array, based on provided flags
For example:
make_mask(pqa, cloud_acca=False, cloud_fmask=False, land_obs=True)
OR
make_mask(pqa, **GOOD_PIXEL_FLAGS)
where GOOD_PIXEL_FLAGS is a dict of flag_name to True/False
:param variable:
:type variable: xarray.Dataset or xarray.DataArray
:param flags: list of boolean flags
:return:
"""
flags_def = get_flags_def(variable)
mask, mask_value = create_mask_value(flags_def, **flags)
return variable & mask == mask_value
|
fcdd7247359b5127d14a906298e20a05fd63b108
| 3,639,356
|
def _normalize_block_comments(content: str) -> str:
"""Add // to the beginning of all lines inside a /* */ block"""
comment_partitions = _partition_block_comments(content)
normalized_partitions = []
for partition in comment_partitions:
if isinstance(partition, Comment):
comment = partition
normalized_comment_lines = []
comment_lines = comment.splitlines(keepends=True)
normalized_comment_lines.append(comment_lines[0])
for line in comment_lines[1:]:
if line.lstrip().startswith("//"):
normalized_line = line
else:
normalized_line = f"// {line}"
normalized_comment_lines.append(normalized_line)
normalized_comment = f'/*{"".join(normalized_comment_lines)}*/'
normalized_partitions.append(normalized_comment)
else:
normalized_partitions.append(partition)
normalized_content = "".join(normalized_partitions)
return normalized_content
|
76c2c1d0b80cf40f647033aa8745058f1546076e
| 3,639,357
|
from datetime import datetime, timedelta
def check_holidays(date_start, modified_end_date, holidays):
"""
    Check whether any holidays fall within the vacation dates.
    If they do, extend the vacation by that many days; otherwise the end date stays unchanged.
"""
    # start date used in the check loop (the end date shifts +1 day for each holiday found)
date_1 = datetime.strptime(date_start, '%d.%m.%Y') # start date
# second end date (after add holidays)
date_2 = datetime.strptime(modified_end_date, '%d.%m.%Y')
# third end date for finish date after adding holidays in vacations
date_3 = datetime.strptime(modified_end_date, '%d.%m.%Y')
# counter for days in vacation
x = 0
# loop for check dates in created holidays list
for i in holidays:
if date_1 <= datetime.strptime(i, '%d.%m.%Y') <= date_2:
print(i)
x += 1
date_2 = date_2 + timedelta(days=1)
print(x)
# adding counter to first end date
date_end = date_3 + timedelta(days=x)
date_end = datetime.strftime(date_end, '%d.%m.%Y')
return date_end
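# A minimal usage sketch (not part of the original snippet); the dates below are
# hypothetical and only illustrate the expected '%d.%m.%Y' string format.
if __name__ == "__main__":
    sample_holidays = ['02.05.2022', '09.05.2022']
    # Two holidays fall inside 01.05.2022-10.05.2022, so the end date shifts by two days
    result = check_holidays('01.05.2022', '10.05.2022', sample_holidays)  # '12.05.2022'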
|
c2b8145f9963cd2679e238c2c378535eea2e08db
| 3,639,358
|
import os
import warnings
import logging
import multiprocessing as mp
import numpy as np
import pandas as pd
def getHouseholdProfiles(
n_persons,
weather_data,
weatherID,
seeds=[0],
ignore_weather=True,
mean_load=True,
cores=mp.cpu_count() - 1,
):
"""
Gets or creates the relevant occupancy profiles for a building
simulation or optimization.
Parameters
----------
n_persons: integer, required
Number of persons living in a single appartment.
weather_data: pd.DataFrame(), required
A time indexed pandas dataframe containing weather data with
the GHI as a column.
weatherID: str, required
Giving an ID to the weather data to identify the resulting profile.
seeds: list, optional (default: [0])
List of integer seeds to create a number of profiles which have
similar input parameters, but a varying output. Default, no seed is
chosen.
ignore_weather: bool, optional (default: False)
Since atm only the GHI is required for the electricity load profile,
        the weather plays a minor role and can be ignored in the identification
of profiles.
mean_load: bool, optional (default: True)
Decides if the created load profiles on 1-minute basis shall be
downsampled by taking the mean of 60 minutes or the first value in
every 60 minutes.
    cores: int, optional (default: mp.cpu_count() - 1)
Number of cores used for profile generation.
"""
# get the potential profile names
filenames = {}
for seed in seeds:
profile_ID = "Profile" + "_occ" + str(int(n_persons)) + "_seed" + str(seed)
if not ignore_weather:
profile_ID = profile_ID + "_wea" + str(weatherID)
if mean_load:
profile_ID = profile_ID + "_mean"
filenames[seed] = os.path.join(
PATH, "results", "occupantprofiles", profile_ID + ".csv"
)
    # check how many profiles do not exist
not_existing_profiles = {}
for seed in seeds:
if not os.path.isfile(filenames[seed]):
not_existing_profiles[seed] = filenames[seed]
# info about runtime
if cores < 1:
        warnings.warn('Recognized cores are less than one. The code will behave as if the number were one.')
cores = 1
_runtime = np.floor(float(len(not_existing_profiles))/cores)
_log_str = str(len(not_existing_profiles)) + " household profiles need to get calculated. \n"
_log_str += "With " + str(cores) + " threads, the estimated runtime is " + str(_runtime) + " minutes."
logging.info(_log_str)
# run in parallel all profiles
if len(not_existing_profiles) > 1:
new_profiles = simHouseholdsParallel(
int(n_persons),
weather_data.index[0].year,
len(not_existing_profiles),
singleProfiles=True,
weather_data=weather_data,
get_hot_water=True,
resample_mean=mean_load,
cores=cores,
)
# if single profile just create one profile and avoid multiprocessing
elif len(not_existing_profiles) > 0:
one_profile = simSingleHousehold(
int(n_persons),
weather_data.index[0].year,
weather_data=weather_data,
get_hot_water=True,
resample_mean=mean_load,
)
new_profiles = [one_profile]
# write results to csv files
for i, seed in enumerate(not_existing_profiles):
new_profiles[i].to_csv(not_existing_profiles[seed])
# load all profiles
profiles = []
for seed in seeds:
profile = pd.read_csv(filenames[seed], index_col=0)
# TODO get a proper indexing in tsorb based on the weather data
profile.index = weather_data.index
profiles.append(profile)
return profiles
|
b5284b03633699337075634d3484860d9c062e40
| 3,639,359
|
from typing import Optional
from pathlib import Path
import platform
def get_local_ffmpeg() -> Optional[Path]:
"""
Get local ffmpeg binary path.
### Returns
- Path to ffmpeg binary or None if not found.
"""
    ffmpeg_path = Path(
        # Parenthesize the conditional so the "ffmpeg" base name is kept on non-Windows systems
        get_spotdl_path(), "ffmpeg" + (".exe" if platform.system() == "Windows" else "")
    )
if ffmpeg_path.is_file():
return ffmpeg_path
return None
|
2495a1153da32f3ffb21075172cd0fb82b7809ea
| 3,639,360
|
def remaining_time(trace, event):
"""Calculate remaining time by event in trace
:param trace:
:param event:
:return:
"""
# FIXME using no timezone info for calculation
event_time = event['time:timestamp'].strftime("%Y-%m-%dT%H:%M:%S")
last_time = trace[-1]['time:timestamp'].strftime("%Y-%m-%dT%H:%M:%S")
try:
delta = dt.strptime(last_time, TIME_FORMAT) - dt.strptime(event_time, TIME_FORMAT)
except ValueError:
# Log has no timestamps
return 0
return delta.total_seconds()
|
87e961ca4091e8cc572c845968476a264aad5f27
| 3,639,361
|
def _water_vapor_pressure_difference(temp, wet_bulb_temp, vap_press, psych_const):
"""
Evaluate the psychrometric formula
e_l - (e_w - gamma * (T_a - T_w)).
Parameters
----------
temp : numeric
Air temperature (K).
wet_bulb_temp : numeric
Wet-bulb temperature (K).
vap_press : numeric
Vapor pressure (Pa).
psych_const : numeric
Psychrometric constant (Pa K-1).
Returns
-------
wat_vap_press_diff : numeric
Water vapor pressure difference (Pa).
"""
sat_vap_press_wet_bulb = saturation_vapor_pressure(wet_bulb_temp)
return vap_press - (sat_vap_press_wet_bulb - psych_const * (temp - wet_bulb_temp))
|
cee814a44ae1736dc35f08984cdb15fe94576716
| 3,639,362
|
def _service_description_required(func):
"""
Decorator for checking whether the service description is available on a device's service.
"""
@wraps(func)
def wrapper(service, *args, **kwargs):
if service.description is None:
raise exceptions.NotRetrievedError('No service description retrieved for this service.')
elif service.description == exceptions.NotAvailableError:
return
return func(service, *args, **kwargs)
return wrapper
|
27b962616026ad3987d2c214138d903971e2461c
| 3,639,363
|
def vector(*args):
"""
A single vector in any coordinate basis,
as a numpy array.
"""
return N.array(args)
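# Usage sketch (the snippet assumes numpy has been imported as N elsewhere in the module):
# >>> vector(1.0, 2.0, 3.0)
# array([1., 2., 3.])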
|
41da98ad36bff55fc4b71ce6b4e604262b2ecd1a
| 3,639,364
|
def arcmin_to_deg(arcmin: float) -> float:
""" Convert arcmin to degree """
return arcmin / 60
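# Quick example: 90 arcminutes is 1.5 degrees.
# >>> arcmin_to_deg(90)
# 1.5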
|
9ef01181a319c0c48542ac57602bd7c17a7c1ced
| 3,639,365
|
def soft_embedding_lookup(embedding, soft_ids):
"""Transforms soft ids (e.g., probability distribution over ids) into
embeddings, by mixing the embedding vectors with the soft weights.
Args:
embedding: A Tensor of shape `[num_classes] + embedding-dim` containing
the embedding vectors. Embedding can have dimensionality > 1, i.e.,
:attr:`embedding` can be of shape
`[num_classes, emb_dim_1, emb_dim_2, ...]`
soft_ids: A Tensor of weights (probabilities) used to mix the
embedding vectors.
Returns:
A Tensor of shape `shape(soft_ids)[:-1] + shape(embedding)[1:]`. For
example, if `shape(soft_ids) = [batch_size, max_time, vocab_size]`
and `shape(embedding) = [vocab_size, emb_dim]`, then the return tensor
has shape `[batch_size, max_time, emb_dim]`.
Example::
decoder_outputs, ... = decoder(...)
soft_seq_emb = soft_embedding_lookup(
embedding, tf.nn.softmax(decoder_outputs.logits))
"""
return tf.tensordot(tf.to_float(soft_ids), embedding, [-1, 0])
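# Shape sketch with hypothetical sizes (kept as comments, since the snippet uses the
# TF1-style API, e.g. tf.to_float):
#   soft_ids : [batch_size=2, max_time=5, vocab_size=100]
#   embedding: [vocab_size=100, emb_dim=64]
#   soft_embedding_lookup(embedding, soft_ids) -> shape [2, 5, 64]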
|
4b831b8f23a226aac74c0bb3919e3c27bb57dc60
| 3,639,366
|
def param_11(i):
"""Returns parametrized Exp11Gate."""
return Exp11Gate(half_turns=i)
|
5458c8a4e992bd38dbb114e9ae4c4bac8a86fc75
| 3,639,367
|
def resolve_link(db: Redis[bytes], address: hash_t) -> hash_t:
"""Resolve any link recursively."""
key = join(ARTEFACTS, address, "links_to")
link = db.get(key)
if link is None:
return address
else:
out = hash_t(link.decode())
return resolve_link(db, out)
|
b8087b2d015fc4b8515c35e437e609a935ccfcb2
| 3,639,368
|
def image_ppg(ppg_np):
"""
Input:
ppg: numpy array
Return:
        ax: canvas (axes) information
        im: image information
"""
ppg_deps = ppg.DependenciesPPG()
ppg_M = Matrix(ppg_np)
monophone_ppgs = ppg.reduce_ppg_dim(ppg_M, ppg_deps.monophone_trans)
monophone_ppgs = monophone_ppgs.numpy().T
fig, ax = plt.subplots(figsize=(10, 6))
im = ax.imshow(monophone_ppgs, aspect="auto", origin="lower",
interpolation='none')
return ax, im
|
714ccc3e294a5f02983a9aa384c2d6aa313ee4e5
| 3,639,369
|
def is_hex_value(val):
"""
Helper function that returns True if the provided value is an integer in
hexadecimal format.
"""
try:
int(val, 16)
except ValueError:
return False
return True
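# Quick examples (sketch, not from the original source):
# >>> is_hex_value("1f4")
# True
# >>> is_hex_value("0x10")   # int(..., 16) accepts the "0x" prefix
# True
# >>> is_hex_value("zz")
# False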
|
6ba5ac1cfa9b8a4f8397cc52a41694cca33a4b8d
| 3,639,370
|
from typing import Optional
def create_cluster(*, cluster_name: str) -> Optional[Operation]:
"""Create a dataproc cluster """
cluster_client = dataproc.ClusterControllerClient(client_options={"api_endpoint": dataproc_api_endpoint})
cluster = {
"project_id": project_id,
"cluster_name": cluster_name,
"config": {
"config_bucket": config_bucket,
"temp_bucket": temp_bucket,
"master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2"},
"worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2"},
},
}
logger.info("cluster: %s is creating now", cluster_name)
operation = cluster_client.create_cluster(request={"project_id": project_id, "region": region, "cluster": cluster})
logger.info("cluster: %s is created successfully", cluster_name)
return operation
|
1657190a7605f28f3c4dd2f2dc6c32230fb44087
| 3,639,371
|
import math
def gc_cache(seq: str) -> Cache:
"""Return the GC ratio of each range, between i and j, in the sequence
Args:
        seq: The sequence whose GC ratio we're querying
Returns:
Cache: A cache for GC ratio lookup
"""
n = len(seq)
arr_gc = []
for _ in seq:
arr_gc.append([math.inf] * len(seq))
# fill in the diagonal
for i in range(n):
if i == n - 1: # hackish
arr_gc[i][i] = arr_gc[i - 1][i - 1]
continue
arr_gc[i][i] = 1.0 if seq[i] in "GC" else 0.0
if i == n - 2 and not arr_gc[i][i]: # don't ignore last pair
arr_gc[i][i] = 1.0 if seq[i + 1] in "GC" else 0.0
# fill in the upper right of the array
for i in range(n):
for j in range(i + 1, n):
arr_gc[i][j] = arr_gc[i][j - 1] + arr_gc[j][j]
# convert to ratios
for i in range(n):
for j in range(i, n):
arr_gc[i][j] = round(arr_gc[i][j] / (j - i + 1), 1)
return arr_gc
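# Usage sketch with a hypothetical sequence: gc_cache returns an upper-triangular
# table where arr[i][j] is the GC ratio of seq[i..j], rounded to one decimal place.
# >>> arr = gc_cache("ATGC")
# >>> arr[0][3]   # 2 of the 4 bases are G/C
# 0.5
# >>> arr[2][3]
# 1.0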
|
7118cc96d0cd431b720b099b399c64ee419df5aa
| 3,639,372
|
def ParseVariableName(variable_name, args):
"""Parse a variable name or URL, and return a resource.
Args:
variable_name: The variable name.
args: CLI arguments, possibly containing a config name.
Returns:
The parsed resource.
"""
return _ParseMultipartName(variable_name, args,
'runtimeconfig.projects.configs.variables',
'variablesId')
|
1073739195ca1bb0ac427e89e66525a7e7ada40b
| 3,639,373
|
def index(request):
"""Home page"""
return render(request, 'read_only_site/index.html')
|
623c0cdc3229d1873e50ebc3065ca1ba55da50e7
| 3,639,374
|
def parse_calculation_strings_OLD(args):
"""form the strings into arrays
"""
calculations = []
for calculation in args.calculations:
calculation = calculation.split("/")
foreground = np.fromstring(
",".join(calculation[0].replace("x", "0")), sep=",")
background = np.fromstring(
",".join(calculation[1].replace("x", "0")), sep=",")
calculations.append((foreground, background))
args.calculations = calculations
return None
|
04c979cc09bd25d659dad0a96ca89b88b43267cb
| 3,639,375
|
import tempfile
import os
import shutil
import numpy as np
import tensorflow_io as tfio
def fixture_hdf5_scalar(request):
"""fixture_hdf5_scalar"""
import h5py # pylint: disable=import-outside-toplevel
tmp_path = tempfile.mkdtemp()
filename = os.path.join(tmp_path, "test.h5")
with h5py.File(filename, 'w') as f:
f.create_dataset('int8', data=np.int8(123))
f.create_dataset('int16', data=np.int16(123))
f.create_dataset('int32', data=np.int32(123))
f.create_dataset('int64', data=np.int64(123))
f.create_dataset('float32', data=np.float32(1.23))
f.create_dataset('float64', data=np.float64(1.23))
f.create_dataset('complex64', data=np.complex64(12+3j))
f.create_dataset('complex128', data=np.complex128(12+3j))
f.create_dataset('string', data=np.dtype('<S5').type("D123D"))
args = filename
def func(args):
"""func"""
i8 = tfio.IOTensor.from_hdf5(args)('/int8')
i16 = tfio.IOTensor.from_hdf5(args)('/int16')
i32 = tfio.IOTensor.from_hdf5(args)('/int32')
i64 = tfio.IOTensor.from_hdf5(args)('/int64')
f32 = tfio.IOTensor.from_hdf5(args)('/float32')
f64 = tfio.IOTensor.from_hdf5(args)('/float64')
c64 = tfio.IOTensor.from_hdf5(args)('/complex64')
c128 = tfio.IOTensor.from_hdf5(args)('/complex128')
ss = tfio.IOTensor.from_hdf5(args)('/string')
return [i8, i16, i32, i64, f32, f64, c64, c128, ss]
expected = [
np.int8(123),
np.int16(123),
np.int32(123),
np.int64(123),
np.float32(1.23),
np.float64(1.23),
np.complex64(12+3j),
np.complex128(12+3j),
np.dtype('<S5').type("D123D"),
]
def fin():
shutil.rmtree(tmp_path)
request.addfinalizer(fin)
return args, func, expected
|
6919238dbc879f2bc08c8c397895f890a5c428a0
| 3,639,376
|
def find_border(edge_list) :
"""
find_border(edge_list)
Find the borders of a hexagonal graph
Input
-----
edge_list : array
List of edges of the graph
Returns
-------
border_set : set
Set of vertices of the border
"""
G = nx.Graph([(edge_list[i,0], edge_list[i,1]) for i in range(len(edge_list))])
occurence_list = np.unique(np.reshape(edge_list, 2*len(edge_list)), return_counts=True)
# list of vertex of degree 2
sec_edge_list = occurence_list[0][np.argwhere(occurence_list[:][1] == 2)]
# list of vertex of degree 3
three_edge_list = occurence_list[0][np.argwhere(occurence_list[:][1] == 3)]
sec = np.reshape(sec_edge_list, newshape=(len(sec_edge_list)))
border_set = set(sec)
inner_set = set()
for elem in three_edge_list :
for neigh in G[elem[0]].keys() :
if len(G[neigh]) == 2 :
border_set.add(elem[0])
return border_set
|
718a2b56438caf60d3ca4e3cd7419452c8fbbb63
| 3,639,377
|
from typing import Set
import datetime
def get_all_files(credentials: Credentials, email: str) -> Set['DriveResult']:
"""Get all files shared with the specified email in the current half-year
(January-June or July-December of the current year)"""
# Create drive service with provided credentials
service = build('drive', 'v3', credentials=credentials, cache_discovery=False)
all_user_files = []
next_page_token = None
date = datetime.date.today()
while True:
# Request the next page of files
metadata, next_page_token = request_files(service, next_page_token, email, date)
all_user_files = all_user_files + metadata
print('\r{} files processed'.format(len(all_user_files)), end='')
# If we have reached the end of the list of documents, next_page_token will be None
if next_page_token is None:
break
return {DriveResult(student_email=file['owners'][0]['emailAddress'],
file_name=file['name'],
create_time=file['createdTime'],
url=file['webViewLink'])
for file in all_user_files}
|
eb7e491cac08bada675f0d39414ae3d907686741
| 3,639,378
|
def _split_kwargs(model, kwargs, lookups=False, with_fields=False):
"""
Split kwargs into fields which are safe to pass to create, and
m2m tag fields, creating SingleTagFields as required.
If lookups is True, TagFields with tagulous-specific lookups will also be
matched, and the returned tag_fields will be a dict of tuples in the
format ``(val, lookup)``
The only tagulous-specific lookup is __exact
For internal use only - likely to change significantly in future versions
Returns a tuple of safe_fields, singletag_fields, tag_fields
If with_fields is True, a fourth argument will be returned - a dict to
look up Field objects from their names
"""
safe_fields = {}
singletag_fields = {}
tag_fields = {}
field_lookup = {}
for field_name, val in kwargs.items():
# Check for lookup
if lookups and "__" in field_name:
orig_field_name = field_name
field_name, lookup = field_name.split("__", 1)
# Only one known lookup
if lookup == "exact":
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# Unknown - pass it on untouched
pass
else:
if isinstance(field, TagField):
# Store for later
tag_fields[field_name] = (val, lookup)
field_lookup[field_name] = field
continue
# Irrelevant lookup - no need to take special actions
safe_fields[orig_field_name] = val
continue
# No lookup
# Try to look up the field
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# Assume it's something clever and pass it through untouched
# If it's invalid, an error will be raised later anyway
safe_fields[field_name] = val
# Next field
continue
field_lookup[field_name] = field
# Take special measures depending on field type
if isinstance(field, SingleTagField):
singletag_fields[field_name] = val
elif isinstance(field, TagField):
# Store for later
if lookups:
tag_fields[field_name] = (val, None)
else:
tag_fields[field_name] = val
else:
safe_fields[field_name] = val
if with_fields:
return safe_fields, singletag_fields, tag_fields, field_lookup
return safe_fields, singletag_fields, tag_fields
|
f73cb84bab0889b51962ed3504b6de265831d18f
| 3,639,379
|
def sliceResultToBytes(sr):
"""Copies a FLSliceResult to a Python bytes object. Does not free the FLSliceResult."""
if sr.buf == None:
return None
lib.FLSliceResult_Release(sr)
b = bytes( ffi.buffer(sr.buf, sr.size) )
return b
|
0e2207a99749b4cd3df4b71ca7338de4c0ad6a06
| 3,639,380
|
def cycle_dual(G, cycles, avg_fun=None):
"""
Returns dual graph of cycle intersections, where each edge
is defined as one cycle intersection of the original graph
and each node is a cycle in the original graph.
The general idea of this algorithm is:
* Find all cycles which share edges by an efficient dictionary
operation
* Those edges which border on exactly two cycles are connected
The result is a possibly disconnected version of the dual
graph which can be further processed.
The naive algorithm is O(n_cycles^2) whereas this improved
algorithm is better than O(n_cycles) in the average case.
"""
    if avg_fun is None:
avg_fun = lambda c, w: average(c, weights=w)
dual = nx.Graph()
neighbor_cycles = find_neighbor_cycles(G, cycles)
# Construct dual graph
for ns in neighbor_cycles:
# Add cycles
for c, n in ((cycles[n], n) for n in ns):
dual.add_node(n, x=c.com[0], y=c.com[1], cycle=c, \
external=False, cycle_area=c.area())
# Connect pairs
if len(ns) == 2:
a, b = ns
c_a = cycles[a]
c_b = cycles[b]
sect = c_a.intersection(c_b)
wts = [G[u][v]['weight'] for u, v in sect]
conds = [G[u][v]['conductivity'] for u, v in sect]
wt = sum(wts)
#cond = average(conds, weights=wts)
#cond = min(conds)
cond = avg_fun(conds, wts)
dual.add_edge(a, b, weight=wt,
conductivity=cond, intersection=sect)
return dual
|
a923a4cea0f1d158e6936a68e513bd2285ea6b15
| 3,639,381
|
from os import path
import os
import shutil
def main():
"""Entry point"""
if check_for_unstaged_changes(TARGET_FILE):
print("ERROR: You seem to have unstaged changes to %s that would be overwritten."
% (TARGET_FILE))
print("Please clean, commit, or stash them before running this script.")
return 1
if not path.exists(path.dirname(TARGET_FILE)):
os.makedirs(path.dirname(TARGET_FILE))
shutil.copyfile(ORIGIN_FILE, TARGET_FILE)
print("Bootstrapping optdata is complete.")
for tool in TOOL_LIST:
for arch in ARCH_LIST:
optdata_dir = get_optdata_dir(tool, arch)
print(" * Copy %s %s files into: %s" % (arch, tool, optdata_dir))
print("NOTE: Make sure to add 'skiprestoreoptdata' as a switch on the build command line!")
return 0
|
0c628c917e596d3e1283dd729eac13d9a23a2d42
| 3,639,382
|
def get_timebucketedlog_reader(log, event_store):
"""
:rtype: TimebucketedlogReader
"""
return TimebucketedlogReader(log=log, event_store=event_store)
|
676e38a446f60dd8f2c90b38df572b2f5fc9c21e
| 3,639,383
|
def get_database_name(url):
"""Return a database name in a URL.
Example::
>>> get_database_name('http://foobar.com:5984/testdb')
'testdb'
:param str url: The URL to parse.
:rtype: str
"""
name = compat.urlparse(url).path.strip("/").split("/")[-1]
# Avoid re-encoding the name
if "%" not in name:
name = encode_uri_component(name)
return name
|
2916e5a5999aae68b018858701dfb5e695857f7f
| 3,639,384
|
def get_tags():
"""
    Here we want to fetch the tags that are related to the current user.
    To do that, we collect all of the user's posts and then gather every tag attached to them.
:return:
"""
result_tags = []
    # Collect all tags from every post of a given user into one list
def append_tag(user_posts):
tmp = []
for post in user_posts:
for tag in post.tags.all():
tmp.append(tag.tag_name)
return tmp
    # If a current user exists, use that user
if g.get('current_user', None):
user_posts_ = g.current_user.posts.all()
result_tags.extend(append_tag(user_posts_))
    # Otherwise, fall back to the default user
else:
user = User.query.get(1)
result_tags.extend(append_tag(user.posts.all()))
result_tags = list(set(result_tags))
return jsonify(result_tags)
|
821ca1bb222e4fe15ea336282fed0eb172d460f9
| 3,639,385
|
import os
def get_selinux_modules():
"""
Read all custom SELinux policy modules from the system
Returns 3-tuple (modules, retain_rpms, install_rpms)
where "modules" is a list of "SELinuxModule" objects,
"retain_rpms" is a list of RPMs that should be retained
during the upgrade and "install_rpms" is a list of RPMs
that should be installed during the upgrade
"""
modules = list_selinux_modules()
semodule_list = []
# list of rpms containing policy modules to be installed on RHEL 8
retain_rpms = []
install_rpms = []
# modules need to be extracted into cil files
# cd to /tmp/selinux and save working directory so that we can return there
# clear working directory
rmtree(WORKING_DIRECTORY, ignore_errors=True)
try:
wd = os.getcwd()
os.mkdir(WORKING_DIRECTORY)
os.chdir(WORKING_DIRECTORY)
except OSError:
api.current_logger().warning("Failed to access working directory! Aborting.")
return ([], [], [])
for (name, priority) in modules:
if priority == "200":
# Module on priority 200 was installed by an RPM
# Request $name-selinux to be installed on RHEL8
retain_rpms.append(name + "-selinux")
continue
if priority == "100":
# module from selinux-policy-* package - skipping
continue
# extract custom module and save it to SELinuxModule object
module_file = name + ".cil"
try:
run(['semodule', '-c', '-X', priority, '-E', name])
# check if the module contains invalid types and remove them if so
removed = check_module(module_file)
# get content of the module
try:
with open(module_file, 'r') as cil_file:
module_content = cil_file.read()
except OSError as e:
api.current_logger().warning("Error reading %s.cil : %s", name, str(e))
continue
semodule_list.append(SELinuxModule(
name=name,
priority=int(priority),
content=module_content,
removed=removed
)
)
except CalledProcessError:
api.current_logger().warning("Module %s could not be extracted!", name)
continue
# rename the cil module file so that it does not clash
# with the same module on different priority
try:
os.rename(module_file, "{}_{}".format(name, priority))
except OSError:
api.current_logger().warning("Failed to rename module file %s to include priority.", name)
# this is necessary for check if container-selinux needs to be installed
try:
run(['semanage', 'export', '-f', 'semanage'])
except CalledProcessError:
pass
# Check if modules contain any type, attribute, or boolean contained in container-selinux and install it if so
# This is necessary since container policy module is part of selinux-policy-targeted in RHEL 7 (but not in RHEL 8)
try:
run(['grep', '-w', '-r', '-E', "|".join(CONTAINER_TYPES)], split=False)
# Request "container-selinux" to be installed since container types where used in local customizations
# and container-selinux policy was removed from selinux-policy-* packages
install_rpms.append("container-selinux")
except CalledProcessError:
# expected, ignore exception
pass
try:
os.chdir(wd)
except OSError:
pass
rmtree(WORKING_DIRECTORY, ignore_errors=True)
return (semodule_list, list(set(retain_rpms)), list(set(install_rpms)))
|
20c548c3f2227551a51ce774014d63754251e1e6
| 3,639,386
|
def a_star_search(graph, start, goal):
"""Runs an A* search on the specified graph to find a path from the ''start'' node to the ''goal'' node.
Returns a list of nodes specifying a minimal path between the two nodes.
If no path exists (disconnected components), returns an empty list.
"""
all_nodes = graph.get_all_node_ids()
if start not in all_nodes:
raise NonexistentNodeError(start)
if goal not in all_nodes:
raise NonexistentNodeError(goal)
came_from, cost_so_far, goal_reached = _a_star_search_internal(graph, start, goal)
if goal_reached:
path = reconstruct_path(came_from, start, goal)
path.reverse()
return path
else:
return []
|
f2eabef1e30f12460359ea45cbc089f8fb28e5f9
| 3,639,387
|
import click
def output_format_option(default: OutputFormat = OutputFormat.TREE):
"""
A ``click.option`` for specifying a format to use when outputting data.
Args:
default (:class:`~ape.cli.choices.OutputFormat`): Defaults to ``TREE`` format.
"""
return click.option(
"--format",
"output_format",
type=output_format_choice(),
default=default.value,
callback=lambda ctx, param, value: OutputFormat(value.upper()),
)
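# A hedged usage sketch: the command below is hypothetical; only output_format_option
# comes from the snippet above. The decorated callback receives an OutputFormat member
# via the "output_format" keyword argument.
# @click.command()
# @output_format_option()
# def show(output_format):
#     click.echo(f"rendering as {output_format}")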
|
9f73a8b8d270975d16ec9d3b2962f4fd61491aab
| 3,639,388
|
def compute_errors(u_e, u):
"""Compute various measures of the error u - u_e, where
u is a finite element Function and u_e is an Expression.
Adapted from https://fenicsproject.org/pub/tutorial/html/._ftut1020.html
"""
print('u_e',u_e.ufl_element().degree())
# Get function space
V = u.function_space()
# Explicit computation of L2 norm
error = (u - u_e)**2*dl.dx
E1 = np.sqrt(abs(dl.assemble(error)))
# Explicit interpolation of u_e onto the same space as u
u_e_ = dl.interpolate(u_e, V)
error = (u - u_e_)**2*dl.dx
E2 = np.sqrt(abs(dl.assemble(error)))
# Explicit interpolation of u_e to higher-order elements.
# u will also be interpolated to the space Ve before integration
Ve = dl.FunctionSpace(V.mesh(), 'P', 5)
u_e_ = dl.interpolate(u_e, Ve)
error = (u - u_e)**2*dl.dx
E3 = np.sqrt(abs(dl.assemble(error)))
# Infinity norm based on nodal values
u_e_ = dl.interpolate(u_e, V)
E4 = abs(u_e_.vector().get_local() - u.vector().get_local()).max()
# L2 norm
E5 = dl.errornorm(u_e, u, norm_type='L2', degree_rise=3)
# H1 seminorm
E6 = dl.errornorm(u_e, u, norm_type='H10', degree_rise=3)
# Collect error measures in a dictionary with self-explanatory keys
errors = {'u - u_e': E1,
'u - interpolate(u_e, V)': E2,
'interpolate(u, Ve) - interpolate(u_e, Ve)': E3,
'infinity norm (of dofs)': E4,
'L2 norm': E5,
'H10 seminorm': E6}
return errors
|
c9fbd459ab1c3cd65fb4d290e1399dd4937ed5a2
| 3,639,389
|
def list_to_str(input_list, delimiter=","):
"""
Concatenates list elements, joining them by the separator specified by the
parameter "delimiter".
Parameters
----------
input_list : list
List with elements to be joined.
delimiter : String, optional, default ','.
The separator used between elements.
Returns
-------
String
        Returns a string resulting from the concatenation of the list's elements, separated by the delimiter.
"""
return delimiter.join(
[x if isinstance(x, str) else repr(x) for x in input_list]
)
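# Quick examples (sketch): non-string elements are rendered with repr().
# >>> list_to_str(['a', 'b', 'c'])
# 'a,b,c'
# >>> list_to_str([1, 'two', 3.0], delimiter='; ')
# '1; two; 3.0'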
|
4decfbd5a9d637f27473ec4a917998137af5ffe0
| 3,639,390
|
def strategy_supports_no_merge_call():
"""Returns if the current `Strategy` can operate in pure replica context."""
if not distribution_strategy_context.has_strategy():
return True
strategy = distribution_strategy_context.get_strategy()
return not strategy.extended._use_merge_call() # pylint: disable=protected-access
|
dc2b609a52d7e25b372e0cd1a04a0637d76b8ec1
| 3,639,391
|
def is_group(obj):
"""Returns true if the object is a h5py-like group."""
kind = get_h5py_kind(obj)
return kind in ["file", "group"]
|
37c86b6d4f052eab29106b9d51c17cdd36b1dc98
| 3,639,392
|
from bs4 import BeautifulSoup
# Likely sources of the helpers used below (assumed, added for completeness)
from nltk.stem import WordNetLemmatizer
from stop_words import get_stop_words
def analyze_page(page_url):
""" Analyzes the content at page_url and returns a list of the highes weighted
words.json/phrases and their weights """
html = fetch_html(page_url)
if not html:
return
soup = BeautifulSoup(html, "html.parser")
word_counts = {}
url_words = words_in_url(page_url)
stop_words = get_stop_words('english')
words_to_add = ['like', '...']
stop_words = stop_words + words_to_add
ignore_tags = ["script", "img", "meta", "style"] # html tags to ignore
weights = {'title': 15, 'div': .5, 'a': .3, 'span': .5, "link": .2, 'url': 22, \
'two' : 3, 'three': 3, 'four': 5, 'five': 5} # adjust weights here
lemma = WordNetLemmatizer()
for tag in soup.find_all():
if tag.name not in ignore_tags:
words = tag.find(text=True,
recursive=False) # with bs4, recursive = False means we will not be double counting tags
if words:
words = words.split()
                words = [w for w in words if w not in stop_words]  # remove common stop words
                words = [w for w in words if len(w) > 1]  # ignore single-character words
for index, word in enumerate(words):
                    word_lower = lemma.lemmatize(word.lower())  # lemmatize/stem words
multiplier = 1
if tag.name in weights: # assign weight based on HTML tag
multiplier = weights[tag.name]
if word_lower in word_counts:
word_counts[word_lower] = word_counts[word_lower] + (1 * multiplier)
else:
word_counts[word_lower] = 1 * multiplier
if index < (len(words) - 1): # two word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['two'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
if index < (len(words) - 2): # three word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 2]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['three'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
if index < (len(words) - 3): # four word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 2]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 3]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['four'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
if index < (len(words) - 4): # five word phrase
two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 2]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 3]).lower()).strip() \
+ ' ' + lemma.lemmatize((words[index + 4]).lower()).strip()
two_word = two_word.strip()
if two_word != word_lower:
if two_word in word_counts:
word_counts[two_word] = word_counts[two_word] + (weights['five'] * multiplier)
else:
word_counts[two_word] = 1 * multiplier
    for word in url_words:  # add weight for words that appear in the url string
if word in word_counts:
word_counts[word] = word_counts[word] + weights['url']
def determine(x, top_25):
""" Helper function for removing phrases that are substrings of other phrases """
if len(x[0].split()) > 1:
# print(x[0])
for i in top_25:
if x[0] in i[0] and x[0] != i[0]:
return False
return True
top_25 = list(reversed(sorted(word_counts.items(), key=lambda x: x[1])[-25:])) # grab highest 25 weighted items
final_list = [x for x in top_25 if determine(x, top_25)] # remove phrases that are substrings of other phrases
return final_list
|
55928add263defa51a171a2dfb20bffe6491430c
| 3,639,393
|
from typing import Iterable
from typing import List
def load_config_from_paths(config_paths: Iterable[str], strict: bool = False) -> List[dict]:
"""
Load configuration from paths containing \*.yml and \*.json files.
As noted in README.config, .json will take precedence over .yml files.
:param config_paths: Path to \*.yml and \*.json config files.
:param strict: Set to true to error if the file is not found.
:return: A list of configs in increasing order of precedence.
"""
# Put the .json configs after the .yml configs to make sure .json takes
# precedence over .yml.
sorted_paths = sorted(config_paths, key=lambda x: x.endswith(".json"))
return list(map(lambda path: load_config_from_file(path, strict), sorted_paths))
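# Ordering sketch (hypothetical paths): the stable sort puts .yml entries first and
# .json entries last, so .json configs take precedence when merged downstream.
# load_config_from_paths(["base.yml", "override.json", "extra.yml"]) loads the files
# in the order base.yml, extra.yml, override.json.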
|
8e32c46e7e620ae02dffcc652b32bb0098a0a2b3
| 3,639,394
|
from typing import List
def sort_flats(flats_unsorted: List[arimage.ARImage]):
""" Sort flat images into a dictionary with "filter" as the key """
    if not flats_unsorted:
return None
flats = { }
logger.info("Sorting flat images by filter")
for flat in flats_unsorted:
fl = flat.filter
if fl not in flats:
# Found a flat with a new filter
# Create a new array in the dictionary
logger.info("Found a flat with filter=" + fl)
flats[fl] = []
flats[fl].append(flat)
return flats
|
d0e3fe2c7e1a8f34cf7ed8f6985d3dd7bc82f3f1
| 3,639,395
|
import concurrent
import logging
def run_in_parallel(function, list_of_kwargs_to_function, num_workers):
"""Run a function on a list of kwargs in parallel with ThreadPoolExecutor.
Adapted from code by mlbileschi.
Args:
function: a function.
list_of_kwargs_to_function: list of dictionary from string to argument
value. These will be passed into `function` as kwargs.
num_workers: int.
Returns:
list of return values from function.
"""
if num_workers < 1:
raise ValueError(
'Number of workers must be greater than 0. Was {}'.format(num_workers))
with concurrent.futures.ThreadPoolExecutor(num_workers) as executor:
futures = []
logging.info(
'Adding %d jobs to process pool to run in %d parallel '
'threads.', len(list_of_kwargs_to_function), num_workers)
for kwargs in list_of_kwargs_to_function:
f = executor.submit(function, **kwargs)
futures.append(f)
for f in concurrent.futures.as_completed(futures):
if f.exception():
# Propagate exception to main thread.
raise f.exception()
return [f.result() for f in futures]
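# A minimal usage sketch; the worker function below is hypothetical and only
# illustrates the expected kwargs format.
def _add(a, b):
    """Toy worker used to demonstrate run_in_parallel."""
    return a + b

# run_in_parallel(_add, [{"a": 1, "b": 2}, {"a": 3, "b": 4}], num_workers=2)
# -> [3, 7]  (results are returned in submission order)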
|
24b99f68ba1221c4f064a65540e6c165c9474e43
| 3,639,396
|
import os
import requests
from urllib.parse import urljoin
def upload(host, key, path):
    """ Upload one file at a time """
    url = urljoin(host, 'api/files?key=' + key)
    os.chdir(path[0])
    # Use a context manager so the file handle is closed after the upload
    with open(path[1], 'rb') as f:
        r = requests.post(url, files={"File": f})
    r.raise_for_status()
    return r.json()['id']
|
78e8cac5239d631f5dea8dae0bb0a52e43e1b307
| 3,639,397
|
def show_project(project_id):
"""return a single project formatted according to Swagger spec"""
try:
project = annif.project.get_project(
project_id, min_access=Access.hidden)
except ValueError:
return project_not_found_error(project_id)
return project.dump()
|
3f7108ec7cb27270f91517bef194f3514c3eb4e5
| 3,639,398
|
from math import gcd

# find_inverse (a modular-inverse helper) is assumed to be defined elsewhere in the original module
def pollard_rho(n: int, e: int, seed: int = 2) -> int:
"""
    Pollard's rho algorithm for breaking an RSA key from its public components.
    n - modulus of the public key
    e - public exponent of the public key
    seed - starting value for the factoring cycle
"""
a, b = seed, seed
p = 1
while (p == 1):
a = ( pow(a,2) + 1 ) % n
b = ( pow(b,2) + 1 )
b = ( pow(b,2) + 1 ) % n
p = gcd( abs(a-b)%n, n)
if p == n:
return pollard_rho(n, e, seed+1) #brutal_force(n, e,) #
else:
q = n // p
phi = (p - 1) * (q - 1)
d = find_inverse(e, phi)
return d
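# A hedged example, assuming find_inverse computes a modular inverse. For the toy key
# n = 3233 = 61 * 53 and e = 17, phi = 60 * 52 = 3120 and the private exponent is
# d = 2753, since 17 * 2753 = 46801 = 15 * 3120 + 1.
# >>> pollard_rho(3233, 17)
# 2753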
|
4870627a5fca863d4110f3cadfdc1e7b618c2a48
| 3,639,399
|