content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import click
import yaml
import os
import csv
import copy
import time
def upload_subjects(
    subject_set_id,
    manifest_files,
    allow_missing,
    remote_location,
    mime_type,
    file_column,
):
    """
    Uploads subjects from each of the given MANIFEST_FILES.
    Example with only local files:
    $ panoptes subject-set upload-subjects 4667 manifest.csv
    Local filenames will be automatically detected in the manifest and
    uploaded, or filename columns can be specified with --file-column.
    If you are hosting your media yourself, you can put the URLs in the
    manifest and specify the column number(s):
    $ panoptes subject-set upload-subjects -r 1 4667 manifest.csv
    $ panoptes subject-set upload-subjects -r 1 -r 2 4667 manifest.csv
    Any local files will still be detected and uploaded.

    Returns -1 on error; otherwise returns None after the upload completes
    (or after the resume state has been written on failure).
    """
    if (
        len(manifest_files) > 1
        and any(map(lambda m: m.endswith('.yaml'), manifest_files))
    ):
        click.echo(
            'Error: YAML manifests must be processed one at a time.',
            err=True,
        )
        return -1
    elif manifest_files[0].endswith('.yaml'):
        # A YAML manifest is the saved state of a previous, interrupted
        # upload; resume it instead of starting a fresh upload.
        with open(manifest_files[0], 'r') as yaml_manifest:
            upload_state = yaml.load(yaml_manifest, Loader=yaml.FullLoader)
        if upload_state['state_version'] > CURRENT_STATE_VERSION:
            click.echo(
                'Error: {} was generated by a newer version of the Panoptes '
                'CLI and is not compatible with this version.'.format(
                    manifest_files[0],
                ),
                err=True,
            )
            return -1
        if upload_state['subject_set_id'] != subject_set_id:
            click.echo(
                'Warning: You specified subject set {} but this YAML '
                'manifest is for subject set {}.'.format(
                    subject_set_id,
                    upload_state['subject_set_id'],
                ),
                err=True,
            )
            click.confirm(
                'Upload {} to subject set {} ({})?'.format(
                    manifest_files[0],
                    subject_set_id,
                    SubjectSet.find(subject_set_id).display_name,
                ),
                abort=True
            )
            upload_state['subject_set_id'] = subject_set_id
        resumed_upload = True
    else:
        # Fresh upload: build the initial state from the CLI arguments.
        upload_state = {
            'state_version': CURRENT_STATE_VERSION,
            'subject_set_id': subject_set_id,
            'manifest_files': manifest_files,
            'allow_missing': allow_missing,
            'remote_location': remote_location,
            'mime_type': mime_type,
            'file_column': file_column,
            'waiting_to_upload': [],
            'waiting_to_link': {},
        }
        resumed_upload = False
    # One MIME type may be given for several remote locations (it is
    # replicated); otherwise the counts must match one-to-one.
    remote_location_count = len(upload_state['remote_location'])
    mime_type_count = len(upload_state['mime_type'])
    if remote_location_count > 1 and mime_type_count == 1:
        upload_state['mime_type'] = (
            upload_state['mime_type'] * remote_location_count
        )
    elif remote_location_count > 0 and mime_type_count != remote_location_count:
        click.echo(
            'Error: The number of MIME types given must be either 1 or equal '
            'to the number of remote locations.',
            err=True,
        )
        return -1

    def validate_file(file_path):
        # Reject missing, empty, or oversized files before uploading.
        if not os.path.isfile(file_path):
            click.echo(
                'Error: File "{}" could not be found.'.format(
                    file_path,
                ),
                err=True,
            )
            return False
        file_size = os.path.getsize(file_path)
        if file_size == 0:
            click.echo(
                'Error: File "{}" is empty.'.format(
                    file_path,
                ),
                err=True,
            )
            return False
        elif file_size > MAX_UPLOAD_FILE_SIZE:
            click.echo(
                'Error: File "{}" is {}, larger than the maximum {}.'.format(
                    file_path,
                    humanize.naturalsize(file_size),
                    humanize.naturalsize(MAX_UPLOAD_FILE_SIZE),
                ),
                err=True,
            )
            return False
        return True

    def get_index_fields(headers):
        # Columns prefixed with '%' are index fields; return them as a
        # comma-separated string (empty string when there are none).
        index_fields = [header.lstrip('%') for header in headers if header.startswith('%')]
        return ",".join(str(field) for field in index_fields)

    subject_set = SubjectSet.find(upload_state['subject_set_id'])
    if not resumed_upload:
        subject_rows = []
        for manifest_file in upload_state['manifest_files']:
            # 'U' mode was removed in Python 3.11; newline='' is the mode
            # the csv module documents for reader input files.
            with open(manifest_file, 'r', newline='') as manifest_f:
                file_root = os.path.dirname(manifest_file)
                r = csv.reader(manifest_f, skipinitialspace=True)
                headers = next(r)
                # update set metadata for indexed sets
                index_fields = get_index_fields(headers)
                if index_fields:
                    subject_set.metadata['indexFields'] = index_fields
                    subject_set.save()
                # remove leading % from subject metadata headings
                cleaned_headers = [header.lstrip('%') for header in headers]
                for row in r:
                    metadata = dict(zip(cleaned_headers, row))
                    files = []
                    if not upload_state['file_column']:
                        # Auto-detect file columns from the first data row.
                        upload_state['file_column'] = []
                        for field_number, col in enumerate(row, start=1):
                            file_path = os.path.join(file_root, col)
                            if os.path.exists(file_path):
                                upload_state['file_column'].append(
                                    field_number,
                                )
                                if not validate_file(file_path):
                                    return -1
                                files.append(file_path)
                    else:
                        for field_number in upload_state['file_column']:
                            file_path = os.path.join(
                                file_root,
                                row[field_number - 1]
                            )
                            if not validate_file(file_path):
                                return -1
                            files.append(file_path)
                    # Remote media: pair each location column with its MIME type.
                    for field_number, _mime_type in zip(
                        upload_state['remote_location'],
                        upload_state['mime_type'],
                    ):
                        files.append({_mime_type: row[field_number - 1]})
                    if len(files) == 0:
                        click.echo(
                            'Could not find any files in row:',
                            err=True,
                        )
                        click.echo(','.join(row), err=True)
                        if not upload_state['allow_missing']:
                            return -1
                        else:
                            continue
                    subject_rows.append((files, metadata))
                if not subject_rows:
                    click.echo(
                        'File {} did not contain any rows.'.format(
                            manifest_file,
                        ),
                        err=True,
                    )
                    return -1
        subject_rows = list(enumerate(subject_rows))
        upload_state['waiting_to_upload'] = copy.deepcopy(subject_rows)
    else:
        # Re-queue subjects that were created but never confirmed.
        # Iterate over a snapshot: the original code deleted keys from the
        # dict while iterating .items(), which raises RuntimeError.
        for subject_id, subject_row in list(
            upload_state['waiting_to_link'].items()
        ):
            try:
                subject = Subject.find(subject_id)
            except PanoptesAPIException:
                upload_state['waiting_to_upload'].append(subject_row)
                del upload_state['waiting_to_link'][subject_id]
        subject_rows = copy.deepcopy(upload_state['waiting_to_upload'])
    pending_subjects = []

    def move_created(limit):
        # Wait until no more than `limit` async saves are outstanding,
        # moving completed subjects from "upload" to "link" state.
        while len(pending_subjects) > limit:
            for subject, subject_row in pending_subjects:
                if subject.async_save_result:
                    pending_subjects.remove((subject, subject_row))
                    upload_state['waiting_to_upload'].remove(subject_row)
                    upload_state['waiting_to_link'][subject.id] = subject_row
            time.sleep(0.5)

    def link_subjects(limit):
        # Flush the link queue once it exceeds `limit` subjects.
        if len(upload_state['waiting_to_link']) > limit:
            subject_set.add(list(upload_state['waiting_to_link'].keys()))
            upload_state['waiting_to_link'].clear()

    with click.progressbar(
        subject_rows,
        length=len(subject_rows),
        label='Uploading subjects',
    ) as _subject_rows:
        try:
            with Subject.async_saves():
                for subject_row in _subject_rows:
                    count, (files, metadata) = subject_row
                    subject = Subject()
                    subject.links.project = subject_set.links.project
                    for media_file in files:
                        subject.add_location(media_file)
                    subject.metadata.update(metadata)
                    subject.save()
                    pending_subjects.append((subject, subject_row))
                    move_created(MAX_PENDING_SUBJECTS)
                    link_subjects(LINK_BATCH_SIZE)
            # Drain everything that is still pending.
            move_created(0)
            link_subjects(0)
        finally:
            if (
                len(pending_subjects) > 0
                or len(upload_state['waiting_to_link']) > 0
            ):
                click.echo('Error: Upload failed.', err=True)
                if click.confirm(
                    'Would you like to save the upload state to resume the '
                    'upload later?',
                    default=True,
                ):
                    while True:
                        state_file_name = 'panoptes-upload-{}.yaml'.format(
                            subject_set_id,
                        )
                        state_file_name = click.prompt(
                            'Enter filename to save to',
                            default=state_file_name,
                        )
                        if not state_file_name.endswith('.yaml'):
                            click.echo(
                                'Error: File name must end in ".yaml".',
                                err=True,
                            )
                            if click.confirm(
                                'Save to {}.yaml?'.format(state_file_name),
                                default=True,
                            ):
                                state_file_name += '.yaml'
                            else:
                                continue
                        if not is_valid_filename(state_file_name):
                            click.echo(
                                'Error: {} is not a valid file name'.format(
                                    state_file_name,
                                ),
                                err=True,
                            )
                            sanitized_filename = sanitize_filename(
                                state_file_name,
                            )
                            if click.confirm(
                                'Save to {}?'.format(
                                    sanitized_filename,
                                ),
                                default=True,
                            ):
                                state_file_name = sanitized_filename
                            else:
                                continue
                        if os.path.exists(state_file_name):
                            if not click.confirm(
                                'File {} already exists. Overwrite?'.format(
                                    state_file_name,
                                ),
                                default=False,
                            ):
                                continue
                        break
                    with open(state_file_name, 'w') as state_file:
                        yaml.dump(upload_state, state_file)
def get_next_prev(qt_id, topic_id):
    """ Find the "next" and "previous" qtemplates, by topic, position.

    Returns a (previous_id, next_id) pair of ints; either may be None when
    qt_id is first/last in the topic, or when topic_id is falsy.
    """
    if not topic_id:
        return None, None
    # This linear scan is inefficient, but the question storage layout
    # doesn't offer an obvious alternative. Could be revisited some time.
    questions = General.get_q_list(topic_id, numdone=False) or []
    # Only questions with a positive position participate in the ordering.
    ordered = [q for q in questions if q['position'] > 0]
    prev_id = None
    next_id = None
    seen_current = False
    for question in ordered:
        qtid = int(question['qtid'])
        if seen_current:
            # First question after the current one.
            next_id = qtid
            break
        if qtid == int(qt_id):
            seen_current = True
        else:
            # Keep tracking the most recent id until the current is found.
            prev_id = qtid
    return prev_id, next_id
def get_text_and_video_items(full_dict):
    """
    Return a new dict containing only the entries whose hasAudio
    attribute is False (i.e. text and video items, no audio).
    """
    filtered = {}
    for key, value in full_dict.items():
        if not value[dk.hasAudio]:
            filtered[key] = value
    return filtered
def svm_predict(model, samples):
    """Predict the responses for *samples* with the trained *model*.

    Returns the second element of ``model.predict``'s result, flattened
    to a 1-D array.
    """
    prediction = model.predict(samples)
    return prediction[1].ravel()
import ast
from typing import List
from typing import Tuple
def _get_sim300(node: ast.Compare) -> List[Tuple[int, int, str]]:
    """
    Get a list of all Yoda conditions.
    Compare(
        left=Constant(value='Yoda', kind=None),
        ops=[Eq()],
        comparators=[Name(id='i_am', ctx=Load())],
    )
    """
    errors: List[Tuple[int, int, str]] = []
    # Only flag comparisons of the form <constant> == <something>.
    is_const_eq = (
        isinstance(node.left, AST_CONST_TYPES)
        and len(node.ops) == 1
        and isinstance(node.ops[0], ast.Eq)
    )
    if not is_const_eq:
        return errors
    left = to_source(node.left)
    # String constants are re-quoted so they render as literals in the
    # message (covers both the pre-3.8 ast.Str and ast.Constant forms).
    if isinstance(node.left, ast.Str) or (
        isinstance(node.left, ast.Constant)
        and isinstance(node.left.value, str)
    ):
        left = f'"{left}"'
    right = to_source(node.comparators[0])
    message = SIM300.format(left=left, right=right)
    errors.append((node.lineno, node.col_offset, message))
    return errors
def dbl_cos_fit_func(p, x):
    # the frequency is fixed
    """
    A double sinus (fundamental + 1st harmonic) used as a fit function.

    ``p`` holds an optional constant baseline (when its length is odd)
    followed by (amplitude, phase) pairs for the fundamental and the
    first harmonic.
    """
    if len(p) % 2 == 0:
        baseline = 0
        amps = p
    else:
        baseline = p[0]
        amps = p[1:]
    fundamental = amps[0] * np.cos(2 * np.pi * x + 2 * np.pi * amps[1])
    harmonic = amps[2] * np.cos(4.0 * np.pi * x + 4 * np.pi * amps[3])
    return baseline + fundamental + harmonic
def __none_to_zero(string):
    """
    Return '0' if the string is "none" or "null";
    return the string itself otherwise.
    @type string: C{string}
    @param string: The string to test for values of "none" or "null".
    @rtype: C{string}
    @return: '0' if the string is "none" or "null", the string itself
    otherwise.
    """
    # Case-insensitive membership test replaces the original or-chain.
    return '0' if string.lower() in ("none", "null") else string
def get_homepage_header():
    """ Returns the page header image of the homepage """
    homepage_id = get_homepage_id()
    # No configured homepage means no header either.
    return None if homepage_id is None else get_page_header(homepage_id)
from typing import List
from typing import Dict
import ast
def trigger_data_load(
    regions: List[str],
    cluster_config_path: str,
    default_config_path: str,
    env_config_path: str,
    input_vars: Dict[str, str],
) -> dict:
    """
    Trigger the EMR data-load job in every requested AWS region.

    :param regions: AWS regions in which the EMR job needs to be triggered
    :type regions: list
    :param cluster_config_path: EMR cluster configuration file path
    :type cluster_config_path: str
    :param default_config_path: Default configuration file path
    :type default_config_path: str
    :param env_config_path: Environment specific configuration file path
    :type env_config_path: str
    :param input_vars: Input variables which serve as parameters for the configuration files
    :type input_vars: dict
    :raises Exception: when a region has no instance configuration
    :return: the cluster-creation response of the last region processed
    """
    config_parser = ConfigParser()
    config_parser.load_configuration(
        cluster_config_path, default_config_path, env_config_path, input_vars
    )
    response = None
    for region in regions:
        input_vars["REGION"] = region  # expose runtime information
        if region not in config_parser.flow_config:
            raise Exception("Instance config missing for region {}".format(region))
        emr_client = SESSION.client(service_name="emr", region_name=region)
        emr_launcher = EmrLauncher(emr_client)
        # Resolve ${...} placeholders against input_vars, then parse the
        # substituted repr back into a dict.
        runtime_flow_config = ast.literal_eval(
            substitute_placeholders_with_values(
                str(config_parser.flow_config), input_vars
            )
        )
        response = emr_launcher.create_cluster(
            flow_config=runtime_flow_config,
            cluster_config=config_parser.cluster_config,
            region=region,
        )
        for step in runtime_flow_config["Steps"]:
            emr_launcher.add_step(
                step_name=step.get("Name"),
                action_on_failure=step.get("ActionOnFailure"),
                args=step.get("Args"),
            )
    # BUG FIX: the original returned inside the loop, so only the first
    # region was ever launched; now every region is processed.
    return response
def _test_afqt(df):
    """ NLSY provides percentile information for AFQT scores, reconstructed here
    as a check based on NLSY instructions.

    Expects a DataFrame with 'AFQT_RAW' and 'AFQT_1' columns; returns True
    when the reconstructed percentiles match the provided 'AFQT_1' column.
    """
    # Breaking the logic of the code a bit, copies of the object are drawn from here.
    df_internal = df.copy(deep=True)
    # Adjust for missing values here, even though this is also done later in the code
    # for all variables.
    for label in ['AFQT_RAW', 'AFQT_1']:
        cond = (df_internal[label] < 0)
        df_internal.loc[cond, label] = np.nan
    # Match ``AFQT_RAW`` to percentile of distribution
    cond = df_internal['AFQT_RAW'] <= 23.5
    df_internal.loc[cond, 'AFQT_PERCENTILES'] = 1
    # Each tuple is a (lower, upper, percentile) bin: raw scores in
    # (lower, upper] map to that percentile (half-open on the left,
    # see the (>, <=) comparison at the bottom).
    infos = []
    infos += [(23.50, 27.00, 2), (27.00, 29.50, 3), (29.50, 32.00, 4), (32.00, 34.00, 5)]
    infos += [(34.00, 36.50, 6), (36.50, 38.00, 7), (38.00, 40.00, 8), (40.00, 41.00, 9)]
    infos += [(41.00, 42.50, 10), (42.50, 44.00, 11), (44.00, 45.50, 12), (45.50, 47.00, 13)]
    infos += [(47.00, 48.50, 14), (48.50, 49.50, 15), (49.50, 51.00, 16), (51.00, 52.50, 17)]
    # Percentiles 18-28: unit-wide bins offset by a constant.
    for i in range(18, 29):
        infos += [(i + 34.50, i + 35.50, i)]
    infos += [(63.50, 64.00, 29), (64.00, 65.00, 30), (65.00, 65.50, 31), (65.50, 66.50, 32)]
    infos += [(66.50, 67.00, 33), (67.00, 67.50, 34), (67.50, 68.50, 35), (68.50, 69.00, 36)]
    infos += [(69.00, 69.50, 37), (69.50, 70.50, 38), (70.50, 71.00, 39), (71.00, 71.50, 40)]
    infos += [(71.50, 72.00, 41), (72.00, 73.00, 42), (73.00, 73.50, 43), (73.50, 74.00, 44)]
    infos += [(74.00, 74.50, 45), (74.50, 75.50, 46), (75.50, 76.00, 47), (76.00, 76.50, 48)]
    infos += [(76.50, 77.50, 49)]
    # Percentiles 50-93: half-point-wide bins whose offset shrinks by 0.5
    # per step, per the NLSY lookup table.
    for i, j in enumerate(range(50, 62), 1):
        infos += [(j + 28.00 - 0.50 * i, j + 28.00, j)]
    for i, j in enumerate(range(62, 94), 1):
        infos += [(j + 21.50 - 0.50 * i, j + 21.50, j)]
    infos += [(99.00, 100.00, 94)]
    for i, j in enumerate(range(95, 98), 1):
        infos += [(j + 5.50 - 0.50 * i, j + 5.50, j)]
    infos += [(101.50, 102.50, 98), (102.5, 105.00, 99)]
    # Apply the bins: raw score in (lower, upper] -> percentile value.
    for info in infos:
        lower, upper, value = info
        cond = (df_internal['AFQT_RAW'] > lower) & (df_internal['AFQT_RAW'] <= upper)
        df_internal.loc[cond, 'AFQT_PERCENTILES'] = value
    return df_internal['AFQT_PERCENTILES'].equals(df_internal['AFQT_1'])
import csv
def _load_roiscsv(fp):
    """
    Loads the specified ROIs CSV file.
    :param fp: the file object for the ROIs CSV data to load
    :type fp: file
    :return: the list of predictions
    :rtype: list
    """
    predictions = []
    for index, row in enumerate(csv.DictReader(fp)):
        # Score defaults to 1.0 when the column is absent.
        score = float(row["score"]) if "score" in row else 1.0
        # Label defaults to the empty string.
        label = row["label_str"] if "label_str" in row else ""
        # Coordinates come either as corner pairs (x0,y0,x1,y1) or as a
        # box (x,y,w,h); the box form wins when both are present.
        coords = None
        if "x0" in row:
            coords = (
                int(float(row["x0"])),
                int(float(row["y0"])),
                int(float(row["x1"])),
                int(float(row["y1"])),
            )
        if "x" in row:
            left = int(float(row["x"]))
            top = int(float(row["y"]))
            coords = (
                left,
                top,
                left + int(float(row["w"])) - 1,
                top + int(float(row["h"])) - 1,
            )
        predictions.append(Prediction(index, label, score, coords=coords))
    return predictions
def argmin(array):
    """Return the index of the minimum element of *array*.

    Ties are resolved in favour of the smallest index. Raises ValueError
    for an empty array.

    The original docstring said "maximum", which contradicted the code,
    and the implementation used the Python-2-only ``xrange``.
    """
    return min(range(len(array)), key=array.__getitem__)
def braid_group_rep_loss(input_dim: int = 1):
    """
    Purpose
    -------
    loss for the braid group. When the loss is minimal, the braid group relations are satisfied for the generator R_op.
    Parameters
    ----------
    input_dim, the dimension of the R_op generator for the braid group.

    Returns
    -------
    A Keras-compatible loss function ``loss(y_true, y_pred)`` whose value
    is the sum of a Yang-Baxter residual and two R2-move residuals.
    """
    def loss(y_true,y_pred):
        # y_pred is a concatenation of four segments along axis 1; slice
        # them apart (widths: 3d, 3d, 2d, 2d where d = input_dim).
        equation_1_out=tf.slice(y_pred,[0,0],[-1,3*input_dim])
        equation_2_out=tf.slice(y_pred,[0,3*input_dim],[-1,3*input_dim])
        final_R_2_out_1=tf.slice(y_pred,[0,6*input_dim],[-1,2*input_dim])
        final_R_2_out_2=tf.slice(y_pred,[0,8*input_dim],[-1,2*input_dim])
        # Mean squared residuals of each braid relation.
        A=K.mean(math_ops.square(equation_1_out - equation_2_out), axis=-1) # YangBaxter
        B=K.mean(math_ops.square(y_true-final_R_2_out_1), axis=-1) # R2 moves
        C=K.mean(math_ops.square(y_true-final_R_2_out_2), axis=-1) # R2 moves
        return A+B+C
    return loss
def strains():
    """
    Endpoint that returns a list of all available strains.
    Returns
    -------
    strains : JSON
        Returns a JSON array of all available strains.
    """
    # The previous try/except only re-raised the exception unchanged
    # ("except Exception as e: raise e"), so it added nothing; any error
    # from to_json still propagates to the caller exactly as before.
    return df2.to_json(orient="records")
def _number_of_digits(number: int) -> int:
    """ Returns the number of decimal digits in the given number.

    Uses the string length instead of ``log10`` so that 0, negative
    numbers (sign not counted), and very large integers — where float
    rounding of log10 can be off by one — are all handled exactly.
    """
    return len(str(abs(number)))
import glob
import os
def include(d, e):
    """Generate a pair of (directory, file-list) for installation.
    'd' -- A directory
    'e' -- A glob pattern"""
    pattern = '%s/%s' % (d, e)
    # Keep only regular files; directories matching the pattern are skipped.
    files = []
    for match in glob.glob(pattern):
        if os.path.isfile(match):
            files.append(match)
    return (d, files)
def get_document(doc_slug: str) -> QuerySet:
    """ Возвращает документ по слагу. """
    # select_related pre-fetches the FK targets in the same query.
    queryset = models.Document.objects.filter(slug=doc_slug)
    return queryset.select_related('category', 'publisher')
def currentsellings():
    """Show the list of items that the logged-in user is currently selling."""
    # sold == 0 excludes items that have already been purchased;
    # session["user_id"] identifies the logged-in seller.
    items = Item.query.filter(
        (Item.user_id == session["user_id"]) & (Item.sold == 0)).all()
    return render_template("currentsellings.html", items=items)
def catches(raisable: Raisable, catchable: Catchable):
    """
    Tests if raisable value would be catchable by catchable value.
    """
    # Normalize a single exception class into a one-element list.
    candidates = [catchable] if isinstance(catchable, type) else catchable
    # Classes are matched by subclassing, instances by isinstance.
    matches = issubclass if isinstance(raisable, type) else isinstance
    return any(matches(raisable, exc) for exc in candidates)
def bootstrap_test(
    stat_val,
    bootstrap_estimates,
    nobs,
    stat_val_control,
    bootstrap_estimates_control,
    nobs_control
) -> BootstrapTestResult:
    """
    :param stat_val: sample value of statistic in treatment group
    :param bootstrap_estimates: bootstrap estimates (10000 or so) of statistic
    :param nobs: number of observations in initial sample (needed only for degrees of freedom for t-distribution)
    :param stat_val_control: sample value of statistic in control group
    :param bootstrap_estimates_control: same as above
    :param nobs_control: same as above
    :return: BootstrapTestResult(estimates_diff_std, est_p_value, est_t_statistic, median_est_diff,
                                 median_est_t_statistic, median_est_t_p_value, stat_diff, t_statistic, t_p_value)
    """
    # Pairwise treatment-minus-control differences of bootstrap replicates.
    # NOTE(review): this assumes both estimate arrays align elementwise
    # (same number of replicates) — confirm with the caller.
    estimates_diff = bootstrap_estimates - bootstrap_estimates_control
    median_est_diff = np.median(estimates_diff, axis=0)
    estimates_diff_std = np.std(estimates_diff, axis=0)
    # Two-sided bootstrap p-value: twice the smaller tail proportion.
    est_p_value = np.min([
        (estimates_diff <= 0).mean(axis=0),
        (estimates_diff > 0).mean(axis=0)
    ], axis=0) * 2
    # Pooled degrees of freedom for the two-sample t approximation.
    df = nobs + nobs_control - 2
    # Back out a t-statistic from the bootstrap p-value, signed by the
    # median difference.
    est_t_statistic = stats.distributions.t.isf(est_p_value / 2, df=df) * np.sign(median_est_diff)
    median_est_t_statistic = np.divide(median_est_diff, estimates_diff_std)
    median_est_t_p_value = stats.distributions.t.sf(np.abs(median_est_t_statistic), df) * 2
    # t-test using the observed statistic difference and the bootstrap
    # standard error.
    stat_diff = stat_val - stat_val_control
    t_statistic = np.divide(stat_diff, estimates_diff_std)
    t_p_value = stats.distributions.t.sf(np.abs(t_statistic), df) * 2  # use np.abs to get upper tail
    return BootstrapTestResult(estimates_diff_std, est_p_value, est_t_statistic, median_est_diff,
                               median_est_t_statistic, median_est_t_p_value, stat_diff, t_statistic, t_p_value)
import urllib
import tempfile
def query_ned_by_refcode(refcode='2011ApJS..193...18W',
    root_url='http://nedwww.ipac.caltech.edu/cgi-bin/nph-objsearch'):
    """
    Query NED for basic data on objects cited in a particular reference.
    keywords:
        refcode - 19-digit reference code for journal article.
            Example: 2011ApJS..193...18W is the reference code for Willett et al. (2011), ApJS, 193, 18
    Returns NED_MainTable with the following information for each target within the search radius:
    -----------------------------------------------------
    |         Name         |   Unit  |   Type  | Format |
    -----------------------------------------------------
    |          No.         |   None  |  int32  |   12i  |
    |      Object Name     |   None  |  |S30   |   30s  |
    |        RA(deg)       | degrees | float64 | 25.17e |
    |       DEC(deg)       | degrees | float64 | 25.17e |
    |         Type         |   None  |  |S6    |    6s  |
    |       Velocity       |  km/sec | float64 | 25.17e |
    |       Redshift       |   None  | float64 | 25.17e |
    |     Redshift Flag    |   None  |  |S4    |    4s  |
    | Magnitude and Filter |   None  |  |S5    |    5s  |
    |   Distance (arcmin)  |  arcmin | float64 | 25.17e |
    |      References      |   None  |  int32  |   12i  |
    |         Notes        |   None  |  int32  |   12i  |
    |   Photometry Points  |   None  |  int32  |   12i  |
    |       Positions      |   None  |  int32  |   12i  |
    |    Redshift Points   |   None  |  int32  |   12i  |
    |    Diameter Points   |   None  |  int32  |   12i  |
    |     Associations     |   None  |  int32  |   12i  |
    -----------------------------------------------------
    """
    # Ported from Python 2: the original used urllib.urlencode, the
    # undefined/py2-only urllib2, and the py2 ``print >>tf`` syntax,
    # none of which work on Python 3.
    import urllib.parse
    import urllib.request
    # Create dictionary of search parameters, then parse into query URL
    request_dict = {'search_type': 'Search', 'refcode': refcode, 'of': 'xml_main'}
    query_url = "%s?%s" % (root_url, urllib.parse.urlencode(request_dict))
    # Retrieve the VO-table payload from NED.
    with urllib.request.urlopen(query_url) as handle:
        payload = handle.read()
    # Write the data to a file, flush it to get the proper VO table format,
    # and read it into an atpy table. A trailing newline matches the old
    # ``print`` behavior.
    tf = tempfile.NamedTemporaryFile()
    tf.write(payload + b'\n')
    tf.file.flush()
    t = atpy.Table(tf.name, type='vo', verbose=False)
    # Return atpy table
    return t
def calc_distance_between_point_and_line(line_points, p3):
    """[Calcs the perpendicular distance between a point and a line]
    Arguments:
        line_points {[list]} -- [list of two 2-by-1 np arrays with the two points that define the line]
        p3 {[np array]} -- [point to calculate the distance from]
    """
    start = np.array(line_points[0])
    end = np.array(line_points[1])
    direction = end - start
    # Signed distance: 2-D cross product of the line direction with the
    # start->point vector, normalized by the line length.
    return np.cross(direction, p3 - start) / np.linalg.norm(direction)
def set_hash_status(qhash, **kwargs):
    """
    Set the enabled status of a hash
    Variables:
    qhash       => Hash to change the status
    Arguments:
    None
    Data Block:
    "true"
    Result example:
    {"success": True}
    """
    user = kwargs['user']
    # The request body is the new value for the 'enabled' flag.
    data = request.json
    # 64/40/32 characters: SHA-256, SHA-1, or MD5 hex digests respectively.
    if len(qhash) not in [64, 40, 32]:
        return make_api_response(None, "Invalid hash length", 400)
    # Only admins and signature managers may change a safelist entry.
    if 'admin' in user['type'] or 'signature_manager' in user['type']:
        return make_api_response({'success': STORAGE.safelist.update(
            qhash, [(STORAGE.safelist.UPDATE_SET, 'enabled', data)])})
    return make_api_response({}, "You are not allowed to change the status", 403)
import matplotlib.pyplot as plt
def test_generate_x(energy_model, xtrajs, sample_energies, max_energy=150,
                    figsize=None, layout=None, colors=None, titles=True):
    """ Generates using x trajectories as an example
    Parameters
    ----------
    energy_model : Energy Model
        Energy model object that must provide the function energy(x)
    xtrajs : list of arrays
        List of x-trajectories.
    max_energy : float
        Maximum energy to be shown in histograms
    figsize : (width, height) or None
        Figure size
    layout : (rows, cols) or None
        Arrangement of multi-axes plot

    Returns
    -------
    (fig, axes) : the matplotlib figure and its axes array.
    """
    # Broadcast: allow a single trajectory or a single energy array to be
    # paired against a list of the other.
    if isinstance(xtrajs, list) and not isinstance(sample_energies, list):
        sample_energies = [sample_energies for i in range(len(xtrajs))]
    if not isinstance(xtrajs, list) and isinstance(sample_energies, list):
        xtrajs = [xtrajs for i in range(len(sample_energies))]
    if not isinstance(xtrajs, list) and not isinstance(sample_energies, list):
        xtrajs = [xtrajs]
        sample_energies = [sample_energies]
    # Keep only the generated energies below the display cutoff.
    energies_sample_x_low = [se[np.where(se < max_energy)[0]] for se in sample_energies]
    # Plot defaults.
    if figsize is None:
        figsize = (5*len(xtrajs), 4)
    if layout is None:
        layout = (1, len(xtrajs))
    if colors is None:
        # BUG FIX: the original iterated ``len(xtrajs)`` (an int), which
        # raised TypeError; iterate over range(...) instead.
        colors = ['blue' for i in range(len(xtrajs))]
    # NOTE(review): with layout rows > 1, ``axes`` is 2-D and axes[i]
    # indexing below would address a row, not a single axis — confirm
    # callers only use single-row layouts.
    fig, axes = plt.subplots(layout[0], layout[1], figsize=figsize)
    for i, xtraj in enumerate(xtrajs):
        # print some stats
        print('Traj ', i, 'Fraction of low energies: ', np.size(energies_sample_x_low[i])/(1.0*sample_energies[i].size))
        print('Traj ', i, 'Minimum energy: ', np.min(sample_energies[i]))
        # plot generated energies
        axes[i].hist(energies_sample_x_low[i], 70, density=True, histtype='stepfilled', color='black', alpha=0.2)
        axes[i].hist(energies_sample_x_low[i], 70, density=True, histtype='step', color='black', linewidth=2, label='z sampling')
        # plot simulated energies
        if xtraj is not None:
            energies_x = energy_model.energy(xtraj)
            min_energy = min(energies_x.min(), energies_sample_x_low[i].min())
            axes[i].hist(energies_x, 50, density=True, histtype='stepfilled', color=colors[i], alpha=0.2)
            axes[i].hist(energies_x, 50, density=True, histtype='step', color=colors[i], linewidth=2, label='MD')
        # plot energy histogram (comparison of input and generated)
        axes[i].set_xlim(min_energy, max_energy)
        axes[i].set_xlabel('Energy / kT')
        axes[i].set_yticks([])
        axes[i].set_ylabel('Density')
        axes[i].legend(frameon=False)
        if titles:
            axes[i].set_title('Trajectory ' + str(i+1))
    return fig, axes
def ubatch_to_csv(batch):
    """
    Utility function to convert a batch of APIUser data to CSV.
    """
    permkey = 'permissions_dict'
    first = batch[0]
    # Columns: the record's own keys (minus the nested permissions dict),
    # followed by the permission names themselves.
    columns = [key for key in first if key != permkey]
    columns += list(first[permkey])
    header = ','.join(columns)
    lines = []
    for record in batch:
        # A top-level value wins; otherwise fall back to the permission
        # with the same name (None when absent in both).
        cells = [
            str(record.get(col, record[permkey].get(col, None)))
            for col in columns
        ]
        lines.append(','.join(cells))
    return '{}\n{}'.format(header, '\n'.join(lines))
import requests
def get_user_repositories(username: str, show_forked: bool) -> list[Repository]:
    """
    Retrieve the github repositories for a specific user.
    Args:
        username: The github username
        show_forked: Whether to keep or discard forked repos
    Returns: The github repositories for the user.
    """
    api_responses_per_page = 100  # Github currently has a max limit of 100 responses per page, though this could change
    api_page = 1  # pages start at 1, not 0
    repos: list[Repository] = []
    response_json = True  # Initialize to true to start the first loop. Will be a dictionary afterwards
    # Keep fetching pages until GitHub returns an empty list.
    while response_json:
        query = {'per_page': api_responses_per_page, 'page': api_page}
        # NOTE(review): authenticates with the module-level GITHUB_USERNAME /
        # GITHUB_TOKEN credentials, not the queried `username`.
        response = requests.get(f"https://api.github.com/users/{username}/repos", params=query, auth=HTTPBasicAuth(username=GITHUB_USERNAME, password=GITHUB_TOKEN))
        response_json = response.json()
        if response.status_code != 200:
            # flask.abort raises, so the return below is never reached;
            # it only documents the intended empty result.
            flask.abort(response.status_code)
            return []
        # Convert to Repository objects and add to repos
        for repo_json in response_json:
            if show_forked or not repo_json['fork']:
                repos.append(Repository(repo_json))
        api_page += 1  # Move to the next page
    return repos
import torch
def evaluate_sample(
    ds,
    sample_id,
    t=None,
    visualise=True,
    gt_masked=None,
    model=None,
    mask_targ=None,
    save=False,
    pose=None,
):
    """
    Evaluate one sample of a dataset (ds). Calculate PSNR and mAP,
    and visualise different model components for this sample. Additionally,
    1) a different timestep (`t`) can be chosen, which can be different from the
    timestep of the sample (useful for rendering the same view over different
    timesteps).

    Returns a dict with the rendered images, masks, PSNR scores, the
    matplotlib figure (or None), and — when `mask_targ` is given — the
    average precision of the predicted mask.
    """
    # An explicit pose overrides the one stored with the sample.
    if pose is None:
        sample = ds[sample_id]
    else:
        sample = ds.__getitem__(sample_id, pose)
    results = model.render(sample, t=t)
    figure = None
    # Which optional model heads were rendered.
    output_person = "person_weights_sum" in results
    output_transient = "_rgb_fine_transient" in results
    # NOTE(review): img_wh is computed but never used below.
    img_wh = tuple(sample["img_wh"].numpy())
    # Reshape flat ray outputs back into images.
    img_gt = ds.x2im(sample["rgbs"], type_="pt")
    img_pred = ds.x2im(results["rgb_fine"][:, :3], type_="pt")
    mask_stat = ds.x2im(results["_rgb_fine_static"][:, 3])
    if output_transient:
        mask_transient = ds.x2im(results["_rgb_fine_transient"][:, 4])
        mask_pred = mask_transient
        if output_person:
            mask_person = ds.x2im(results["_rgb_fine_person"][:, 5])
            # Combined foreground mask: transient + person.
            mask_pred = mask_pred + mask_person
        else:
            mask_person = np.zeros_like(mask_transient)
    beta = ds.x2im(results["beta"])
    img_pred_static = ds.x2im(results["rgb_fine_static"][:, :3], type_="pt")
    img_pred_transient = ds.x2im(results["_rgb_fine_transient"][:, :3])
    if output_person:
        img_pred_person = ds.x2im(results["_rgb_fine_person"][:, :3])
    # Mask quality vs. the ground-truth mask, when provided.
    if mask_targ is not None:
        average_precision = average_precision_score(
            mask_targ.reshape(-1), mask_pred.reshape(-1)
        )
    # Reconstruction quality of the full and static-only renders.
    psnr = metrics.psnr(img_pred, img_gt).item()
    psnr_static = metrics.psnr(img_pred_static, img_gt).item()
    if visualise:
        # 3x3 grid: GT / prediction / (blank), static / transient / person,
        # combined mask / transient mask / person mask.
        figure, ax = plt.subplots(figsize=(8, 5))
        figure.suptitle(f"Sample: {sample_id}.\n")
        plt.tight_layout()
        plt.subplot(331)
        plt.title("GT")
        if gt_masked is not None:
            plt.imshow(torch.from_numpy(gt_masked))
        else:
            plt.imshow(img_gt)
        plt.axis("off")
        plt.subplot(332)
        plt.title(f"Pred. PSNR: {psnr:.2f}")
        plt.imshow(img_pred.clamp(0, 1))
        plt.axis("off")
        plt.subplot(333)
        plt.axis("off")
        plt.subplot(334)
        plt.title(f"Static. PSNR: {psnr_static:.2f}")
        plt.imshow(img_pred_static)
        plt.axis("off")
        plt.subplot(335)
        plt.title(f"Transient")
        plt.imshow(img_pred_transient)
        plt.axis("off")
        if "_rgb_fine_person" in results:
            plt.subplot(336)
            plt.title("Person")
            plt.axis("off")
            plt.imshow(img_pred_person)
        else:
            plt.subplot(336)
            plt.axis("off")
        plt.subplot(337)
        if mask_targ is not None:
            plt.title(f"Mask. AP: {average_precision:.4f}")
        else:
            plt.title("Mask.")
        plt.imshow(mask_pred)
        plt.axis("off")
        plt.subplot(338)
        plt.title(f"Mask: Transient.")
        plt.imshow(mask_transient)
        plt.axis("off")
        plt.subplot(339)
        plt.title(f"Mask: Person.")
        plt.imshow(mask_person)
        plt.axis("off")
    if visualise and not save:
        plt.show()
    # Collect everything the caller may want to inspect or save.
    results = {}
    results["figure"] = figure
    results["im_tran"] = img_pred_transient
    results["im_stat"] = img_pred_static
    results["im_pred"] = img_pred
    results["im_targ"] = img_gt
    results["psnr"] = psnr
    results["mask_pred"] = mask_pred
    results["mask_stat"] = mask_stat
    if output_person:
        results["mask_pers"] = mask_person
        results["im_pers"] = img_pred_person
    results["mask_tran"] = mask_transient
    if mask_targ is not None:
        results["average_precision"] = average_precision
    # Move tensors back to the CPU (the figure stays as-is).
    for k in results:
        if k == "figure":
            continue
        if type(results[k]) == torch.Tensor:
            results[k] = results[k].to("cpu")
    return results
from googleapiclient import discovery
from googleapiclient import errors
from typing import NamedTuple
def retrieve_best_run(
    project_id: str, job_id: str
) -> NamedTuple('Outputs', [('metric_value', float), ('alpha', float),
                            ('max_iter', int)]):
    """Retrieves the parameters of the best Hypertune run.

    :param project_id: GCP project that owns the AI Platform job.
    :param job_id: ID of the hyperparameter-tuning job.
    :raises googleapiclient.errors.HttpError: if the job lookup fails.
    :return: (metric_value, alpha, max_iter) of the best trial.
    """
    ml = discovery.build('ml', 'v1')
    job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
    request = ml.projects().jobs().get(name=job_name)
    try:
        response = request.execute()
    except errors.HttpError as err:
        # The original printed the error and fell through, which then hit
        # a NameError on the undefined `response` (and a bare `except:`
        # swallowed everything else). Log and re-raise instead.
        print(err)
        raise
    print(response)
    # Trials are returned best-first, so index 0 is the winner.
    best_trial = response['trainingOutput']['trials'][0]
    metric_value = best_trial['finalMetric']['objectiveValue']
    alpha = float(best_trial['hyperparameters']['alpha'])
    max_iter = int(best_trial['hyperparameters']['max_iter'])
    return (metric_value, alpha, max_iter)
def FindDeck(transact, msg_list):
    """
    Scan *msg_list* for a new-game connection message carrying a deck.

    Returns a DeckInfo for the first message containing one, or None if no
    message is a deck message.
    :param transact:
    :param msg_list:
    :return:
    """
    for msg in msg_list:
        if 'connectResp' not in msg:
            continue
        try:
            deck = msg['connectResp']['deckMessage']['deckCards']
            Log('Found Deck in new game connection')
            Log(str(deck) + '\n')
        except KeyError:
            # connectResp without the expected deck payload -> not a deck message
            return
        return DeckInfo(transact, deck)
from typing import Optional
def mean(x: VariableLike,
         dim: Optional[str] = None,
         *,
         out: Optional[VariableLike] = None) -> VariableLike:
    """Element-wise mean over the specified dimension.
    If the input has variances, the variances stored in the output are based on
    the "standard deviation of the mean", i.e.,
    :math:`\\sigma_{mean} = \\sigma / \\sqrt{N}`.
    :math:`N` is the length of the input dimension.
    :math:`\\sigma` is estimated as the average of the standard deviations of
    the input elements along that dimension.
    See :py:func:`scipp.sum` on how rounding errors for float32 inputs are handled.
    :param x: Input data.
    :param dim: Dimension along which to calculate the mean. If not
                given, the mean over all dimensions is calculated.
    :param out: Optional output buffer.
    :raises: If the dimension does not exist, or the dtype cannot be summed,
             e.g., if it is a string.
    :return: The mean of the input values.
    :seealso: :py:func:`scipp.nanmean`.
    """
    # Omitting `dim` entirely selects the all-dimensions overload in C++.
    extra = {} if dim is None else {'dim': dim}
    return _call_cpp_func(_cpp.mean, x, out=out, **extra)
def HarmonicOscillator(inverse_mass_matrix, k=1.0, m=1.0):
    """Potential and Kinetic energy of an harmonic oscillator."""

    def potential_energy(q: TensorVariable) -> TensorVariable:
        # U(q) = 1/2 * k * q^2, summed over all coordinates
        return at.sum(0.5 * k * at.square(q))

    def kinetic_energy(momentum: TensorVariable) -> TensorVariable:
        # K(p) = 1/2 * p^T M^{-1} p
        velocity = inverse_mass_matrix * momentum
        return at.sum(0.5 * at.dot(velocity, momentum))

    return potential_energy, kinetic_energy
def guid_to_num(guid):
    """
    Convert a DHT guid to an integer.
    Args:
        guid: The guid to convert, as a string or unicode, in
            hexadecimal.
    Returns:
        An integer corresponding to the DHT guid given.
    """
    # Strip a legacy Python 2 "L" (long) suffix before parsing as hex.
    hex_digits = guid.rstrip('L')
    return int(hex_digits, 16)
def script_code(script_name):
    """Returns the four-letter ISO 15924 code of a script from its long name.

    Hard-coded overrides take precedence over the generated lookup table;
    unknown names map to "Zzzz" (ISO 15924's "unknown script" code).
    """
    load_data()
    folded_script_name = _folded_script_name(script_name)
    try:
        return _HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE[folded_script_name]
    except KeyError:
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # only a missing key should trigger the fallback lookup.
        return _folded_script_name_to_code.get(folded_script_name, "Zzzz")
from typing import Dict
def get_dashboard() -> Dict:
    """Get dashboard for user

    Collects the current user's surveys and reports into a single list of
    serialized dicts (each tagged with a 'type' key).

    :return: Returns dictionary with surveys and reports
    :rtype: Dict
    """
    user = get_user()
    user_surveys = get_user_surveys(user)
    result = []
    for survey in user_surveys:
        author = get_user(survey.AuthorId)
        result.append({
            'type': 'survey',
            'endsOn': survey.EndsOn.timestamp() if survey.EndsOn is not None else None,
            'startedOn': survey.StartedOn.timestamp() if survey.StartedOn is not None else None,
            'id': survey.id,
            'name': survey.Name,
            'sharedTo': get_survey_users(survey),
            'ankieterId': survey.AnkieterId,
            'isActive': survey.IsActive,
            'questionCount': survey.QuestionCount,
            'backgroundImg': survey.BackgroundImg,
            'userId': user.id,
            'answersCount': get_answers_count(survey),
            'authorId': author.id,
            'authorName': author.CasLogin
        })
    user_reports = get_user_reports(user)
    for report in user_reports:
        try:
            survey = get_survey(report.SurveyId)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Skipping reports whose connected
            # survey cannot be loaded is a deliberate best-effort choice.
            continue
        author = get_user(report.AuthorId)
        result.append({
            'type': 'report',
            'id': report.id,
            'name': report.Name,
            'sharedTo': get_report_users(report),
            'connectedSurvey': {"id": report.SurveyId, "name": survey.Name},
            'backgroundImg': report.BackgroundImg,
            'userId': user.id,
            'authorId': author.id,
            'authorName': author.CasLogin
        })
    return {"objects": result}
def acos(close):
    """Vector Trigonometric ACos

    Thin wrapper around the ``ACOS`` indicator: element-wise arc cosine.

    :param close: input series of values (assumed in [-1, 1] — TODO confirm
        ACOS's behaviour on out-of-range inputs)
    :return: element-wise arc cosine of ``close``
    :real:
    """
    return ACOS(close)
import itertools
def gather_slice_list_items(slices, key):
    """For a list of slices, get the flattened list of all of a certain key."""
    collected = []
    for slice_dict in slices:
        if key in slice_dict:
            collected.extend(slice_dict[key])
    return collected
def mock_rasterio_open_cogs(band):
    """Mock rasterio Open for Sentinel2 dataset."""
    prefix = "s3://sentinel-cogs"
    # Only sentinel-cogs URIs are expected; redirect them to the test bucket.
    assert band.startswith(prefix)
    return rasterio.open(band.replace(prefix, SENTINEL_COG_BUCKET))
def advance_time(df, delay, column=None, keep_all_timestep=False):
    """
    This function rolls the given columns of the given dataframe by a number of hours defined by the delay.
    It also erases the last n-rows (n=delay) of each sequences.

    :param df: (pd.DataFrame) frame with a 2-level MultiIndex whose level 0
        identifies independent sequences — assumed; TODO confirm with callers.
    :param delay: (int) number of timesteps (hours for the 'datetime' column)
        to shift values backwards from the future.
    :param column: (str or list of str, optional) columns to roll; defaults to
        all columns.
    :param keep_all_timestep: (bool) if set to True, at each timestamp, for each column, the model not only roll the value
    from the future, but also keeps all intermediate values between t and t+delay.
    Therefore, each cell becomes an array containing n values (n=delay)
    :return: (pd.DataFrame) the shifted frame with the last `delay` rows of
        each sequence removed.
    """
    df = df.copy()
    if column is None:
        column = list(df.columns)
    if not isinstance(column, list):
        column = [column]
    # 'datetime' is shifted by a real time delta rather than rolled, then
    # excluded from the roll below.
    if 'datetime' in column or 'datetime' in df.columns:
        df.datetime += pd.Timedelta(delay, unit='h')
        try:
            column.remove('datetime')
        except:
            # NOTE(review): bare except kept as-is; only ValueError
            # ('datetime' not in list) is expected here.
            pass
    sequences = np.unique(df.index.get_level_values(0))
    if keep_all_timestep:
        # One accumulator list per column; filled sequence by sequence below.
        cols = dict()
        for col in column:
            cols[col] = []
    for seq in sequences:
        df_seq = df.loc[seq]
        for col in column:
            if not keep_all_timestep:
                # Roll values of this sequence `delay` steps toward the past.
                df.loc[(seq, col)] = np.roll(df_seq[col], -delay)
            else:
                # Keep every intermediate shift t..t+delay as a per-cell list.
                list_timestamp = []
                for t in range(delay+1):
                    list_timestamp.append(np.roll(df_seq[col], -t))
                list_timestamp = np.vstack(list_timestamp).transpose().tolist()
                cols[col] += list_timestamp
        seq_length = len(df_seq)
        # Countdown index per sequence: last `delay` rows get values 0..delay-1.
        indexes = np.arange(0, seq_length)[::-1]
        df.loc[(seq, 'temp_index')] = indexes  # Create temporary indexes to indicate which values to delete
    if keep_all_timestep:
        for col in cols:
            df[col] = cols[col]
    df = df.set_index('temp_index', append=True)
    df.drop(np.arange(0, delay), level=2, inplace=True)
    # Drop those values in the temporary index (this corresponds to the last delayed frames
    df = df.reset_index(level=2, drop=True)  # Remove the temporary indexes
    df.index = df.index.set_levels(df.index.levels[1] + delay, level=1)  # Shift the actual index by the delay
    return df
from re import T
def course():
    """ Courses Controller """
    mode = session.s3.hrm.mode

    def prep(r):
        # Personal-mode users may not access this controller at all.
        if mode is not None:
            auth.permission.fail()
        if r.component_name == "training":
            s3.crud_strings["hrm_training"].label_create = T("Add Trainee")
        return True

    s3.prep = prep
    # Non-admins only see courses belonging to their root organisation.
    if not auth.s3_has_role(ADMIN) and not s3.filter:
        s3.filter = auth.filter_by_root_org(s3db.hrm_course)
    return s3_rest_controller(rheader=s3db.hrm_rheader)
from pathlib import Path
def run_tests(datout, tests, dat_inst=None, sim_id="", trb_exp=False,
              hor_avg=False, chunks=None, **kw):
    """Run test functions for WRF output postprocessed with WRFlux.
    Thresholds are hard-coded.
    Parameters
    ----------
    datout : nested dict
        Postprocessed output for all variables.
    tests : list of str
        Tests to perform.
        Choices: testing.all_tests
    dat_inst : xarray DataArray, optional
        WRF instantaneous output needed for w test. The default is None.
    sim_id : str, optional
        ID of the current test simulation. The default is "".
    trb_exp : bool, optional
        Turbulent fluxes were calculated explicitly. The default is False.
    hor_avg : bool, optional
        Horizontal averaging was used in postprocessing. The default is False.
    chunks : dict of integers, optional
        Mapping from dimension "x" and/or "y" to chunk sizes used in postprocessing.
        If given, the boundaries in the chunking directions are pruned.
        The default is None.
    **kw :
        Keyword arguments passed to test functions.
    Returns
    -------
    failed : pandas DataFrame
        "FAIL" and "pass" labels for all tests and variables.
    err : pandas DataFrame
        R2 error statistics for performed tests.
    """
    if tests is None:
        tests = all_tests
    # Work on a copy: tests may be removed below (e.g. "Y=0").
    tests = tests.copy()
    for test in tests:
        if test not in all_tests:
            raise ValueError("Test {} not available! Available tests:\n{}".format(test, ", ".join(all_tests)))
    variables = list(datout.keys())
    # Result tables: one row per variable, one column per test.
    failed = pd.DataFrame(columns=tests, index=variables)
    err = pd.DataFrame(columns=tests, index=variables)
    failed[:] = ""
    err[:] = ""
    # cut boundaries for non-periodic BC or if chunking was used
    attrs = datout[variables[0]]["flux"].attrs
    iloc = {}
    if (not attrs["PERIODIC_X"]) or (chunks is not None and "x" in chunks):
        iloc["x"] = slice(1, -1)
    if (not attrs["PERIODIC_Y"]) or (chunks is not None and "y" in chunks):
        iloc["y"] = slice(1, -1)
    if attrs["PERIODIC_Y"] == 0:
        # The Y=0 symmetry test only makes sense with periodic y boundaries.
        if "Y=0" in tests:
            tests.remove("Y=0")
    avg_dims = None
    if hor_avg:
        # Infer which horizontal dimensions were averaged out: any of x/y that
        # no longer appears (staggered or not) in the advection tendencies.
        avg_dims = []
        dat = datout[variables[0]]["tend"]["adv"]
        for d in tools.xy:
            if (d not in dat.dims) and (d + "_stag" not in dat.dims):
                avg_dims.append(d)
    # for w test: cut first time step
    dat_inst_lim = dat_inst.isel(Time=slice(1, None), **iloc)
    datout_lim = {}
    for v, datout_v in datout.items():
        datout_lim[v] = {}
        for n, dat in datout_v.items():
            if "ID" in dat.dims:
                # remove theta_pert label from budget method IDs if this does not lead to duplicate labels
                IDs = []
                for ID in dat.ID.values:
                    ID = ID.split(" ")
                    if "theta_pert" in ID:
                        ID_new = ID.copy()
                        ID_new.remove("theta_pert")
                        if len(ID_new) == 0:
                            ID_new = ["native"]
                        if ID_new not in dat.ID:
                            ID = ID_new
                    IDs.append(" ".join(ID))
                dat["ID"] = IDs
            if "dim_coords" in tests:
                test_dim_coords(dat, dat_inst, v, n, failed)
            if hor_avg:
                # Sanity check: averaged dimensions must be gone entirely.
                for avg_dim in avg_dims:
                    for stag in ["", "_stag"]:
                        assert avg_dim + stag not in dat.dims
            datout_lim[v][n] = tools.loc_data(dat, iloc=iloc)
    fpath = Path(__file__).parent
    for var, datout_v in datout_lim.items():
        print("Variable: " + var)
        figloc = fpath / "figures" / var
        failed_i = {}
        err_i = {}
        if "budget" in tests:
            tend = datout_v["tend"]["net"].sel(side="tendency")
            forcing = datout_v["tend"]["net"].sel(side="forcing")
            kw["figloc"] = figloc / "budget"
            # Per-simulation / per-variable R2 thresholds (hard-coded, see docstring).
            if (var == "w") and ("open BC y hor_avg" in sim_id):
                kw["thresh"] = 0.995
            elif (var in ["u", "v", "w"]) and ("open BC" in sim_id):
                kw["thresh"] = 0.999
            elif var == "t":
                if "open BC" in sim_id:
                    kw["thresh"] = 0.999
                if "symmetric BC" in sim_id:
                    kw["thresh"] = 0.995
                elif attrs["USE_THETA_M"] == 1:
                    if attrs["OUTPUT_DRY_THETA_FLUXES"] == 0:
                        # lower thresh as cartesian tendency for thm is close to 0
                        if attrs["MP_PHYSICS"] > 0:
                            kw["thresh_cartesian"] = 0.96
                            kw["thresh"] = 0.9998
                        else:
                            kw["thresh_cartesian"] = 0.995
                    # reduce threshold for WENO and monotonic advection as
                    # dry theta budget is not perfectly closed
                    elif (attrs["SCALAR_ADV_OPT"] >= 3) or (attrs["MOIST_ADV_OPT"] >= 3):
                        kw["thresh"] = 0.88
                    elif attrs["MOIST_ADV_OPT"] == 2:
                        kw["thresh"] = 0.96
            failed_i["budget"], err_i["budget"] = test_budget(tend, forcing, **kw)
            # Thresholds are one-shot overrides; drop them before the next test.
            for thresh in ["thresh", "thresh_cartesian"]:
                if thresh in kw:
                    del kw[thresh]
        adv = datout_v["tend"]["adv"]
        if "decomp_sumdir" in tests:
            if attrs["HESSELBERG_AVG"] == 0:
                kw["thresh"] = 0.995
            elif trb_exp:
                kw["thresh"] = 0.999
            kw["figloc"] = figloc / "decomp_sumdir"
            failed_i["decomp_sumdir"], err_i["decomp_sumdir"] = test_decomp_sumdir(
                adv, datout_v["corr"], **kw)
            if "thresh" in kw:
                del kw["thresh"]
        if "decomp_sumcomp" in tests:
            if trb_exp:
                # reduce threshold for explicit turbulent fluxes
                kw["thresh"] = 0.999
            kw["figloc"] = figloc / "decomp_sumcomp"
            failed_i["decomp_sumcomp"], err_i["decomp_sumcomp"] = test_decomp_sumcomp(adv, **kw)
            if "thresh" in kw:
                del kw["thresh"]
        if ("dz_out" in tests) and (var != "q"):  # TODOm: why so bad for q?
            kw["figloc"] = figloc / "dz_out"
            adv_noavgdir = adv
            if hor_avg:
                thresh = {"t": 0.85, "u": 0.7, "v": 0.995, "w": 0.92}
                kw["thresh"] = thresh[var]
                # Exclude directions that were averaged out horizontally.
                adv_noavgdir = adv.sel(dir=[d for d in adv.dir.values if d.lower() not in avg_dims])
            failed_i["dz_out"], err_i["dz_out"] = test_dz_out(adv_noavgdir, **kw)
            if "thresh" in kw:
                del kw["thresh"]
        if "adv_2nd" in tests:
            kw["figloc"] = figloc / "adv_2nd"
            failed_i["adv_2nd"], err_i["adv_2nd"] = test_2nd(adv, **kw)
        if ("w" in tests) and (var == variables[-1]) and (dat_inst is not None):
            # only do test once: for last variable
            kw["figloc"] = figloc / "w"
            failed_i["w"], err_i["w"] = test_w(dat_inst_lim, **kw)
        if ("mass" in tests) and (var == "t"):
            if "dz_out" in tests:
                if hor_avg:
                    kw["thresh"] = 0.85
                else:
                    kw["thresh"] = 0.995
            elif attrs["HESSELBERG_AVG"] == 0:
                kw["thresh"] = 0.99998
            kw["figloc"] = figloc / "mass"
            failed_i["mass"], err_i["mass"] = test_mass(datout_v["tend_mass"], **kw)
            if "thresh" in kw:
                del kw["thresh"]
        if "periodic" in tests:
            kw["figloc"] = figloc / "mass"
            failed_i["periodic"] = test_periodic(datout_v, attrs, var, **kw)
        if "NaN" in tests:
            failed_i["NaN"] = test_nan(datout_v)
        if "sgs" in tests:
            # The two SGS schemes (first two IDs) should give identical totals.
            sgs_sum = datout_v["tend"]["adv"].sel(comp="trb_s").sum("dir")
            if np.allclose(sgs_sum[0], sgs_sum[1], atol=1e-7, rtol=1e-5):
                failed_i["sgs"] = False
            else:
                failed_i["sgs"] = True
        if hor_avg and ("Y=0" in tests):
            failed_i["Y=0"], err_i["Y=0"] = test_y0(adv)
        # store results
        for test, f in failed_i.items():
            if f:
                failed.loc[var, test] = "FAIL"
            else:
                failed.loc[var, test] = "pass"
        for test, e in err_i.items():
            err.loc[var, test] = e
    return failed, err
def reset_password(request):
    """view to reset the password"""
    # Guard clause: only POST is accepted.
    if request.method != "POST":
        return Response({'error': ['{} method is not allowed'.format(request.method)]},
                        status=status.HTTP_405_METHOD_NOT_ALLOWED)
    valid, errors = validate(*['code', 'password'], **get_request_data(request))
    if not valid:
        return Response({"error": errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
    data = get_request_data(request)
    hex_code = hexify(data.get("code"))
    try:
        reset_entry = ForgotPasswordModel.objects.get(code=hex_code)
    except ForgotPasswordModel.DoesNotExist:
        return Response({"error": ['code invalid']}, status=status.HTTP_404_NOT_FOUND)
    account = reset_entry.user
    account.set_password(data.get('password'))
    account.save(update_fields=("password",))
    # The one-time reset code is consumed after a successful reset.
    reset_entry.delete()
    return Response({"message": "Password reset successfully"}, status=status.HTTP_202_ACCEPTED)
def str_to_datetime(date: str) -> dt:
    """Parse a 'YYYY-MM-DD' string into a datetime; None passes through."""
    return None if date is None else dt.strptime(date, '%Y-%m-%d')
def test_applies_method_filters(app):
    """Method filters are applied for generated and rendered templates"""
    with app.test_request_context():
        genshi = app.extensions['genshi']

        # Register a filter that rewrites the <title> of every html template.
        @genshi.filter('html')
        def prepend_title(template):
            return template | Transformer('head/title').prepend('Flask-Genshi - ')

        expected = ('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" '
                    '"http://www.w3.org/TR/html4/strict.dtd">\n'
                    '<html><head><title>Flask-Genshi - Hi!</title></head></html>')
        assert render_template('filter.html') == expected
def contributions(datafile):
    """ text data file => list of string """
    entries = []
    with open(datafile, 'r') as data:
        for raw_line in data.readlines():
            tokens = raw_line.strip().split(" ")
            # Last whitespace-separated token is the URL; the rest is the info.
            entries.append({
                'url': tokens[-1].strip(),
                'info': " ".join(tokens[:-1]).strip(),
            })
    return entries
from datetime import datetime
import dateutil
def utcnow():
    """
    Get the current UTC time which has the time zone info.
    """
    # BUG FIX: with `from datetime import datetime` in scope,
    # `datetime.datetime.now(...)` raised AttributeError, and `import dateutil`
    # does not guarantee the `dateutil.tz` submodule is loaded. The stdlib
    # `timezone.utc` (Python 3.2+) provides an equivalent aware UTC tzinfo.
    from datetime import datetime, timezone
    return datetime.now(timezone.utc)
from corehq.apps.users.models import CouchUser
def get_xform_location(xform):
    """
    Returns the sql location associated with the user who submitted an xform
    """
    submitter_id = getattr(xform.metadata, 'userID', None)
    if not submitter_id:
        return None
    submitter = CouchUser.get_by_user_id(submitter_id)
    # Different user classes expose the location differently.
    if hasattr(submitter, 'get_sql_location'):
        return submitter.get_sql_location(xform.domain)
    if hasattr(submitter, 'sql_location'):
        return submitter.sql_location
    return None
import cloudpickle
import inspect
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.
    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    if not cloudpickle:
        # BUG FIX: the error message was garbled by an automated import
        # rewrite ("could not from joblib.externals import cloudpickle").
        raise ImportError("could not import cloudpickle. Please install "
                          "cloudpickle to allow extended serialization. "
                          "(`pip install cloudpickle`).")
    # If obj is a class, create a CloudpickledClassWrapper which instantiates
    # the object internally and wrap it directly in a CloudpickledObjectWrapper
    if inspect.isclass(obj):
        class CloudpickledClassWrapper(CloudpickledObjectWrapper):
            def __init__(self, *args, **kwargs):
                self._obj = obj(*args, **kwargs)
                self._keep_wrapper = keep_wrapper
        CloudpickledClassWrapper.__name__ = obj.__name__
        return CloudpickledClassWrapper
    # If obj is an instance of a class, just wrap it in a regular
    # CloudpickledObjectWrapper
    return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
def values(names):
    """
    Method decorator that allows inject return values into method parameters.
    It tries to find desired value going deep. For convenience injects list with only one value as value.
    :param names: dict of "value-name": "method-parameter-name"
    """
    def wrapper(func):
        @wraps(func)
        def wrapped_func(*args, **kwargs):
            # The instance is either the second positional argument or an
            # explicit `instance` keyword.
            instance = None
            if len(args) > 1:
                instance = args[1]
            else:
                instance = kwargs['instance']

            def findReturnValues(rvalues):
                # BUG FIX: dict.iteritems() is Python 2 only; the rest of this
                # decorator already uses .items().
                for k, v in rvalues.items():
                    if isinstance(v, dict):
                        findReturnValues(v)  # go deep, to find desired name
                    if k in names:
                        if isinstance(v, list) and len(v) == 1:
                            # single-element lists are unwrapped for convenience
                            kwargs.update({names[k]: v[0]})
                        else:
                            kwargs.update({names[k]: v})

            findReturnValues(instance.returnValues)
            # ensure all names was set
            missing_params = [k for k, v in names.items() if v not in kwargs]
            if missing_params:
                raise AttributeError("Parameters {0} for '{1}' were not found".format(missing_params, func.__name__), missing_params)
            # BUG FIX: propagate the wrapped function's return value
            # (previously always returned None).
            return func(*args, **kwargs)
        return wrapped_func
    return wrapper
def post():
    """Render the public post page (``public/post.html``)."""
    return render_template('public/post.html')
from typing import List
def ensemble(models: List[training.Model],
             model_input: Tensor) -> training.Model:
    """Build an ensemble model that averages the first output of each model."""
    averaged = Average()([m.outputs[0] for m in models])
    return Model(model_input, averaged, name='ensemble')
def save_layout(request, layout_id):
    """ Save layout properties """
    if request.method != 'POST':
        return res.get_only_post_allowed({})
    layout = BluesteelLayoutEntry.objects.filter(id=layout_id).first()
    if layout is None:
        return res.get_response(404, 'Bluesteel layout not found', {})
    json_valid, post_info = val.validate_json_string(request.body)
    if not json_valid:
        return res.get_json_parser_failed({})
    obj_validated, val_resp_obj = val.validate_obj_schema(post_info, BluesteelSchemas.SAVE_LAYOUT)
    if not obj_validated:
        return res.get_schema_failed(val_resp_obj)
    layout.name = val_resp_obj['name']
    layout.active = val_resp_obj['active']
    layout.project_index_path = val_resp_obj['project_index_path']
    # Check if change of path in case we need to purge other services like 'performance tests service'
    layout.clamp_project_index_path()
    layout.check_active_state()
    layout.save()
    return res.get_response(200, 'Layout saved', {})
def _to_deck(group: ParseResults) -> Deck:
    """Parse a deck into a python list."""
    deck: Deck
    kind = group["type"]
    if "size" in group:
        # Sized decks: I/R entries are lists of token groups, one per value.
        expected = int(group["size"])
        if kind == "C":
            deck = ["".join(group.value)]
        elif kind == "I":
            deck = [int("".join(tok)) for tok in group.value]
            if len(deck) != expected:
                raise RuntimeWarning(f"expected {expected} integers, got {len(deck)}")
        elif kind == "R":
            deck = [float("".join(tok)) for tok in group.value]
            if len(deck) != expected:
                raise RuntimeWarning(f"expected {expected} reals, got {len(deck)}")
        else:
            raise TypeError(f'{group["type"]} is not one of C, I or R')
    else:
        # Unsized decks always hold a single value.
        if kind == "C":
            deck = ["".join(group.value)]
        elif kind == "I":
            deck = [int("".join(group.value))]
        elif kind == "R":
            deck = [float("".join(group.value))]
        else:
            raise TypeError(f'{group["type"]} is not one of C, I or R')
    return deck
def build_gabriel_graph_from_delaunay(X, tri, delaunay_adjacency_matrix):
    """Remove edges from delaunay triangulation and returns the adjaceny matrix of a Gabriel graph
    :param delaunay_adjacency_matrix: scipy sparse matrix (csr format)
    """
    # COO format gives direct access to each (row, col) edge pair
    as_coo = delaunay_adjacency_matrix.tocoo()
    # One row per edge, columns sorted so (i, j) and (j, i) coincide
    edges = np.sort(np.vstack((as_coo.row, as_coo.col)).T, axis=1)
    # ---- Trim Edges from Delaunay tessellation to get Gabriel graph ----
    endpoints = tri.points[edges]
    midpoints = (endpoints[:, 0, :] + endpoints[:, 1, :]) / 2  # midpoint of each edge
    # Radius of the diametral sphere between each pair of nodes
    radii = np.sqrt(np.sum((endpoints[:, 0, :] - endpoints[:, 1, :]) ** 2, axis=1)) / 2
    # Nearest input point to each midpoint via a kd-tree
    nearest_dist = cKDTree(X).query(x=midpoints, k=1)[0]
    # Gabriel condition: no point lies inside the diametral sphere.
    # The factor guards against floating-point precision errors.
    keep = nearest_dist >= (radii * 0.999)
    row_ind, col_ind = edges[keep].T
    # Construct sparse adjacency matrix of the surviving edges
    return csr_matrix((np.ones_like(row_ind), (row_ind, col_ind)),
                      shape=(tri.npoints, tri.npoints))
import os
def list_image_files(dirs):
    """lists the images files under the dirs.
    :return:
      a list of tuples
      the shape of tuple: (image_filename, subdir, fullpath)
    """
    found = []
    for base_dir in dirs:
        for entry in os.listdir(base_dir):
            # Anything starting with "." is ignored
            if entry.startswith('.'):
                continue
            catalog_dir = os.path.join(base_dir, entry)
            # Only catalogs carrying an image metadata file are considered.
            meta_path = os.path.join(catalog_dir, IMAGE_META_JSON_FILE)
            if not os.path.isfile(meta_path):
                continue
            # Image directly at the top level: empty subdir component.
            if entry.endswith(".partclone.gz"):
                found.append((entry, "", catalog_dir))
            if os.path.isdir(catalog_dir):
                for sub_entry in os.listdir(catalog_dir):
                    # Anything starting with "." is ignored
                    if sub_entry.startswith('.'):
                        continue
                    if sub_entry.endswith(".partclone.gz"):
                        found.append((sub_entry, entry,
                                      os.path.join(catalog_dir, sub_entry)))
    return found
import random
def randomize_demand(demand):
    """Return a randomized demand when given a static demand"""
    # Scale by a uniform factor in [0, 2.25): averages slightly above demand.
    scale = random.uniform(0, 2.25)
    return scale * demand
def _collect_input_shape(input_tensors):
    """Collects the output shape(s) of a list of Keras tensors.
    # Arguments
        input_tensors: list of input tensors (or single input tensor).
    # Returns
        List of shape tuples (or single tuple), one tuple per input.
    """
    tensors = to_list(input_tensors)
    shapes = []
    for tensor in tensors:
        try:
            shape = K.int_shape(tensor)
        except TypeError:
            # Tensor without a static shape: record None instead.
            shape = None
        shapes.append(shape)
    return unpack_singleton(shapes)
from datetime import datetime
import math
def iaga2df(iaga2002_fname, D_to_radians=True):
    """
    Parser the magnetometer data record stored in the IAGA-2002 format
    file *iaga2002_fname*. If *D_to_radians*, declination data (D) are
    converted from degrees to radians. Return the tuple with the
    :class:`DataFrame` containing the data and header information
    """
    with open(iaga2002_fname) as fid:
        # parse header
        header, cols = parse_header(fid)
        keys = ['B_' + c for c in cols]
        # parse data
        timestamps = []
        columns = defaultdict(list)
        for line in fid:
            toks = line.split()
            # first two tokens are date and time-of-day
            stamp = datetime.strptime(toks[0] + ' ' + toks[1], '%Y-%m-%d %H:%M:%S.%f')
            timestamps.append(stamp)
            for key, value in zip(keys, map(convert_float, toks[3:])):
                if key == 'B_D' and D_to_radians:
                    value = math.radians(value)
                columns[key].append(value)
    df = PD.DataFrame(index=timestamps, data=columns)
    return df, header
def geom_bar(mapping=aes(), *, fill=None, color=None, position="stack", size=None):
    """Create a bar chart that counts occurrences of the various values of the ``x`` aesthetic.
    Supported aesthetics: ``x``, ``color``, ``fill``

    Parameters
    ----------
    mapping :
        Aesthetic mapping for the geom.
    fill :
        Fixed fill color overriding the mapping.
    color :
        Fixed outline color overriding the mapping.
    position :
        Bar placement strategy; defaults to ``"stack"``.
    size :
        Fixed size overriding the mapping.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    # NOTE(review): `mapping=aes()` is evaluated once at import time; this is
    # only safe if aes() is immutable / never mutated downstream — confirm.
    return GeomBar(mapping, fill=fill, color=color, position=position, size=size)
import argparse
def parse_args(args):
    """define arguments"""
    cli = argparse.ArgumentParser(description="go_term_enrichment")
    # Required positional arguments.
    cli.add_argument("file_names", type=str,
                     help="Name of folder and filenames for the promoters extracted")
    cli.add_argument("go_directory", type=str,
                     help="Directory location of go term enrichment files")
    cli.add_argument("background_gene_set", type=str,
                     help="Location of background gene set")
    cli.add_argument("NCBI_gene_list", type=str,
                     help="Location of NCBI gene list")
    cli.add_argument("genes_of_interest", type=str,
                     help="Location of genes of interest")
    # Optional positionals with defaults (nargs='?').
    cli.add_argument("variable1_name", type=str, nargs="?", default="constitutive",
                     help="Optional replacement name for 2nd variable eg. non-specific")
    cli.add_argument("variable2_name", type=str, nargs="?", default="variable",
                     help="Optional replacement name for 2nd variable eg. tissue_specific")
    cli.add_argument("author_name", type=str, nargs="?", default="Czechowski",
                     help="Optional replacement name for author in reference to the geneset")
    return cli.parse_args(args)
def MobileNetV3_Large_Base(num_classes,
                           in_channels):
    """Build the base (large) MobileNetV3.

    :param num_classes: number of output classes
    :param in_channels: number of input channels
    :return: a ``MobileNetV3_Large`` instance with width multiplier 1.0
    """
    return MobileNetV3_Large(num_classes=num_classes,
                             in_channels=in_channels,
                             alpha=1.0)
def compute_trajectory_points(path, sgrid,
                              ugrid, xgrid,
                              dt=1e-2, smooth=True,
                              smooth_eps=1e-4):
    """Compute trajectory with uniform sampling time.
    Note
    ----
    Additionally, if `smooth` is True, the return trajectory is smooth
    using least-square technique. The return trajectory, also
    satisfies the discrete transition relation. That is
        q[i+1] = q[i] + qd[i] * dt + qdd[i] * dt ^ 2 / 2
        qd[i+1] = qd[i] + qdd[i] * dt
    If one finds that the function takes too much time to terminate,
    then it is very likely that the most time-consuming part is
    least-square. In this case, there are several options that one
    might take.
    1. Set `smooth` to False. This might return badly conditioned
       trajectory.
    2. Reduce `dt`. This is the recommended option.
    Parameters
    ----------
    path : interpolator
    sgrid : ndarray, shape (N+1,)
        Array of gridpoints.
    ugrid : ndarray, shape (N,)
        Array of controls.
    xgrid : ndarray, shape (N+1,)
        Array of squared velocities.
    dt : float, optional
        Sampling time step.
    smooth : bool, optional
        If True, do least-square smoothing. See above for more details.
    smooth_eps : float, optional
        Relative gain of minimizing variations of joint accelerations.
    Returns
    -------
    tgrid : ndarray, shape (M)
        Time at each gridpoints.
    q : ndarray, shape (M, dof)
        Joint positions at each gridpoints.
    qd : ndarray, shape (M, dof)
        Joint velocities at each gridpoints.
    qdd : ndarray, shape (M, dof)
        Joint accelerations at each gridpoints.
    """
    tgrid = np.zeros_like(sgrid)  # Array of time at each gridpoint
    N = sgrid.shape[0] - 1
    sdgrid = np.sqrt(xgrid)
    # Accumulate segment traversal times assuming constant acceleration per
    # segment: dt_seg = 2 * ds / (sd_i + sd_{i+1}).
    for i in range(N):
        tgrid[i+1] = ((sgrid[i+1] - sgrid[i]) / (sdgrid[i] + sdgrid[i+1]) * 2
                      + tgrid[i])
    # shape (M+1,) array of sampled time
    tsample = np.arange(tgrid[0], tgrid[-1], dt)
    ssample = np.zeros_like(tsample)  # sampled position
    xsample = np.zeros_like(tsample)  # sampled velocity squared
    sdsample = np.zeros_like(tsample)  # sampled velocity
    usample = np.zeros_like(tsample)  # sampled path acceleration
    igrid = 0
    # For each sample time, locate the enclosing grid segment and integrate
    # the constant-acceleration profile within it.
    for i, t in enumerate(tsample):
        while t > tgrid[igrid + 1]:
            igrid += 1
        usample[i] = ugrid[igrid]
        sdsample[i] = sdgrid[igrid] + (t - tgrid[igrid]) * usample[i]
        xsample[i] = sdsample[i] ** 2
        ssample[i] = (sgrid[igrid] +
                      (xsample[i] - xgrid[igrid]) / 2 / usample[i])
    q = path.eval(ssample)
    qs = path.evald(ssample)  # derivative w.r.t [path position] s
    qss = path.evaldd(ssample)
    def array_mul(vectors, scalars):
        # given array of vectors and array of scalars
        # multiply each vector with each scalar
        res = np.zeros_like(vectors)
        for i in range(scalars.shape[0]):
            res[i] = vectors[i] * scalars[i]
        return res
    # Chain rule: qd = qs * sd; qdd = qs * sdd + qss * sd^2.
    qd = array_mul(qs, sdsample)
    qdd = array_mul(qs, usample) + array_mul(qss, sdsample ** 2)
    # Smoothing
    if not smooth:
        return tsample, q, qd, qdd, ssample
    else:
        dof = q.shape[1]
        # Still slow, I will now try QP with quadprog
        # Discrete double-integrator dynamics per joint: x = (q, qd), input qdd.
        A = np.array([[1., dt], [0, 1.]])
        B = np.array([dt ** 2 / 2, dt])
        M = tsample.shape[0] - 1
        # Phi maps the acceleration sequence to the stacked state sequence;
        # Beta propagates the initial state.
        Phi = np.zeros((2 * M, M))
        for i in range(M):  # Block diagonal
            Phi[2 * i: 2 * i + 2, i] = B
        for i in range(1, M):  # First column
            Phi[2 * i: 2 * i + 2, 0] = np.dot(A, Phi[2 * i - 2: 2 * i, 0])
        for i in range(1, M):  # Next column
            Phi[2 * i:, i] = Phi[2 * i - 2: 2 * M - 2, i - 1]
        Beta = np.zeros((2 * M, 2))
        Beta[0: 2, :] = A
        for i in range(1, M):
            Beta[2 * i: 2 * i + 2, :] = np.dot(A, Beta[2 * i - 2: 2 * i, :])
        # Delta penalizes differences between consecutive accelerations.
        Delta = np.zeros((M - 1, M))
        for i in range(M-1):
            Delta[i, i] = 1
            Delta[i, i + 1] = - 1
        for k in range(dof):
            Xd = np.vstack((q[1:, k], qd[1:, k])).T.flatten()  # numpy magic
            x0 = np.r_[q[0, k], qd[0, k]]
            xM = np.r_[q[-1, k], qd[-1, k]]
            # QP: min ||Phi u + Beta x0 - Xd||^2 + eps ||Delta u||^2
            #     s.t. final state equals xM (meq=2 equality constraints).
            G = np.dot(Phi.T, Phi) + np.dot(Delta.T, Delta) * smooth_eps
            a = - np.dot(Phi.T, Beta.dot(x0) - Xd)
            C = Phi[2 * M - 2:].T
            b = xM - Beta[2 * M - 2:].dot(x0)
            sol = quadprog.solve_qp(G, a, C, b, meq=2)[0]
            Xsol = np.dot(Phi, sol) + np.dot(Beta, x0)
            Xsol = Xsol.reshape(-1, 2)
            q[1:, k] = Xsol[:, 0]
            qd[1:, k] = Xsol[:, 1]
            qdd[:-1, k] = sol
            qdd[-1, k] = sol[-1]
        return tsample, q, qd, qdd, ssample
import argparse
def get_split_parser():
    """
    Returns the parser used for the split tool which defines all the available arguments.
    This can be used for generating documentation about the tool using Sphinx.
    :return: the parser object
    :rtype: :class:`argparse.ArgumentParser`
    """
    split_parser = argparse.ArgumentParser(description='Randomly assign data to test, training, and validate sets')
    # Exactly one input mode must be chosen: text file, directory, or reset.
    mode_group = split_parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument("-t", "--text-file", type=lambda x: is_valid_file(split_parser, x),
                            help="Split text file into train/test/validate sets")
    mode_group.add_argument("-d", "--directory", type=lambda x: is_valid_dir(split_parser, x),
                            help="Split directory into train/test/validate sets")
    mode_group.add_argument("-r", "--reset", type=lambda x: is_valid_dir(split_parser, x),
                            help="Path to directory containing train/test/validate folders to reset")
    split_parser.add_argument("-e", "--extension", type=str, default="*",
                              help="File extension of the files to process (default: *)")
    # Default split is 80/10/10.
    split_parser.add_argument("-tr", "--train", type=int, default=80,
                              help="Percentage of files for training (default: 80%%)")
    split_parser.add_argument("-te", "--test", type=int, default=10,
                              help="Percentage of files for test (default: 10%%)")
    split_parser.add_argument("-va", "--validate", type=int, default=10,
                              help="Percentage of files for validate (default: 10%%)")
    split_parser.add_argument("-nv", "--no-validate", action="store_true",
                              help="Don't produce a validation set when splitting")
    split_parser.add_argument("-ns", "--no-shuffle", action="store_true",
                              help="Don't randomise when splitting data")
    split_parser.add_argument("-nh", '--no-header', action="store_true",
                              help="Use this flag when the text file has no headers")
    return split_parser
from typing import Tuple
def _reorder_cols(
df, key_columns: Tuple[str], master_grouping_key: str
) -> pd.DataFrame:
"""
Helper function for creating a user-friendly schema structure for the output prediction
dataframe that mirrors what would be expected (grouping columns preceding data).
:param df: dataframe with grouping columns appended to the predictions
:param key_columns: the key column names that have been appended right-most to the prediction
dataframe.
:param master_grouping_key: the master grouping key for setting the position of that column
:return: a reformatted and modified schema of the prediction dataframe
"""
masked_columns = [
col for col in df.columns if col not in key_columns + [master_grouping_key]
]
reordered_df = df[[master_grouping_key] + key_columns + masked_columns]
return reordered_df | 05d51a1d8d3869dfb6bd83d5d81e6c9b56638c73 | 3,634,062 |
import subprocess
def clip(text):
    """
    Try to copy *text* to the system clipboard using ``pbcopy`` (macOS) and,
    when that binary is absent, ``xclip`` (X11). Returns True on success,
    False otherwise.
    """
    payload = text.encode()
    # Prefer pbcopy; fall back to xclip only when pbcopy cannot be launched.
    for tool in ("pbcopy", "xclip"):
        try:
            proc = subprocess.Popen(tool, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
        except OSError:
            continue
        proc.communicate(payload)
        return not proc.returncode
    return False
from typing import Dict
from typing import Any
import requests
def create_rule(rule: Dict[str, Any]) -> Dict[str, Any]:
    """Create a rule, returning the result from SmartThings."""
    query = {"locationId": CONTEXT.get().location_id}
    response = requests.post(
        url=_url("/rules"), headers=_headers(), params=query, json=rule
    )
    _raise_for_status(response)
    return response.json()
from config import bot_config as _BOT_CONFIG
def _get_bot_config():
    """Returns the bot_config.py module, importing it only once.

    This function is called implicitly by _call_hook() and _call_hook_safe().
    """
    global _BOT_CONFIG
    if not _BOT_CONFIG:
        # Lazily load the module on first use and cache it for later calls.
        # presumably lives at config.bot_config per the top-of-file import --
        # TODO confirm module layout.
        from config import bot_config
        _BOT_CONFIG = bot_config
    # Bug fix: the original only returned when _BOT_CONFIG was falsy, so a
    # loaded config was never returned (the function fell through to None).
    return _BOT_CONFIG
def img_post_process(img_tensor):
    """Image postprocess
    Convert torch.tensor() images into list of cv2 images.
    1. Convert torch.tensor() to np.array(), and transpose [C, H, W] to [H, W, C].
    2. Min-max rescale the values into [0, 255] (constant images map to all zeros).
    3. Convert data format float to np.uint8.
    4. Convert color channels from RGB to BGR (used in cv2 image).
    Args:
        img_tensor, torch.tensor() of shape [B, C, H, W].
    Returns:
        List of cv2 images.
    """
    batch = img_tensor.data.numpy()
    new_imgs = []
    for batch_idx in range(batch.shape[0]):
        new_img = batch[batch_idx].transpose(1, 2, 0)
        # Robustness fix: guard against division by zero when the image is
        # constant (max == min), which previously produced NaNs.
        span = new_img.max() - new_img.min()
        if span > 0:
            new_img = (new_img - new_img.min()) / span * 255
        else:
            new_img = np.zeros_like(new_img)
        new_img = new_img.astype(np.uint8)
        new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2BGR)
        new_imgs.append(new_img)
    return new_imgs
def apply_grid(dataset, masker=None, scale=5, threshold=None):
    """ Imposes a 3D grid on the brain volume and averages across all voxels
    that fall within each cell.
    Args:
        dataset: Data to apply grid to: a Dataset instance, or a numpy array
            with voxels in rows and features in columns.
        masker: Masker used to map between the grid and the data. Required
            when dataset is a plain array; otherwise taken from the Dataset.
        scale: int; scaling factor (in mm) forwarded to create_grid().
        threshold: Optional float forwarded to reduce.average_within_regions().
    Returns:
        A 2-tuple: (n_cubes x n_studies numpy array, grid image mapping voxel
        identities onto cell IDs).
    """
    if masker is None:
        if not isinstance(dataset, Dataset):
            raise ValueError(
                "If dataset is a numpy array, a masker must be provided.")
        masker = dataset.masker
    grid = imageutils.create_grid(masker.volume, scale)
    cell_map = masker.mask(grid, in_global_mask=True)
    averaged = average_within_regions(dataset, cell_map, threshold)
    return (averaged, grid)
from pathlib import Path
from typing import Optional
from typing import Set
def cookiecutter_template(
    output_dir: Path,
    repo: Repo,
    cruft_state: CruftState,
    project_dir: Path = Path("."),
    cookiecutter_input: bool = False,
    checkout: Optional[str] = None,
    deleted_paths: Optional[Set[Path]] = None,
    update_deleted_paths: bool = False,
) -> CookiecutterContext:
    """Generate a clean cookiecutter template in output_dir."""
    deleted_paths = set() if deleted_paths is None else deleted_paths
    pyproject_file = project_dir / "pyproject.toml"
    # Check out the requested ref (or the remote HEAD) with a clean tree.
    target = checkout or repo.remotes.origin.refs["HEAD"]
    repo.head.reset(commit=target, working_tree=True)
    assert repo.working_dir is not None  # nosec B101 (allow assert for type checking)
    context = _generate_output(
        cruft_state, Path(repo.working_dir), cookiecutter_input, output_dir
    )
    # Paths the project opted out of must never receive updates, so collect
    # them before diffing/applying anything.
    skip_paths = _get_skip_paths(cruft_state, pyproject_file)
    # Optionally also skip files that were deleted from the project but are
    # still shipped by the linked template, so that the generated changes
    # apply cleanly to the current project.
    if update_deleted_paths:
        deleted_paths.update(_get_deleted_files(output_dir, project_dir))
    # Drop both categories from the freshly generated template.
    _remove_paths(output_dir, skip_paths | deleted_paths)  # type: ignore
    return context
def get_item():
    """Build and return a dict describing one sample catalog item."""
    item = {}
    item['name'] = 'Nikon D3100 14.2 MP'
    item['category'] = 'Cameras'
    item['subcategory'] = 'Nikon Cameras'
    item['extended_info'] = {}
    return item
import math
def humanify_ms(ms: int) -> str:
    """ Converts an amount of millis to a more readable string.
    Args:
        ms (int): the amount of millis to convert
    Returns:
        The human string that represents the given amount of millis
    """
    # Above one minute: "Xm Ys"; above one second: fractional seconds;
    # otherwise raw milliseconds.
    if ms > TimeUnits.MS_IN_MIN:
        whole_minutes = math.floor(ms / TimeUnits.MS_IN_MIN)
        leftover_seconds = math.floor(
            (ms % TimeUnits.MS_IN_MIN) / TimeUnits.MS_IN_SEC)
        return "{:d}m {:d}s".format(whole_minutes, leftover_seconds)
    if ms > TimeUnits.MS_IN_SEC:
        return "{:.2f}s".format(ms / 1000)
    return "{:d}ms".format(ms)
import torch
def abs_(input):
    """
    In-place version of :func:`treetensor.torch.abs`: replaces each value of
    ``input`` with its absolute value and returns the mutated object.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> t = ttorch.tensor([12, 0, -3])
        >>> ttorch.abs_(t)
        >>> t
        tensor([12, 0, 3])
    """
    return torch.abs_(input)
import os
def n_age_sex():
    """Return DataFrame of population split by age and sex."""
    source = os.path.join(INPUT_DIR, 'census/sex_by_age.xlsx')
    sheet = pd.read_excel(source, 'Sheet 1', header=10, skipfooter=3,
                          index_col=0)
    # Drop the two leading summary rows and keep only the sex columns.
    return sheet[2:][['Males', 'Females']].reset_index(drop=True)
import pathlib
def get_filepath(filepath, overwrite):
    """
    Get the filepath to download to and ensure dir exists.

    Returns
    -------
    `pathlib.Path`, `bool`
        The target path and whether a file already exists there.
    """
    filepath = pathlib.Path(filepath)
    if filepath.exists():
        if not overwrite:
            # Consistency fix: return a Path here (previously a str), matching
            # both the documented return type and the other return below.
            return filepath, True
        if overwrite == 'unique':
            filepath = replacement_filename(filepath)
    # exist_ok avoids a race between an exists() check and mkdir().
    filepath.parent.mkdir(parents=True, exist_ok=True)
    return filepath, False
import argparse
import re
import socket
def argparse_is_valid_hostname(hostname):
    """ Validate the hostname passed in.
    Returns the hostname if it is valid, otherwise it raises an
    argparse.ArgumentTypeError.
    """
    # RFC 1035: a fully-qualified hostname may not exceed 255 characters.
    if len(hostname) > 255:
        # Bug fix: `argparse.argumenttypeerror` (lowercase) does not exist and
        # raised AttributeError instead of the intended validation error.
        raise argparse.ArgumentTypeError("Argument 'hostname' is not valid. " +
                                         "Hostname cannot be longer than 255 " +
                                         "characters. Exitting...")
    if hostname[-1] == ".":
        hostname = hostname[:-1]
    # Each dot-separated label: 1-63 alphanumeric/'-' chars, not starting or
    # ending with '-'.
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    if not all(allowed.match(x) for x in hostname.split(".")):
        raise argparse.ArgumentTypeError("Argument 'hostname' is not valid. " +
                                         "Exitting...")
    else:
        try:
            if socket.gethostbyname(hostname):
                return hostname
        except Exception:
            raise argparse.ArgumentTypeError("Argument 'hostname' is not " +
                                             "valid. Hostname does not " +
                                             "correspond to an existing " +
                                             "hostname or IP address. " +
                                             "Exitting...")
import sys
def compute_census(img_l: np.ndarray = None, img_r: np.ndarray = None, offset: int = 7) -> (np.ndarray, np.ndarray):
    """
    Census feature extraction (for more details see https://en.wikipedia.org/wiki/Census_transform)
    :param img_l: left image
    :param img_r: right image
    :param offset: pixel offset on the four image borders
    :return: lcensus_values, rcensus_values
    """
    if len(img_l.shape) == 3:
        h, w, c = img_l.shape
    else:
        (h, w), c = img_l.shape, 1
    # work on floats so the per-pixel intensity offsets below are meaningful
    img_l, img_r = Normalizer(img_l).norm_fun(), Normalizer(img_r).norm_fun()
    lcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)
    rcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)
    print('\tLeft and right census...', end='')
    sys.stdout.flush()
    started = t.time()
    # border pixels are skipped: their neighbourhood would fall off the image
    for row in range(offset, h - offset):
        for col in range(offset, w - offset):
            # left image: block around the pixel, centred on its intensity
            window = img_l[row - offset:row + offset + 1,
                           col - offset:col + offset + 1]
            lcensus_values[row, col] = vectorized_census(window - img_l[row, col])
            # right image: identical computation
            window = img_r[row - offset:row + offset + 1,
                           col - offset:col + offset + 1]
            rcensus_values[row, col] = vectorized_census(window - img_r[row, col])
    finished = t.time()
    print('\t(done in {:.2f}s)'.format(finished - started))
    return lcensus_values, rcensus_values
import textwrap
def dedent(text):
    """Strip the whitespace prefix common to every line of *text*."""
    return textwrap.dedent(text)
from typing import Dict
from typing import List
def get_agents(agents: Dict, nr_players: int, action_num: int, state_shape: List):
    """
    Initalize agents to play the game.

    :param agents: Dictionary of agent_name: number of agents pairs
    :param nr_players: Number of players, amount of agents generated
    :param action_num: Size of the action space, forwarded to model loaders
    :param state_shape: State shape, forwarded to model loaders
    :return: list of at most nr_players instantiated agents
    """
    agent_list = list()
    i = 0
    for agent_id, nr_agents in agents.items():
        if agent_id == 'mocsar-nfsp-pytorch':
            # Pre trained model from rlcard3.models.pretrained_models, NFSP has multiple (four) agents in it
            # Here we directly load NFSP models from /models module
            nfsp_agents = models.load(agent_id,
                                      num_players=nr_players,
                                      action_num=action_num,
                                      state_shape=state_shape).agents
            for j in range(nr_agents):
                agent_list.append(nfsp_agents[j])
            i += 1
            if i >= nr_players:
                return agent_list
        for _ in range(nr_agents):
            if agent_id in ['mocsar-dqn-pytorch', 'mocsar-dqn-pytorchr',
                            'mocsar-nfsp-pytorch', 'mocsar-nfsp-pytorchm']:
                # Pre trained model from rlcard3.models.pretrained_models, DQN
                rule_agent = models.load(agent_id,
                                         num_players=nr_players,
                                         action_num=action_num,
                                         state_shape=state_shape).agents[0]
            else:
                # Models from model_agents
                rule_agent = load(agent_id=agent_id)
            agent_list.append(rule_agent)
            i += 1
            if i >= nr_players:
                return agent_list
    # Bug fix: previously the function fell off the end and returned None when
    # the configured agent counts summed to fewer than nr_players; return the
    # agents collected so far instead.
    return agent_list
def enumerate_square(i, n):
    """
    Bijectively map an index i in range(n**2 - n) to an ordered pair
    (row, col) with row in range(n), col in range(n) and col != row.
    """
    # Each row owns n-1 column slots; skip the diagonal by shifting the
    # columns at or past it up by one.
    row, col = divmod(int(i), n - 1)
    if col >= row:
        col += 1
    return row, col
def universal_transformer_with_lstm_as_transition_function(
    layer_inputs, step, hparams, ffn_unit, attention_unit,
    pad_remover=None):
  """Universal Transformer which uses a lstm as transition function.

  It's kind of like having a lstm, filliped vertically next to the Universal
  Transformer that controls the flow of the information in depth,
  over different steps of the Universal Transformer.

  Args:
    layer_inputs:
      - state: state
      - inputs: the original embedded inputs (= inputs to the first step)
      - memory: memory used in lstm.
    step: indicates number of steps taken so far
    hparams: model hyper-parameters.
    ffn_unit: feed-forward unit
    attention_unit: multi-head attention unit
    pad_remover: to mask out padding in convolutional layers (efficiency).

  Returns:
    layer_output:
      new_state: new state
      inputs: the original embedded inputs (= inputs to the first step)
      memory: contains information of state from all the previous steps.
  """
  state, unused_inputs, memory = tf.unstack(layer_inputs,
                                            num=None,
                                            axis=0,
                                            name="unstack")
  # NOTE:
  # state (ut_state): output of the lstm in the previous step
  # inputs (ut_input): original input --> we don't use it here
  # memory: lstm memory

  # Multi_head_attention:
  assert not hparams.add_step_timing_signal  # Let lstm count for us!
  mh_attention_input = step_preprocess(state, step, hparams)
  transition_function_input = attention_unit(mh_attention_input)

  # Transition Function:
  if hparams.add_ffn_unit_to_the_transition_function:
    transition_function_input = ffn_unit(transition_function_input)

  transition_function_input = common_layers.layer_preprocess(
      transition_function_input, hparams)
  with tf.variable_scope("lstm"):
    # lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1})
    transition_function_input_gate = _ffn_layer_multi_inputs(
        [transition_function_input, state],
        hparams,
        name="input",
        bias_initializer=tf.zeros_initializer(),
        activation=tf.sigmoid,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)
    # Bug fix: these three gate summaries referenced an undefined name `Q`;
    # the intended reduction is tf.reduce_mean.
    tf.contrib.summary.scalar(
        "lstm_input_gate", tf.reduce_mean(transition_function_input_gate))

    # lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1})
    transition_function_forget_gate = _ffn_layer_multi_inputs(
        [transition_function_input, state],
        hparams,
        name="forget",
        bias_initializer=tf.zeros_initializer(),
        activation=None,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)
    forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)
    transition_function_forget_gate = tf.sigmoid(
        transition_function_forget_gate + forget_bias_tensor)
    tf.contrib.summary.scalar(
        "lstm_forget_gate",
        tf.reduce_mean(transition_function_forget_gate))

    # lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1})
    transition_function_output_gate = _ffn_layer_multi_inputs(
        [transition_function_input, state],
        hparams,
        name="output",
        bias_initializer=tf.zeros_initializer(),
        activation=tf.sigmoid,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)
    tf.contrib.summary.scalar(
        "lstm_output_gate",
        tf.reduce_mean(transition_function_output_gate))

    # lstm input modulation
    transition_function_input_modulation = _ffn_layer_multi_inputs(
        [transition_function_input, state],
        hparams,
        name="input_modulation",
        bias_initializer=tf.zeros_initializer(),
        activation=tf.tanh,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)

    # c_t = f_t * c_{t-1} + i_t * tanh(W_c.x_t + U_c.h_{t-1})
    transition_function_memory = (
        memory * transition_function_forget_gate +
        transition_function_input_gate *
        transition_function_input_modulation)

    # h_t = o_t * tanh(c_t)
    transition_function_output = (tf.tanh(transition_function_memory) *
                                  transition_function_output_gate)

  transition_function_output = common_layers.layer_preprocess(
      transition_function_output, hparams)

  return transition_function_output, unused_inputs, transition_function_memory
def extract_names(bigrams):
    """
    Return title-cased "First Last" strings for every bigram whose first
    token is a known first name, without duplicates and preserving the
    order of first appearance.
    """
    named_bigrams = []
    for bigram in bigrams:
        if bigram[0].upper() in FIRST_NAMES:
            person = " ".join(bigram).title()
            # Bug fix: the duplicate check used to compare the raw string
            # against the title-cased entries already stored, letting
            # differently-cased duplicates through. Compare like with like.
            # (Also removed the unused NUM_BIGRAMS constant and the unused
            # LancasterStemmer instance.)
            if person not in named_bigrams:
                named_bigrams.append(person)
    return named_bigrams
def GetActiveProjectAndAccount():
  """Get the active project name and account for the active credentials.

  For use with wrapping legacy tools that take projects and credentials on
  the command line.

  Returns:
    (str, str), A tuple of (project, account).
  """
  return (
      properties.VALUES.core.project.Get(validate=False),
      properties.VALUES.core.account.Get(validate=False),
  )
import falcon
def csrf_protection(func):
    """
    Protect resource from common CSRF attacks by checking user agent and referrer
    """
    # Local imports keep the decorator self-contained; urlparse was used but
    # not imported in this snippet's visible scope.
    import functools
    from urllib.parse import urlparse

    @functools.wraps(func)  # fix: preserve the wrapped responder's metadata
    def wrapped(self, req, resp, *args, **kwargs):
        # Assume curl and python-requests are used intentionally
        if req.user_agent.startswith(("curl/", "python-requests/")):
            return func(self, req, resp, *args, **kwargs)

        # For everything else assert referrer
        referrer = req.headers.get("REFERER")
        if referrer:
            scheme, netloc, path, params, query, fragment = urlparse(referrer)
            if ":" in netloc:
                host, port = netloc.split(":", 1)
            else:
                host, port = netloc, None
            if host == req.host:
                return func(self, req, resp, *args, **kwargs)

        # Kaboom!
        logger.warning("Prevented clickbait from '%s' with user agent '%s'",
                       referrer or "-", req.user_agent)
        raise falcon.HTTPForbidden("Forbidden",
                                   "No suitable UA or referrer provided, cross-site scripting disabled")
    return wrapped
from pathlib import Path
async def get_system(
    system_id: UUID = Path(
        ..., description="ID of system to get", example=models.SYSTEM_ID
    ),
    storage: StorageInterface = Depends(StorageInterface),
) -> models.StoredPVSystem:
    """Get a single PV System"""
    # A transaction scopes the lookup; the context manager handles cleanup.
    with storage.start_transaction() as transaction:
        return transaction.get_system(system_id)
import re
def get_playback_time(playback_duration):
    """ Get the playback time(in seconds) from the string:
    Eg: PT0H1M59.89S

    Parses ISO-8601-style "PT#H#M#S" durations where each of the hour,
    minute and second components is optional.

    Bug fix: the previous implementation assigned units positionally from the
    right, so "PT1M" was read as 1 *second* instead of 60. Components are now
    tied to their H/M/S designators explicitly. Unparseable input yields 0.
    """
    match = re.fullmatch(
        r'PT(?:(?P<hours>[\d.]+)H)?(?:(?P<minutes>[\d.]+)M)?(?:(?P<seconds>[\d.]+)S)?',
        playback_duration,
    )
    if match is None:
        return 0
    hours = float(match.group('hours') or 0)
    minutes = float(match.group('minutes') or 0)
    seconds = float(match.group('seconds') or 0)
    return hours * 60 * 60 + minutes * 60 + seconds
def rate_limit(state, task_name, rate_limit, **kwargs):
    """Tell worker(s) to modify the rate limit for a task by type.

    See Also:
        :attr:`celery.task.base.Task.rate_limit`.

    Arguments:
        task_name (str): Type of task to set rate limit for.
        rate_limit (int, str): New rate limit.
    """
    # pylint: disable=redefined-outer-name
    # XXX Note that this redefines `terminate`:
    #     Outside of this scope that is a function.

    # Validate the rate limit string before touching any task state.
    try:
        rate(rate_limit)
    except ValueError as exc:
        return nok('Invalid rate limit string: {0!r}'.format(exc))

    try:
        state.app.tasks[task_name].rate_limit = rate_limit
    except KeyError:
        logger.error('Rate limit attempt for unknown task %s',
                     task_name, exc_info=True)
        return nok('unknown task')

    state.consumer.reset_rate_limits()

    if rate_limit:
        logger.info('New rate limit for tasks of type %s: %s.',
                    task_name, rate_limit)
        return ok('new rate limit set successfully')
    logger.info('Rate limits disabled for tasks of type %s', task_name)
    return ok('rate limit disabled successfully')
import torch
def listdict2dictlist(listdict: list, to_array=False) -> dict:
    """
    Transpose a list of dicts (all sharing the keys of the first entry) into
    a dict of equal-length lists.

    @type listdict: list
    @param listdict: list of dicts with the same keys
    @param to_array: when True, convert each value list to a numpy array
        (tensors are converted element-wise via npy() first)
    @rtype: dict
    @return: dict of lists (or arrays) of the same lengths
    """
    transposed = {key: [entry[key] for entry in listdict]
                  for key in listdict[0].keys()}
    if to_array:
        for key, values in transposed.items():
            if torch.is_tensor(values[0]):
                transposed[key] = np.array([npy(v) for v in values])
            else:
                transposed[key] = np.array(values)
    return transposed
def hitLine(lineA, lineB, point, lineWidth):
    """Checks whether the point is within the line segment's width band.

    lineA tuple: a point of the line.
    lineB tuple: another point of the line.
    point tuple: point we want to check.
    lineWidth float: width of the line.
    returns: True if in and False if out.
    """
    if lineWidth < 0:
        raise ValueError('Line width less than zero')
    dx = lineB[0] - lineA[0]
    dy = lineB[1] - lineA[1]
    # Perpendicular distance of point from the infinite line through A and B
    # (cross-product form); the denominator is clamped to avoid dividing by
    # zero for degenerate segments.
    cross = abs(dy * point[0] - dx * point[1]
                + lineB[0] * lineA[1] - lineB[1] * lineA[0])
    seg_len = max(distance.euclidean(lineA, lineB), 0.00001)
    if cross / seg_len > lineWidth + 0.001:
        return False
    # The point must also lie between the endpoints (no farther from either
    # endpoint than the segment is long).
    within_a = distance.euclidean(lineA, point) <= distance.euclidean(lineA, lineB)
    within_b = distance.euclidean(lineB, point) <= distance.euclidean(lineA, lineB)
    return bool(within_a and within_b)
def parse_args() -> Namespace:
    """
    Parse arguments.
    Parse optional arguments passed to the application during runtime and
    return the results.
    Returns
    -------
    Namespace
        Returns a ``Namespace`` containing all of the arguments passed by the
        user including defaults.
    """
    parser = ArgumentParser(f'torchSR Version: {VERSION}')
    # Two sub-commands ("train" and "test"); exactly one must be supplied.
    commands = parser.add_subparsers(dest='function', metavar='function',
                                     required=True)
    # ------------------------------------------------------------------
    # "train" sub-command: train an SRGAN model against an HD dataset.
    # Defaults come from module-level constants (BATCH_SIZE, EPOCHS, ...).
    # ------------------------------------------------------------------
    train = commands.add_parser('train', help='Train an SRGAN model against an HD '
                                'dataset.')
    train.add_argument('--batch-size', help='The number of images to include '
                       f'in every batch. Default: {BATCH_SIZE}.', type=int,
                       default=BATCH_SIZE)
    train.add_argument('--data-workers', help='Specify the number of parallel '
                       'workers to spawn to read and preprocess data. In '
                       'general, the higher the number of workers, the faster '
                       'training and testing will be up to a certain point. A '
                       'good rule of thumb is to take the number of images in '
                       'the dataset, divide it by the batch size, and dividing '
                       'that by the number of GPUs being used, or 1 if '
                       'CPU-only while rounding up in both cases.', type=int,
                       default=16)
    # positive_integer is a project-level validator; rejects values < 1.
    train.add_argument('--dataset-multiplier', help='Artificially increase '
                       'the size of the dataset by taking N number of random '
                       'samples from each image in the training dataset. The '
                       'default behavior is to take a single random square '
                       'subsection of each image, but depending on the '
                       'size of the subsection and the overall image size, '
                       'this could ignore over 99%% of the image. To increase '
                       'the number of samples per image, use a multiplier '
                       'greater than 1.', type=positive_integer, default=1)
    train.add_argument('--disable-amp', help='Disable Automatic Mixed '
                       'Precision (AMP) which uses both float32 and float16 to'
                       ' boost performance. Disabling AMP can decrease '
                       'performance by as much as 2X or more.',
                       action='store_true')
    train.add_argument('--epochs', help='The number of epochs to run '
                       f'training for. Default: {EPOCHS}.', type=int,
                       default=EPOCHS)
    train.add_argument('--gan-checkpoint', help='Specify an existing trained '
                       'model for the GAN-based training phase.', type=str)
    train.add_argument('--gpus', help='The number of GPUs to use for training '
                       'on a single system. The GPUs will be automatically '
                       'selected in numerical order. Default: All available '
                       'GPUs.', type=int, default=None)
    # Distributed-training rendezvous settings.
    train.add_argument('--master-addr', help='The address to be used for all '
                       f'distributed communication. Default: {MASTER_ADDR}',
                       type=str, default=MASTER_ADDR)
    train.add_argument('--master-port', help='The port to use for all '
                       f'distributed communication. Default {MASTER_PORT}',
                       type=str, default=MASTER_PORT)
    # MODELS maps model names to their implementations; keys become choices.
    train.add_argument('--model', help='Select the model to use for super '
                       'resolution.', type=str, default=MODEL,
                       choices=MODELS.keys())
    train.add_argument('--pretrain-epochs', help='The number of epochs to '
                       f'run pretraining for. Default: {PRE_EPOCHS}.',
                       type=int, default=PRE_EPOCHS)
    train.add_argument('--psnr-checkpoint', help='Specify an existing trained '
                       'model for the PSNR-based training phase.', type=str)
    train.add_argument('--skip-image-save', help='By default, a sample image '
                       'is generated after every epoch and saved to the '
                       '"outputs/" directory. Add this flag to skip generating '
                       'and saving the image to reduce disk space.',
                       action='store_true')
    train.add_argument('--train-dir', help='Specify the location to the '
                       'directory where training images are stored. Default: '
                       f'{TRAIN_DIR}.', type=str, default=TRAIN_DIR)
    # ------------------------------------------------------------------
    # "test" sub-command: upscale a single image with a trained model.
    # ------------------------------------------------------------------
    test = commands.add_parser('test', help='Generated a super resolution '
                               'image based on a trained SRGAN model.')
    test.add_argument('image', type=str, help='Filename of image to upres.')
    test.add_argument('--gpus', help='The number of GPUs to use for training '
                      'on a single system. The GPUs will be automatically '
                      'selected in numerical order. Default: All available '
                      'GPUs.', type=int, default=None)
    test.add_argument('--model', help='Select the model to use for super '
                      'resolution.', choices=MODELS.keys(), type=str,
                      default=MODEL)
    return parser.parse_args()
def PatchWord(ea, value):
    """
    Patch one program word (2 bytes) at the given linear address.

    @param ea: linear address
    @param value: new value of the word
    @return: 1 if successful, 0 if not
    """
    return idaapi.patch_word(ea, value)
def _binary_array_to_hex(arr):
"""
internal function to make a hex string out of a binary array
"""
h = 0
s = []
for i, v in enumerate(arr.flatten()):
if v:
h += 2**(i % 8)
if (i % 8) == 7:
s.append(hex(h)[2:].rjust(2, '0'))
h = 0
return "".join(s) | b705e4dc1dfc48f92f7c97dd7ba9d4dd4c4d0a98 | 3,634,090 |
def float_nsf(num, precision=17):
    """Format *num* in scientific notation with n significant figures."""
    return '{0:.{1}e}'.format(float(num), precision - 1)
import logging
def logit_layer_for_bitext(
        nb_classes,  # V
        inputs,  # [B, M, dim]
        outputs,  # [B, N]
        dim,
        nb_softmax_samples,  # S
        is_training,
        approximation='botev-batch',
        support=None,  # [S]
        importance=None,  # [S]
        name='logit'
):
    """
    Logit strategies for sequences where the inputs and the outputs are defined over parallel sequences.

    :param nb_classes: number of classes over which we define a softmax
    :param inputs: forward activations [B, M, dim]
    :param outputs: output labels [B, N]
    :param dim: number of activations dim
    :param nb_softmax_samples: use between 0 and nb_classes to get an approximation;
        any value outside that open interval selects the exact softmax branch below
    :param is_training: for sampled approximations this switches between truncated/complete supports at training/prediction
    :param approximation: which approximation to use
        - 'botev': CSS with a shared support for all elements in a sequence
        - 'botev-batch': CSS with a shared support for all sequences in batch
    :param support: a batch-wise shared support of probable and negative classes
        - necessary for botev-batch, ignored by others
    :param importance: importance of elements in support
        - necessary for botev-batch, ignored by others
    :return: logits [B * T, V|S] and targets [B * T]
    """
    batch_size = tf.shape(inputs)[0]  # B
    longest_input = tf.shape(inputs)[1]  # M
    longest_output = tf.shape(outputs)[1]  # N
    if 0 < nb_softmax_samples < nb_classes:  # Here we employ a sampled softmax architecture
        logging.info('%s sampled-softmax=%s', name, approximation)
        if approximation == 'botev':
            with tf.variable_scope('botev'):
                # logits: [B, M, V|S]
                # targets: [B, N]
                logits, targets = botev_sampled_softmax_layer(
                    nb_classes=nb_classes,
                    nb_samples=nb_softmax_samples,
                    dim=dim,
                    labels=outputs,  # [B, N]
                    inputs=inputs,  # [B, M, dim]
                    is_training=is_training
                )
                # For compatibility with the rest of the code
                # [B * M, V|S]
                logits = tf.reshape(logits, [batch_size * longest_input, -1])
                # [B * N]
                targets = tf.reshape(targets, [batch_size * longest_output])
        elif approximation == 'botev-batch':
            # This variant shares one sampled support across the whole batch,
            # so the caller must provide it (with per-class importances).
            if support is None or importance is None:
                raise ValueError('Softmax approximation "botev-batch" requires "support" and "importance"')
            with tf.variable_scope('botev-batch'):
                # logits: [B, M, V|S]
                # targets: [B, N]
                logits, targets = botev_batch_sampled_softmax_layer(
                    nb_classes=nb_classes,  # V
                    dim=dim,
                    labels=outputs,  # [B, N]
                    support=support,  # [S]
                    importance=importance,  # [S]
                    inputs=inputs,  # [B, M, dim]
                    is_training=is_training
                )
                # For compatibility with the rest of the code
                # [B * M, V|S]
                logits = tf.reshape(logits, [batch_size * longest_input, -1])
                # [B * N]
                targets = tf.reshape(targets, [batch_size * longest_output])
        else:
            raise ValueError('Unknown softmax approximation for bitext: %s' % approximation)
    else:  # Here we employ an exact softmax architecture
        # A single dense projection from activations to the full vocabulary.
        # [B * M, V]
        logits = tf.contrib.layers.fully_connected(
            tf.reshape(inputs, [batch_size * longest_input, dim]),  # [B * M, dim]
            num_outputs=nb_classes,
            activation_fn=None  # for logits
        )
        # Define targets
        # [B * N]
        targets = tf.reshape(outputs, [-1])
    # [B * M, V|S], [B * N]
    return logits, targets
import os
def get_counts_filename(align_path, output_dir):
    """returns counts output path

    Arguments:
        - align_path: path to the alignment file; its recognised suffix is
          swapped for ".txt" via the module-level fn_suffixes pattern
        - output_dir: directory where the counts file is to be written
    """
    basename = fn_suffixes.sub(".txt", os.path.basename(align_path))
    return os.path.join(output_dir, basename)
def run_command(client: ParallelSSHClient, command: str) -> CommandResult:
    """Executes identical command on all hosts attached to client.

    Will wait until all hosts complete the command execution or timeout is reached.
    Re-raises pssh exceptions.
    # TODO Handle more specific exceptions
    """
    try:
        # stop_on_errors=False lets the remaining hosts keep executing when
        # one crashes, collecting per-host exceptions instead of aborting.
        # The result maps hostname -> host_output.
        host_output = client.run_command(command, stop_on_errors=False)
        client.join(host_output)
    except pssh.exceptions.Timeout:
        log.warning('Command `{}` reached time limit'.format(command))
        raise
    except pssh.exceptions.ProxyError as e:
        log.error('Could not connect to proxy server, reason: {}'.format(e))
        raise
    except Exception as e:
        log.critical(e)
        raise  # FIXME Find out what throws this exception
    else:
        log.debug('Command `{}` finished'.format(command))
        return host_output
def angle_close(angle1, angle2):
    """ Determines whether an angle1 is close to angle2. """
    threshold = np.pi / 8
    return abs(angle_difference(angle1, angle2)) < threshold
def server_url():
    # type: () -> Optional[str]
    """Get the configured server URL, without any trailing slash."""
    configured = toolkit.config.get(SERVER_URL_CONF_KEY)
    if not configured:
        raise ValueError("Configuration option '{}' is not set".format(
            SERVER_URL_CONF_KEY))
    # Normalise away a single trailing slash, if present.
    if configured.endswith('/'):
        configured = configured[:-1]
    return configured
def clean_data():
    """
    Method for cleaning the data and removing unnecessary features

    Args:
        None
    Returns:
        df (pandas dataframe): Return pandas dataframe
    """
    billboard = pd.read_csv("dashboard/asset/data/kl_billboard.csv")
    # Road-type columns are not needed downstream; drop them, then drop any
    # columns that still contain NaN values.
    road_types = ["motorway", "trunk", "primary", "secondary", "tertiary"]
    billboard.drop(columns=road_types, inplace=True)
    billboard.dropna(axis="columns", inplace=True)
    return billboard
from stingray.lightcurve import Lightcurve
from stingray.events import EventList
from stingray.crossspectrum import Crossspectrum
from hendrics.io import get_file_type
from stingray.io import _retrieve_pickle_object
import logging
def load_dataset_from_intermediate_file(fname):
    """Load a Stingray object from an intermediate file and wrap it as a DataSet.

    (Doc fix: the old docstring said "Save"; this function loads.)
    Returns an EventList-, Lightcurve-, etc. backed DataSet depending on the
    file contents, or None for unsupported types.
    """
    # This will return an EventList, a light curve, a Powerspectrum, ...
    # depending on the contents of the file
    try:
        ftype, contents = get_file_type(fname)
    except Exception:
        # Narrowed from a bare `except:`; fall back to unpickling when the
        # type cannot be determined.
        contents = _retrieve_pickle_object(fname)
    if isinstance(contents, Lightcurve):
        return DataSet.get_lightcurve_dataset_from_stingray_Lightcurve(contents)
    elif isinstance(contents, EventList):
        return DataSet.get_eventlist_dataset_from_stingray_Eventlist(contents)
    # This also work for Powerspectrum and AveragedCrosspowerspectrum, clearly
    elif isinstance(contents, Crossspectrum):
        logging.error("Unsupported intermediate file type: Crossspectrum")
    else:
        # Bug fix: previously referenced the undefined name `stingray_object`,
        # raising NameError on any unsupported type.
        logging.error("Unsupported intermediate file type: %s" % type(contents).__name__)
    return None
import functools
def _FlowMethod(func):
"""Decorator that checks the if port_id exists on board."""
@functools.wraps(func)
def wrapper(instance, port_id, *args, **kwargs):
if port_id not in instance.flows:
raise FlowManagerError('Not a exist port_id %d' % port_id)
return func(instance, port_id, *args, **kwargs)
return wrapper | d977b07b329c2943aa2dca465cab80227e8c67f3 | 3,634,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.