| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def create_attribute(representation_uuid, attribute_name):
"""create a representation of an attribute of a representation"""
try:
uuid = get_bus().create_attribute(representation_uuid, attribute_name, public=True)
return JsonResponse({'type': 'uuid', 'uuid': uuid})
except Exception as exception:
message, status = handle_exception(exception)
return JsonResponse({'message': message}, status=status)
|
f1ddfbc634b459a7e3f179125fd323beb871957d
| 3,640,200
|
from typing import Optional
import subprocess
import warnings
def get_current_git_hash(raise_on_error: bool = False) -> Optional[str]:
""" Return git hash of the latest commit
Parameters
----------
raise_on_error: bool, optional
If False (default), will return None, when it fails to obtain commit hash.
If True, will raise, when it fails to obtain commit hash.
Returns
-------
Short hash of the current HEAD or None.
"""
try:
git_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').strip()
except subprocess.CalledProcessError:
if raise_on_error:
raise
warnings.warn('Probably not in a git repo.')
git_hash = None
return git_hash
|
7a17dc7e0574b7f3da91f009714226f855723427
| 3,640,201
|
def check_shift(start_time, end_time, final_minute, starting_minute, record):
"""
Determines whether the call falls in the day or night shift and computes its price.
:param start_time: hour (0-23) at which the call started
:param end_time: hour (0-23) at which the call ended
:param final_minute: minute at which the call ended
:param starting_minute: minute at which the call started
:param record: dict with 'start' and 'end' timestamps in seconds
:return: the computed price of the call
"""
nem_start_time = start_time + (starting_minute / 60)
nem_end_time = end_time + (final_minute / 60)
call_time = (record['end'] - record['start']) // 60
if 6 < nem_start_time < 22:
if 6 < nem_end_time < 22:
# The call was completed entirely within the daytime period
value = 0.36 + call_time * 0.09
else:
# The call started in the daytime period and ended
# in the nighttime period
hour_max = 22
value = 0.36 + ((hour_max - nem_start_time) * 60) * 0.09
value = value + 0.36
else:
if not 6 < nem_end_time < 22:
# The call was completed entirely within the nighttime period
value = 0.36
else:
# The call started in the nighttime period and ended
# in the daytime period
hour_min = 6
value = 0.36 + ((nem_end_time - hour_min) * 60) * 0.09
value = value + 0.36
return value
|
666883348347e8408b087ac63acd8608ff589a1c
| 3,640,202
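A brief usage sketch for `check_shift` above, under the assumption (inferred from the arithmetic, not stated in the source) that the hour/minute arguments describe the call's start and end clock times and that `record` holds Unix-style timestamps in seconds:

```python
# Hypothetical example: a 5-minute call placed at 10:00 and ending at 10:05.
record = {"start": 0, "end": 300}  # timestamps in seconds; duration = 300 s = 5 min

# Daytime (6h-22h) tariff: 0.36 fixed charge + 0.09 per completed minute.
price = check_shift(start_time=10, end_time=10,
                    final_minute=5, starting_minute=0,
                    record=record)
print(price)  # expected: 0.36 + 5 * 0.09 = 0.81
```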
|
import hashlib
def s3_avatar_represent(user_id, tablename="auth_user", gravatar=False, **attr):
"""
Represent a User as their profile picture or Gravatar
@param tablename: either "auth_user" or "pr_person" depending on which
table the 'user_id' refers to
@param attr: additional HTML attributes for the IMG(), such as _class
"""
size = (50, 50)
if user_id:
db = current.db
s3db = current.s3db
cache = s3db.cache
table = s3db[tablename]
email = None
image = None
if tablename == "auth_user":
user = db(table.id == user_id).select(table.email,
cache = cache,
limitby = (0, 1),
).first()
if user:
email = user.email.strip().lower()
ltable = s3db.pr_person_user
itable = s3db.pr_image
query = (ltable.user_id == user_id) & \
(ltable.pe_id == itable.pe_id) & \
(itable.profile == True)
image = db(query).select(itable.image,
limitby = (0, 1),
).first()
if image:
image = image.image
elif tablename == "pr_person":
user = db(table.id == user_id).select(table.pe_id,
cache = cache,
limitby = (0, 1),
).first()
if user:
ctable = s3db.pr_contact
query = (ctable.pe_id == user.pe_id) & \
(ctable.contact_method == "EMAIL")
email = db(query).select(ctable.value,
cache = cache,
limitby = (0, 1),
).first()
if email:
email = email.value
itable = s3db.pr_image
query = (itable.pe_id == user.pe_id) & \
(itable.profile == True)
image = db(query).select(itable.image,
limitby = (0, 1),
).first()
if image:
image = image.image
if image:
image = s3db.pr_image_library_represent(image, size=size)
size = s3db.pr_image_size(image, size)
url = URL(c="default", f="download",
args=image)
elif gravatar:
if email:
# If no Image uploaded, try Gravatar, which also provides a nice fallback identicon
email_hash = hashlib.md5(email.encode('utf-8')).hexdigest()
url = "//www.gravatar.com/avatar/%s?s=50&d=identicon" % email_hash
else:
url = "//www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
else:
url = URL(c="static", f="img", args="blank-user.gif")
else:
url = URL(c="static", f="img", args="blank-user.gif")
if "_class" not in attr:
attr["_class"] = "avatar"
if "_width" not in attr:
attr["_width"] = size[0]
if "_height" not in attr:
attr["_height"] = size[1]
return IMG(_src=url, **attr)
|
2c537a57a5d20ed8b4329338883f209fa9678fc4
| 3,640,203
|
from typing import Dict
from typing import List
import pathlib
import json
def json_loader(path_to_json_file: str) -> Dict[str, List[str]]:
"""Reads a JSON file and converts its content in a dictionary.
Parameters
----------
path_to_json_file: str
The path to the JSON file.
Returns
-------
Dict[str, List[str]]
A dictionary of source codes with the corresponding lists of instrument symbols of
interest for each source.
"""
with pathlib.Path(path_to_json_file).open('r') as infile:
return json.loads(infile.read())
|
d3f26504078e72e1522981a4d8ca5b60c3b8cf23
| 3,640,204
|
def get_region_solution_attribute(data, region_id, attribute, func, intervention):
"""Extract region solution attribute"""
regions = data.get('NEMSPDCaseFile').get('NemSpdOutputs').get('RegionSolution')
for i in regions:
if (i['@RegionID'] == region_id) and (i['@Intervention'] == intervention):
return func(i[attribute])
message = f'Attribute not found: {region_id} {attribute} {intervention}'
raise CasefileLookupError(message)
|
0dc26e54ae5f16f8b3158ec00a5dc0bc58776408
| 3,640,205
|
import torch.nn as nn
def conv1x1(in_planes: int, out_planes: int) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)
|
2c8fcb8e04084ce35a2ae595b457fdf68fd27723
| 3,640,206
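A quick shape check for the 1x1 convolution helper, assuming PyTorch (implied by the `nn.Conv2d` return type):

```python
import torch

layer = conv1x1(in_planes=64, out_planes=128)
x = torch.randn(1, 64, 32, 32)   # (batch, channels, height, width)
y = layer(x)
print(y.shape)                   # torch.Size([1, 128, 32, 32]): only the channel count changes
```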
|
import numpy as np
def make_ratio_map(amap, bmap):
"""Get the ratio of two PISA 2 style maps (amap/bmap) and return as another
PISA 2 style map."""
validate_maps(amap, bmap)
with np.errstate(divide='ignore', invalid='ignore'):
result = {'ebins': amap['ebins'],
'czbins': amap['czbins'],
'map': amap['map']/bmap['map']}
return result
|
9497ddadd6d983b3094107aceadbdc1e2f1fb0a7
| 3,640,207
|
from typing import Collection
from typing import Mapping
from typing import Sequence
import math
def _compose_duration(
components_tags: Collection[Tags]) -> Mapping[str, Sequence[str]]:
"""Returns summed duration tags."""
duration_seconds_values = [
component_tags.one_or_none(DURATION_SECONDS)
for component_tags in components_tags
]
if duration_seconds_values and None not in duration_seconds_values:
try:
return {
DURATION_SECONDS.name:
(str(math.fsum(map(float, duration_seconds_values))),),
}
except ValueError:
pass
return {}
|
99bebed06628627211a117c738d89790d11adc1b
| 3,640,208
|
import os
import numpy as np
import commands  # Python 2 module; on Python 3 use subprocess.getoutput instead
def feature_spatial(fslDir, tempDir, aromaDir, melIC):
""" This function extracts the spatial feature scores. For each IC it determines the fraction of the mixture modeled thresholded Z-maps respectively located within the CSF or at the brain edges, using predefined standardized masks.
Parameters
---------------------------------------------------------------------------------
fslDir: Full path of the bin-directory of FSL
tempDir: Full path of a directory where temporary files can be stored (called 'temp_IC.nii.gz')
aromaDir: Full path of the ICA-AROMA directory, containing the mask-files (mask_edge.nii.gz, mask_csf.nii.gz & mask_out.nii.gz)
melIC: Full path of the nii.gz file containing mixture-modeled thresholded (p>0.5) Z-maps, registered to the MNI152 2mm template
Returns
---------------------------------------------------------------------------------
edgeFract: Array of the edge fraction feature scores for the components of the melIC file
csfFract: Array of the CSF fraction feature scores for the components of the melIC file"""
# Get the number of ICs
numICs = int(commands.getoutput('%sfslinfo %s | grep dim4 | head -n1 | awk \'{print $2}\'' % (fslDir, melIC) ))
# Loop over ICs
edgeFract=np.zeros(numICs)
csfFract=np.zeros(numICs)
for i in range(0,numICs):
# Define temporary IC-file
tempIC = os.path.join(tempDir,'temp_IC.nii.gz')
# Extract IC from the merged melodic_IC_thr2MNI2mm file
os.system(' '.join([os.path.join(fslDir,'fslroi'),
melIC,
tempIC,
str(i),
'1']))
# Change to absolute Z-values
os.system(' '.join([os.path.join(fslDir,'fslmaths'),
tempIC,
'-abs',
tempIC]))
# Get sum of Z-values within the total Z-map (calculate via the mean and number of non-zero voxels)
totVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-V | awk \'{print $1}\''])))
if not (totVox == 0):
totMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-M'])))
else:
print(' - The spatial map of component ' + str(i+1) + ' is empty. Please check!')
totMean = 0
totSum = totMean * totVox
# Get sum of Z-values of the voxels located within the CSF (calculate via the mean and number of non-zero voxels)
csfVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-k mask_csf.nii.gz',
'-V | awk \'{print $1}\''])))
if not (csfVox == 0):
csfMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-k mask_csf.nii.gz',
'-M'])))
else:
csfMean = 0
csfSum = csfMean * csfVox
# Get sum of Z-values of the voxels located within the Edge (calculate via the mean and number of non-zero voxels)
edgeVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-k mask_edge.nii.gz',
'-V | awk \'{print $1}\''])))
if not (edgeVox == 0):
edgeMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-k mask_edge.nii.gz',
'-M'])))
else:
edgeMean = 0
edgeSum = edgeMean * edgeVox
# Get sum of Z-values of the voxels located outside the brain (calculate via the mean and number of non-zero voxels)
outVox = int(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-k mask_out.nii.gz',
'-V | awk \'{print $1}\''])))
if not (outVox == 0):
outMean = float(commands.getoutput(' '.join([os.path.join(fslDir,'fslstats'),
tempIC,
'-k mask_out.nii.gz',
'-M'])))
else:
outMean = 0
outSum = outMean * outVox
# Determine edge and CSF fraction
if not (totSum == 0):
edgeFract[i] = (outSum + edgeSum)/(totSum - csfSum)
csfFract[i] = csfSum / totSum
else:
edgeFract[i]=0
csfFract[i]=0
# Remove the temporary IC-file
os.remove(tempIC)
# Return feature scores
return edgeFract, csfFract
|
fb515a61bd81533b3b79f9cd500ad5b77723b527
| 3,640,209
|
from datetime import datetime
def str_to_date(date_str, fmt=DATE_STR_FMT):
"""Convert string date to datetime object."""
return datetime.strptime(date_str, fmt).date()
|
102f384b479b217259c9bbfe36c8b66909daee50
| 3,640,210
|
import time
import json
def create_elasticsearch_domain(name, account_id, boto_session, lambda_role, cidr):
"""
Create Elastic Search Domain
"""
boto_elasticsearch = boto_session.client('es')
total_time = 0
resource = "arn:aws:es:ap-southeast-2:{0}:domain/{1}/*".format(account_id, name)
access_policy = {"Version": "2012-10-17", "Statement": [
{"Effect": "Allow", "Principal": {"AWS": str(lambda_role)}, "Action": "es:*", "Resource": resource},
{"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", "Resource": resource,
"Condition": {"IpAddress": {"aws:SourceIp": "{0}".format(cidr)}}}
]}
endpoint = None
time.sleep(5)
try:
print('Creating elasticsearch domain: {0}'.format(name))
boto_elasticsearch.create_elasticsearch_domain(
DomainName=name,
ElasticsearchVersion='2.3',
ElasticsearchClusterConfig={
'InstanceType': 't2.micro.elasticsearch',
'InstanceCount': 1,
'DedicatedMasterEnabled': False,
'ZoneAwarenessEnabled': False
},
EBSOptions={
'EBSEnabled': True,
'VolumeType': 'gp2',
'VolumeSize': 20
}
)
time.sleep(10)
attempts = 1
while True:
print('Trying to apply access policies to elasticsearch domain: {0} (attempt: {1})'.format(name, attempts))
try:
boto_elasticsearch.update_elasticsearch_domain_config(
DomainName=name,
AccessPolicies=json.dumps(access_policy)
)
break
except Exception as e:
attempts += 1
if attempts > 3:
print('Failed to apply access policies. Please run this script again with `-a delete -n {0}`'
'and wait approx 20 minutes before trying again'.format(name))
print('Full error was: {0}'.format(e))
exit(1)
else:
time.sleep(2)
except Exception as e:
print('Could not create elasticsearch domain: {0}.'.format(name))
print('Error was: {0}'.format(e))
exit(1)
while True:
try:
es_status = boto_elasticsearch.describe_elasticsearch_domain(DomainName=name)
processing = es_status['DomainStatus']['Processing']
if not processing:
endpoint = es_status['DomainStatus']['Endpoint']
print('Domain: {0} has been created!'.format(name))
break
else:
print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
time.sleep(120)
except Exception:
print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
total_time += 120
if total_time > 1800:
print('Script has been running for over 30 minutes... This likely means that your elastic search domain'
' has not created successfully. Please check the Elasticsearch Service dashboard in AWS console'
' and delete the domain named {0} if it exists before trying again'.format(name))
exit(1)
time.sleep(120)
return endpoint
|
5e33bd1454a2b3d1ce3bc1cc181b44497ce6035a
| 3,640,211
|
def get_user(email):
"""
:param email: the user's email address
:returns: a (user_record, error) tuple; MySQL errors are handled by the try-except clauses
"""
result = {}
connection = _connect_to_db()
try:
with connection.cursor() as cursor:
row_count = 0
e = 'none'
# Read a single record
sql = f"SELECT `id`,`name`, \
`last_name`, \
`email`, \
`password`, \
`phone_number`, \
`address`, \
`profile_image_url`, \
`city_id`, \
`account_type_id`, \
`lat_location`, \
`long_location`, \
`created_at`, \
`updated_at`, \
`active` FROM users WHERE users.email='{email}'"
cursor.execute(sql)
result = cursor.fetchone()
except Exception as ex:
#print(ex.args[1])
e = ex.args[0]
finally:
connection.close()
return result,e
|
f05a59dc95ade5157d09d26a7d899fd9b19d3526
| 3,640,212
|
def index(request):
"""
An example of a function-based view
method:
get
request:
None
response:
type: html
"""
return HttpResponse("Hello, world. You're at the polls index.")
|
1fe400e5f08728eef5834268d219a3f109325114
| 3,640,213
|
import requests
def make_https_request(logger, url, jobs_manager, download=False, timeout_attempt=0):
"""
Utility function for making HTTPs requests.
"""
try:
req = requests_retry_session().get(url, timeout=120)
req.raise_for_status()
except requests.exceptions.ConnectionError as c_err:
logger.error("Connection Error while fetching the cert list")
logger.error(str(c_err))
jobs_manager.record_job_error()
exit(1)
except requests.exceptions.HTTPError as h_err:
logger.warning("HTTP Error while fetching the cert list")
logger.warning(str(h_err))
return None
except requests.exceptions.RequestException as err:
logger.error("Request exception while fetching the cert list")
logger.error(str(err))
jobs_manager.record_job_error()
exit(1)
except requests.exceptions.Timeout:
if timeout_attempt == 0:
logger.warning("Timeout occurred. Attempting again...")
result = make_https_request(
logger, url, jobs_manager, download, timeout_attempt=1
)
return result
else:
logger.error("Too many timeouts. Exiting")
jobs_manager.record_job_error()
exit(1)
except Exception as e:
logger.error("UNKNOWN ERROR with the HTTP Request: " + str(e))
jobs_manager.record_job_error()
exit(1)
if req.status_code != 200:
logger.error("ERROR: Status code " + str(req.status_code))
return None
if download:
return req.content
return req.text
|
825d1d79dc44e571fed4437fb1fbebc60bfef669
| 3,640,214
|
def info():
"""
Displays the landing page of the website. @TODO - Add an index.html for the Ubik site.
:return: response for each get request on '/'.
"""
return about_response()
|
1460d166c2cde36e931803b24fd235c1e100aa0d
| 3,640,215
|
def falshsort():
"""
TODO: implement flashsort, as described here: https://en.wikipedia.org/wiki/Flashsort
:return: None
"""
return None
|
359dc737a6611ebd6a73dd7761a6ade97b44b7ab
| 3,640,216
|
import random
def random_word(text, label, label_map, tokenizer, sel_prob):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param text: str, whitespace-separated sentence to be tokenized.
:param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:param label: labels such as ["D", "O", "O", "D"]
:param label_map: integer labels such as [0, 1, 1, 0]
:param sel_prob: the probability of computing the loss for each token
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
text = text.strip().split(" ")
orig_to_map_label = []
orig_to_map_token = []
assert len(text) == len(label_map)
assert len(text) == len(label)
for i in range(0, len(text)):
orig_token = text[i]
orig_label = label[i]
orig_label_map = label_map[i]
tokens = tokenizer.tokenize(orig_token)
orig_to_map_token.extend(tokens)
prob = random.random()
if orig_label == "D":
if prob < sel_prob:
orig_to_map_label.append(orig_label_map)
else:
orig_to_map_label.append(-1)
else:
if prob < sel_prob / 5.0:
orig_to_map_label.append(orig_label_map)
else:
orig_to_map_label.append(-1)
for j in range(1, len(tokens)):
orig_to_map_label.append(-1)
assert len(orig_to_map_label) == len(orig_to_map_token)
return orig_to_map_token, orig_to_map_label
|
46e7ac7d9fd0d82bbfef97a9a88efd0599bbc3b3
| 3,640,217
|
def ITERATIVETEST_Error_Of_Input_Parameter_inZSubsample():
"""Tests occurrence of an error because of an invalid input value provided at field "inZSubsample"."""
Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
testDataVariations = {
"Invalid_Variation_1" : __getInvalidTestDataVariation1_inZSubsample(),
"Invalid_Variation_2" : __getInvalidTestDataVariation2_inZSubsample(),
}
return ( testDataVariations, testError_inZSubsample )
|
8de38b2ceaf1312442f170a75f96cec7ec9fdd7b
| 3,640,218
|
import numpy
def _match_storm_objects(first_prediction_dict, second_prediction_dict,
top_match_dir_name):
"""Matches storm objects between first and second prediction files.
F = number of storm objects in first prediction file
:param first_prediction_dict: Dictionary returned by
`prediction_io.read_ungridded_predictions` for first prediction file.
:param second_prediction_dict: Same but for second prediction file.
:param top_match_dir_name: See documentation at top of file.
:return: first_prediction_dict: Same as input, but containing only storm
objects matched with one in the second file.
:return: second_prediction_dict: Same as input, but containing only storm
objects matched with one in the first file. Both dictionaries have
storm objects in the same order.
"""
first_storm_times_unix_sec = first_prediction_dict[
prediction_io.STORM_TIMES_KEY]
first_unique_times_unix_sec = numpy.unique(first_storm_times_unix_sec)
first_indices = numpy.array([], dtype=int)
second_indices = numpy.array([], dtype=int)
for i in range(len(first_unique_times_unix_sec)):
this_match_file_name = tracking_io.find_match_file(
top_directory_name=top_match_dir_name,
valid_time_unix_sec=first_unique_times_unix_sec[i],
raise_error_if_missing=True)
print('Reading data from: "{0:s}"...'.format(this_match_file_name))
this_match_dict = tracking_io.read_matches(this_match_file_name)[0]
these_first_indices, these_second_indices = (
_match_storm_objects_one_time(
first_prediction_dict=first_prediction_dict,
second_prediction_dict=second_prediction_dict,
match_dict=this_match_dict)
)
first_indices = numpy.concatenate((first_indices, these_first_indices))
second_indices = numpy.concatenate((
second_indices, these_second_indices
))
_, unique_subindices = numpy.unique(first_indices, return_index=True)
first_indices = first_indices[unique_subindices]
second_indices = second_indices[unique_subindices]
_, unique_subindices = numpy.unique(second_indices, return_index=True)
first_indices = first_indices[unique_subindices]
second_indices = second_indices[unique_subindices]
first_prediction_dict = prediction_io.subset_ungridded_predictions(
prediction_dict=first_prediction_dict,
desired_storm_indices=first_indices)
second_prediction_dict = prediction_io.subset_ungridded_predictions(
prediction_dict=second_prediction_dict,
desired_storm_indices=second_indices)
return first_prediction_dict, second_prediction_dict
|
9c3bc60e99dc3d07cbd661b7187833e26f18d6f7
| 3,640,219
|
from typing import Optional
from typing import cast
def get_default_branch(base_url: str, auth: Optional[AuthBase], ssl_verify: bool = True) -> dict:
"""Fetch a reference.
:param base_url: base Nessie url
:param auth: Authentication settings
:param ssl_verify: ignore ssl errors if False
:return: json Nessie branch
"""
return cast(dict, _get(base_url + "/trees/tree", auth, ssl_verify=ssl_verify))
|
e2ac587705c82c95edeb415b79ca15746e9e9b78
| 3,640,220
|
import csv
def make_source_thesaurus(source_thesaurus=SOURCE_THESAURUS_FILE):
"""
Get dict mapping source name to `SourceObject` for each source.
Parameters
----------
source_thesaurus : str
Filepath for the source thesaurus data.
Returns
-------
Dict of {"Country": SourceObject} pairs.
"""
with open(source_thesaurus, 'rbU') as f:
f.readline() # skip headers
csvreader = csv.DictReader(f)
source_thesaurus = {}
for row in csvreader:
source_name = row['Source Name'].decode(UNICODE_ENCODING)
source_country = row['Country/Region'].decode(UNICODE_ENCODING)
source_url = row['Link/File'].decode(UNICODE_ENCODING)
source_priority = row['Prioritization'].decode(UNICODE_ENCODING)
# TODO: get year info from other other file (download from Google Drive)
source_thesaurus[source_name] = SourceObject(name=source_name,
country=source_country, priority=source_priority,
url=source_url)
return source_thesaurus
|
43109eb3c9f9dedccae5bf692abebb0d66d09f7f
| 3,640,221
|
import re
def extract_filter(s):
"""Extracts a filter from str `s`
Parameters
----------
s : str
* A str that may or may not have a filter identified by ', that HUMAN VALUE'
Returns
-------
s : str
* str `s` without the parsed_filter included
parsed_filter : dict
* filter attributes mapped from filter from `s` if any found
"""
split_filter = re.split(pytan.constants.FILTER_RE, s, re.IGNORECASE)
# split_filter = ['Folder Name Search with RegEx Match', ' is:.*']
parsed_filter = {}
# if filter parsed out from s
if len(split_filter) > 1:
# get new s from index 0
s = split_filter[0].strip()
# s='Folder Name Search with RegEx Match'
# get the filter string from index 1
parsed_filter = split_filter[1].strip()
# parsed_filter='is:.*'
parsed_filter = map_filter(parsed_filter)
if not parsed_filter:
err = "Filter {!r} is not a valid filter!".format
raise pytan.exceptions.HumanParserError(err(split_filter[1]))
dbg = 'parsed new string to {!r} and filters to:\n{}'.format
humanlog.debug(dbg(s, jsonify(parsed_filter)))
return s, parsed_filter
|
2a8dad70429fdc6295a44794f0c2ac6f19150f31
| 3,640,222
|
from typing import Tuple
from typing import Union
import numpy as np
from numpy import pi
def sigmoid(
x,
sigmoid_type: str = "tanh",
normalization_range: Tuple[Union[float, int], Union[float, int]] = (0, 1)
):
"""
A sigmoid function. From Wikipedia (https://en.wikipedia.org/wiki/Sigmoid_function):
A sigmoid function is a mathematical function having a characteristic "S"-shaped curve
or sigmoid curve.
Args:
x: The input
sigmoid_type: Type of sigmoid function to use [str]. Can be one of:
* "tanh" or "logistic" (same thing)
* "arctan"
* "polynomial"
normalization_range: Range in which to normalize the sigmoid, shorthanded here in the
documentation as "N". This parameter is given as a two-element tuple (min, max).
After normalization:
>>> sigmoid(-Inf) == normalization_range[0]
>>> sigmoid(Inf) == normalization_range[1]
* In the special case of N = (0, 1):
>>> sigmoid(-Inf) == 0
>>> sigmoid(Inf) == 1
>>> sigmoid(0) == 0.5
>>> d(sigmoid)/dx at x=0 == 0.5
* In the special case of N = (-1, 1):
>>> sigmoid(-Inf) == -1
>>> sigmoid(Inf) == 1
>>> sigmoid(0) == 0
>>> d(sigmoid)/dx at x=0 == 1
Returns: The value of the sigmoid.
"""
### Sigmoid equations given here under the (-1, 1) normalization:
if sigmoid_type in ("tanh", "logistic"):
# Note: tanh(x) is simply a scaled and shifted version of a logistic curve; after
# normalization these functions are identical.
s = np.tanh(x)
elif sigmoid_type == "arctan":
s = 2 / pi * np.arctan(pi / 2 * x)
elif sigmoid_type == "polynomial":
s = x / (1 + x ** 2) ** 0.5
else:
raise ValueError("Bad value of parameter 'type'!")
### Normalize
min = normalization_range[0]
max = normalization_range[1]
s_normalized = s * (max - min) / 2 + (max + min) / 2
return s_normalized
|
b9d660d20f7e398a2e57ad0b907ab52c4a88cc36
| 3,640,223
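A short sanity check of the normalization behaviour described in the docstring (a sketch, relying on the NumPy import added above):

```python
print(sigmoid(0.0))                                  # 0.5 under the default (0, 1) range
print(sigmoid(0.0, normalization_range=(-1, 1)))     # 0.0 under the (-1, 1) range
print(sigmoid(1e9, sigmoid_type="arctan"))           # approaches 1.0 for large x
```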
|
def interp(x, x1, y1, x2, y2):
"""Find a point along a line"""
return ((x2 - x) * y1 + (x - x1) * y2) / (x2 - x1)
|
3af4575c017a32619a5bb2866a7faea5ff5c760d
| 3,640,224
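A worked example of the interpolation formula above: halfway between (1, 10) and (2, 20), ((2 - 1.5) * 10 + (1.5 - 1) * 20) / (2 - 1) = 15.

```python
print(interp(1.5, 1, 10, 2, 20))  # 15.0, halfway along the line from (1, 10) to (2, 20)
print(interp(1.0, 1, 10, 2, 20))  # 10.0, endpoints are returned exactly
```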
|
def msm_distance_measure_getter(X):
"""
generate the msm distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
n_dimensions = 1 # todo use other dimensions
return {
"distance_measure": [cython_wrapper(msm_distance)],
"dim_to_use": stats.randint(low=0, high=n_dimensions),
"c": [
0.01,
0.01375,
0.0175,
0.02125,
0.025,
0.02875,
0.0325,
0.03625,
0.04,
0.04375,
0.0475,
0.05125,
0.055,
0.05875,
0.0625,
0.06625,
0.07,
0.07375,
0.0775,
0.08125,
0.085,
0.08875,
0.0925,
0.09625,
0.1,
0.136,
0.172,
0.208,
0.244,
0.28,
0.316,
0.352,
0.388,
0.424,
0.46,
0.496,
0.532,
0.568,
0.604,
0.64,
0.676,
0.712,
0.748,
0.784,
0.82,
0.856,
0.892,
0.928,
0.964,
1,
1.36,
1.72,
2.08,
2.44,
2.8,
3.16,
3.52,
3.88,
4.24,
4.6,
4.96,
5.32,
5.68,
6.04,
6.4,
6.76,
7.12,
7.48,
7.84,
8.2,
8.56,
8.92,
9.28,
9.64,
10,
13.6,
17.2,
20.8,
24.4,
28,
31.6,
35.2,
38.8,
42.4,
46,
49.6,
53.2,
56.8,
60.4,
64,
67.6,
71.2,
74.8,
78.4,
82,
85.6,
89.2,
92.8,
96.4,
100,
],
}
|
1185658474f3a2b9bc17be67c665bdd0a211dce7
| 3,640,225
|
def gen_cpmfgp_test_data_from_config_file(config_file_name, raw_func,
num_tr_data, num_te_data):
""" Generates datasets for CP Multi-fidelity GP fitting. """
# Generate data
def _generate_data(_proc_func, _config, _num_data):
""" Generates data. """
ZX_proc = sample_from_config_space(_config, _num_data)
YY_proc = [_proc_func(z, x) for (z, x) in ZX_proc]
ZZ_proc = get_idxs_from_list_of_lists(ZX_proc, 0)
XX_proc = get_idxs_from_list_of_lists(ZX_proc, 1)
return ZZ_proc, XX_proc, YY_proc, ZX_proc
# Get dataset for testing
def _get_dataset_for_testing(_proc_func, _config, _num_tr_data, _num_te_data):
""" Get dataset for testing. """
ZZ_train, XX_train, YY_train, ZX_train = _generate_data(_proc_func, _config,
_num_tr_data)
ZZ_test, XX_test, YY_test, ZX_test = _generate_data(_proc_func, _config, _num_te_data)
return Namespace(config_file_name=config_file_name, config=config, raw_func=raw_func,
ZZ_train=ZZ_train, XX_train=XX_train, YY_train=YY_train, ZX_train=ZX_train,
ZZ_test=ZZ_test, XX_test=XX_test, YY_test=YY_test, ZX_test=ZX_test)
# Generate the data and return
config = load_config_file(config_file_name)
proc_func = get_processed_func_from_raw_func_via_config(raw_func, config)
return _get_dataset_for_testing(proc_func, config, num_tr_data, num_te_data)
|
c6efaf34601f5b02153fca3ea0926115d1adb918
| 3,640,226
|
import pandas as pd
def csv_to_postgres(engine,
file: str,
table_name: str):
"""
Given a *.csv filepath, create a populated table in a database
:param engine: SQLAlchemy connection/engine for the target database
:param file: Full filepath of the *.csv file
:param table_name: Name of the table to be created
:return:
"""
df = pd.read_csv(file,
index_col=False)
# print(df.head())
# Postgres columns are case-sensitive; make lowercase
df.columns = df.columns.str.lower()
df.rename(columns={'unnamed: 0': 'id'},
inplace=True)
df.to_sql(con=engine,
name=table_name,
if_exists='replace',
index=False)
return None
|
e8a913a32a3b0f7d9d617fa000f0b232e9824736
| 3,640,227
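A usage sketch for the loader above; the connection URL, file path, and table name are placeholders, and pandas, SQLAlchemy, a Postgres driver, and a reachable database are assumed:

```python
from sqlalchemy import create_engine

# Placeholder credentials: replace with your own connection details.
engine = create_engine("postgresql://user:password@localhost:5432/mydb")
csv_to_postgres(engine, file="data/measurements.csv", table_name="measurements")
```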
|
def _custom_padd(a, min_power_of_2=1024, min_zero_padd=50,
zero_padd_ratio=0.5):
""" Private helper to make a zeros-mirror-zeros padd to the next power of
two of a.
Parameters
----------
arrays : np.ndarray,
array to padd.
min_power_of_2 : int (default=512),
min length (power of two) for the padded array.
zero_padd_ratio : float (default=0.5),
determine the ratio of the length of zero padds (either for the first
or the second zero-padd) w.r.t the array length.
min_zero_padd : int (default=50)
min zero padd, either for the first or the second zero-padd.
Note:
-----
Having a signal close to ~200 can make trouble.
Results
-------
arrays : np.ndarray or list of np.ndarray
the unpadded array.
p : tuple of int,
the applied padd.
"""
if not np.log2(min_power_of_2).is_integer():
raise ValueError("min_power_of_2 should be a power of two, "
"got {0}".format(min_power_of_2))
nextpow2 = int(np.power(2, np.ceil(np.log2(len(a)))))
nextpow2 = min_power_of_2 if nextpow2 < min_power_of_2 else nextpow2
diff = nextpow2 - len(a)
# define the three possible padding
zero_padd_len = int(zero_padd_ratio * len(a))
too_short = zero_padd_len < min_zero_padd
zero_padd_len = min_zero_padd if too_short else zero_padd_len
p_zeros = (zero_padd_len, zero_padd_len)
len_padd_left = int(diff / 2)
len_padd_right = int(diff / 2) + (len(a) % 2)
p_total = (len_padd_left, len_padd_right)
if diff == 0:
# [ s ]
p_total = 0
return a, p_total
elif (0 < diff) and (diff < 2 * zero_padd_len):
# [ /zeros | s | zeros/ ]
a = padd(a, p_total)
return a, p_total
elif (2 * zero_padd_len < diff) and (diff < 4 * zero_padd_len):
# [ zeros | mirror-signal | s | mirror-signal | zeros ]
len_reflect_padd_left = len_padd_left - zero_padd_len
len_reflect_padd_right = len_padd_right - zero_padd_len
p_reflect = (len_reflect_padd_left, len_reflect_padd_right)
# padding
a = np.pad(a, p_reflect, mode='reflect')
a = padd(a, p_zeros)
return a, p_total
else:
# [ zeros | mirror-signal | zeros | s | zeros | mirror-signal | zeros ]
len_reflect_padd_left = len_padd_left - 2 * zero_padd_len
len_reflect_padd_right = len_padd_right - 2 * zero_padd_len
p_reflect = (len_reflect_padd_left, len_reflect_padd_right)
# padding
a = padd(a, p_zeros)
a = np.pad(a, p_reflect, mode='reflect')
a = padd(a, p_zeros)
return a, p_total
|
eb7f9d675113b8f53558d911462209c2f72c3ce3
| 3,640,228
|
from io import BytesIO
def encode_multipart_formdata(fields, files):
"""
Encode multipart data to be used in data import
adapted from: http://code.activestate.com/recipes/146306/
:param fields: sequence of (name, value) elements for regular form fields.
:param files: sequence of (name, filename, value) elements for data to be uploaded as files
:return: (content_type, body) ready for httplib.HTTP instance
"""
boundary = '-------tHISiSsoMeMulTIFoRMbOUNDaRY---'
fls = []
for (key, value) in fields:
fls.append('--' + boundary)
fls.append('Content-Disposition: form-data; name="%s"' % key)
fls.append('')
fls.append(value)
for (key, filename, value) in files:
fls.append('--' + boundary)
fls.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
fls.append('Content-Type: %s' % get_content_type(filename))
fls.append('')
fls.append(value)
fls.append('--' + boundary + '--')
fls.append('')
output = BytesIO()
for content in fls:
if isinstance(content, bytes):
output.write(content)
else:
output.write(content.encode())
output.write(b"\r\n")
body = output.getvalue()
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body
|
17cd1ce08d7aac005a07b77517c4644565083cb8
| 3,640,229
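The snippet depends on a `get_content_type` helper that is not shown; a minimal stand-in based on `mimetypes` (an assumption, not the original implementation) and a usage sketch:

```python
import mimetypes

def get_content_type(filename):
    # Fall back to a generic binary type when the extension is unknown.
    return mimetypes.guess_type(filename)[0] or "application/octet-stream"

fields = [("name", "example"), ("comment", "multipart test")]
files = [("upload", "data.csv", "col_a,col_b\n1,2\n")]
content_type, body = encode_multipart_formdata(fields, files)
print(content_type)  # multipart/form-data; boundary=...
print(len(body))     # size of the encoded request body in bytes
```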
|
def is_recording():
""" return current state of recording key macro """
global recording
return recording
|
73f3a28d5d37bdc300768d48a6cc8f2ac81c2cf0
| 3,640,230
|
from typing import Union
from pathlib import Path
from typing import List
from typing import Dict
from typing import Optional
import asyncio
def download_image_urls(
urls_filename: Union[Path, str],
synsets: List[str],
max_concurrent: int = 50,
rewrite: bool = False
) -> Dict[str, Optional[List[str]]]:
"""Downloads urls for each synset and saves them in json format in a given path.
Args:
urls_filename: a path to the file where to save the urls.
synsets: a list of synsets for which to download urls.
max_concurrent (optional): a maximum number of concurrent requests.
rewrite (optional): if True, will download new urls even if file exists.
"""
print("Downloading image urls.")
synsets_to_urls = asyncio.run(_download_image_urls(urls_filename, synsets, max_concurrent, rewrite))
return synsets_to_urls
|
36bc1e2993cfd01ef9fca91354f970ffb980a919
| 3,640,231
|
def generate_new_split(lines1, lines2, rng, cutoff=14937):
"""Takes lines1 and lines2 and combines, shuffles and split again. Useful for working with random splits of data"""
lines = [l for l in lines1] # lines1 may not be a list but rather iterable
lines.extend(lines2)
perm = rng.permutation(len(lines))
lines = [lines[i] for i in perm]
lines1 = lines[:cutoff]
lines2 = lines[cutoff:]
a1 = confusion_matrix.get_embedding_matrix(lines1, normalize=True)
a2 = confusion_matrix.get_embedding_matrix(lines2, normalize=True)
return (lines1, a1, lines2, a2)
|
cac058f44bd5cb729517a1aeb67295a30dac2eb5
| 3,640,232
|
import time
def train_classification(base_iter,
model,
dataloader,
epoch,
criterion,
optimizer,
cfg,
writer=None):
"""Task of training video classification"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.train()
end = time.time()
for step, data in enumerate(dataloader):
base_iter = base_iter + 1
train_batch = data[0].cuda()
train_label = data[1].cuda()
data_time.update(time.time() - end)
outputs = model(train_batch)
loss = criterion(outputs, train_label)
prec1, prec5 = accuracy(outputs.data, train_label, topk=(1, 5))
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.update(loss.item(), train_label.size(0))
top1.update(prec1.item(), train_label.size(0))
top5.update(prec5.item(), train_label.size(0))
batch_time.update(time.time() - end)
end = time.time()
if step % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:
print('-------------------------------------------------------')
for param in optimizer.param_groups:
lr = param['lr']
print('lr: ', lr)
print_string = 'Epoch: [{0}][{1}/{2}]'.format(
epoch, step + 1, len(dataloader))
print(print_string)
print_string = 'data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(
data_time=data_time.val, batch_time=batch_time.val)
print(print_string)
print_string = 'loss: {loss:.5f}'.format(loss=losses.avg)
print(print_string)
print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(
top1_acc=top1.avg, top5_acc=top5.avg)
print(print_string)
iteration = base_iter
writer.add_scalar('train_loss_iteration', losses.avg, iteration)
writer.add_scalar('train_top1_acc_iteration', top1.avg, iteration)
writer.add_scalar('train_top5_acc_iteration', top5.avg, iteration)
writer.add_scalar('train_batch_size_iteration',
train_label.size(0), iteration)
writer.add_scalar('learning_rate', lr, iteration)
return base_iter
|
8564fe2d520d54c81165adb5025aebb9916ab9b5
| 3,640,233
|
import logging
import numpy as np
def get_top5(prediction):
"""return top5 index and value of input array"""
length = np.prod(prediction.size)
pre = np.reshape(prediction, [length])
ind = np.argsort(pre)
ind = ind[length - 5 :]
value = pre[ind]
ind = ind[::-1]
value = value[::-1]
res_str = ""
logging.info("============ top5 ===========")
for (i, v) in zip(ind, value):
logging.info("{}:{}".format(i, v))
res_str = res_str + "{}:{}".format(i, v) + "\n"
return res_str
|
607ad0b9eee550e1bb389a9a92a76472155bea16
| 3,640,234
|
def calculate_parentheses(cases):
"""Calculate all cases in parameter 'cases'
return : case that calculate and it's 24 else return 'No Solutions'
Example and Doctest :
>>> nums = [5, 5, 9, 5]
>>> cases = generate_all_combinations(nums, '+-*/')
>>> calculate_parentheses(cases)
'( ( 5 + 5 ) + 5 ) + 9 = 24'
>>> nums = [13, 2, 13, 13]
>>> cases = generate_all_combinations(nums, '+-*/')
>>> calculate_parentheses(cases)
'2 * ( 13 - ( 13 / 13 ) ) = 24'
>>> nums = [1, 1, 2, 7]
>>> cases = generate_all_combinations(nums, '+-*/')
>>> calculate_parentheses(cases)
'( 1 + 2 ) * ( 1 + 7 ) = 24'
>>> nums = [200, -120, 10, 3]
>>> cases = generate_all_combinations(nums, '+-*/')
>>> calculate_parentheses(cases)
'( ( -120 + 200 ) * 3 ) / 10 = 24'
>>> nums = [1, 1, 1, 9]
>>> cases = generate_all_combinations(nums, '+-*/')
>>> calculate_parentheses(cases)
'No Solutions'
>>> # Check case that can occured 'divided by zero' problem
>>> nums = [13, 13, 13, 13]
>>> cases = generate_all_combinations(nums, '+-*/')
>>> calculate_parentheses(cases)
'No Solutions'
"""
# Use try except because some combination are error because 'divided by zero'
for i in cases:
case_to_calculate = i
num1 = case_to_calculate[0]
operation1 = case_to_calculate[1]
num2 = case_to_calculate[2]
operation2 = case_to_calculate[3]
num3 = case_to_calculate[4]
operation3 = case_to_calculate[5]
num4 = case_to_calculate[6]
# I use different variable name to make you read a program easier
# Case 1 : ( ( num1 'operation1' num2 ) 'operation2' num3 ) 'operation3' num4
case1 = f"( ( {num1} {operation1} {num2} ) {operation2} {num3} ) {operation3} {num4} = 24"
try :
calc1 = calc(num1, operation1, num2)
calc1 = calc(calc1, operation2, num3)
calc1 = calc(calc1, operation3, num4)
except ZeroDivisionError:
calc1 = 0
if calc1 == 24:
return case1
# Case 2 : ( num1 'operation1' ( num2 'operation2' num3 ) ) 'operation3' num4
case2 = f"( {num1} {operation1} ( {num2} {operation2} {num3} ) ){operation3} {num4} = 24"
try :
calc2 = calc(num2, operation2, num3)
calc2 = calc(num1, operation1, calc2)
calc2 = calc(calc2, operation3, num4)
except ZeroDivisionError:
calc2 = 0
if calc2 == 24:
return case2
# Case 3 : ( num1 'operation1' num2 ) 'operation2' ( num3 'operation3' num4 )
case3 = f"( {num1} {operation1} {num2} ) {operation2} ( {num3} {operation3} {num4} ) = 24"
try:
calc31 = calc(num1, operation1, num2)
calc32 = calc(num3, operation3, num4)
calc3 = calc(calc31, operation2, calc32)
except ZeroDivisionError:
calc3 = 0
if calc3 == 24:
return case3
# Case 4 : num1 'operation1' ( ( num2 'operation2' num3 ) 'operation3' num4 )
case4 = f"{num1} {operation1} ( ( {num2} {operation2} {num3} ) {operation3} {num4} ) = 24"
try:
calc4 = calc(num2, operation2, num3)
calc4 = calc(calc4, operation3, num4)
calc4 = calc(num1, operation1, calc4)
except ZeroDivisionError:
calc4 = 0
if calc4 == 24:
return case4
# Case 5 : num1 'operation1' ( num2 'operation2' ( num3 'operation3' num4 ) )
case5 = f"{num1} {operation1} ( {num2} {operation2} ( {num3} {operation3} {num4} ) ) = 24"
try :
calc5 = calc(num3, operation3, num4)
calc5 = calc(num2, operation2, calc5)
calc5 = calc(num1, operation1, calc5)
except ZeroDivisionError:
calc5 = 0
if calc5 == 24:
return case5
return 'No Solutions'
|
b8d51d677e16ed27c0b46ad0f4fe98751ea9759e
| 3,640,235
|
import hashlib
import hmac
import struct
def pbkdf2(hash_algorithm, password, salt, iterations, key_length):
"""
PBKDF2 from PKCS#5
:param hash_algorithm:
The string name of the hash algorithm to use: "sha1", "sha224", "sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
:return:
The derived key as a byte string
"""
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
if not isinstance(salt, byte_cls):
raise TypeError(pretty_message(
'''
salt must be a byte string, not %s
''',
type_name(salt)
))
if not isinstance(iterations, int_types):
raise TypeError(pretty_message(
'''
iterations must be an integer, not %s
''',
type_name(iterations)
))
if iterations < 1:
raise ValueError('iterations must be greater than 0')
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 1:
raise ValueError('key_length must be greater than 0')
if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "md5", "sha1", "sha224", "sha256", "sha384",
"sha512", not %s
''',
repr(hash_algorithm)
))
ld = {
'md5' : hashlib.md5,
'sha1' : hashlib.sha1,
'sha224': hashlib.sha224,
'sha256': hashlib.sha256,
'sha384': hashlib.sha384,
'sha512': hashlib.sha512,
}
h = hmac.new(password, digestmod=ld[hash_algorithm])
def prf(data):
hm = h.copy()
hm.update(data)
return bytearray(hm.digest())
key = bytearray()
i = 1
while len(key) < key_length:
T = U = prf(salt + struct.pack('>i', i))
for _ in range(iterations - 1):
U = prf(U)
T = bytearray(x ^ y for x, y in zip(T, U))
key += T
i += 1
return key[:key_length]
|
8a3799a2c73b3b3be96f67252f210bc5d114d334
| 3,640,236
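As a cross-check, the standard library ships the same KDF as `hashlib.pbkdf2_hmac`; for the supported hash names the routine above should produce identical bytes (the snippet itself also relies on helpers such as `byte_cls` and `pretty_message` from its original module):

```python
import hashlib

# Derive a 32-byte key with 100,000 iterations of HMAC-SHA256.
derived = hashlib.pbkdf2_hmac("sha256", b"correct horse", b"battery staple", 100_000, 32)
print(derived.hex())  # deterministic for a fixed password/salt/iteration count
```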
|
import time
def getLocalUtcTimeStamp():
"""
Get the universal timestamp for this machine.
"""
t = time.mktime(time.gmtime())
isDst = time.localtime().tm_isdst
return t - isDst * 60 * 60
|
54eed0893d03f3b6a76de0d36fc3f1ff5b35f64f
| 3,640,237
|
def melt_then_pivot_query(df, inspect_result, semiology_term):
"""
If happy that all rows are the same semiology, after inspection of QUERY_SEMIOLOGY, melt then pivot_table:
---
inspect_result is a df
Ali Alim-Marvasti July 2019
"""
# find all localisation columns present:
localisation_labels = anatomical_regions(df)
relevant_localisations = [
cols for cols in inspect_result.columns if cols in localisation_labels]
# MELT
# first determine id_vars: in this case we don't use lateralisation add that too
full_id_cols = full_id_vars() + lateralisation_vars()
id_vars_present_in_query = [
cols for cols in inspect_result.columns if cols in full_id_cols]
inspect_result_melted = inspect_result.melt(id_vars=id_vars_present_in_query, value_vars=relevant_localisations,
var_name='melted_variable', value_name='melted_numbers')
# replace NaNs with 0s as melting creates many:
inspect_result_melted.fillna(value=0, inplace=True)
# PIVOT_TABLE
inspect_result_melted['pivot_by_column'] = semiology_term
pivot_result = inspect_result_melted.pivot_table(
index='pivot_by_column', columns='melted_variable', values='melted_numbers', aggfunc='sum')
# sort the columns of the pivot_table by ascending value:
pivot_result.sort_values(by=semiology_term, axis=1,
inplace=True, ascending=False)
return pivot_result
|
97ebd8f30d4b031a6b12412421ad0e4e2458c003
| 3,640,238
|
from typing import List
from typing import Tuple
def maze_solver(maze: List[List[int]]) -> List[Tuple[int, int]]:
"""
Finds the path that a light ray would take through a maze.
:param maze: 2D grid of cells, where 0 = empty cell, -1 = mirror at -45 degrees, 1 = mirror at 45 degrees
:return: The coordinates that the light passed, ordered by time
"""
validate_maze(maze)
coordinate = (0, 0)
direction = DIRECTION_RIGHT
path: List[Tuple[int, int]] = []
while 0 <= coordinate[0] < len(maze) and 0 <= coordinate[1] < len(maze[0]):
path.append(coordinate)
direction = next_direction(direction, maze[coordinate[0]][coordinate[1]])
coordinate = next_coordinate(coordinate, direction)
return path
|
4ea26e1dec318c9a41babf617d26662d35fe54c1
| 3,640,239
|
def looks_like_fasta(test_text):
"""Determine if text looks like FASTA formatted data.
Looks to find at least two lines. The first line MUST
start with '>' and the second line must NOT start with '>'.
Ignores any starting whitespace.
"""
text = test_text.strip()
return FASTA_START.match(text) is not None
|
352cde2d0d4de692e0598a96d19107ac04a66f53
| 3,640,240
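`FASTA_START` is defined outside the snippet; a plausible, hypothetical stand-in encoding the documented rule (first line starts with '>', the next line does not) and a usage sketch:

```python
import re

# First line must begin with '>', the following line must exist and not begin with '>'.
FASTA_START = re.compile(r"\A>[^\n]*\n(?!>).")

print(looks_like_fasta(">seq1\nACGTACGT"))  # True
print(looks_like_fasta("ACGTACGT"))         # False: no header line
```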
|
import logging
import numpy as np
from scipy import signal
log = logging.getLogger(__name__)
def firwin_kaiser_bsf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop,
fs=1.0, N_bump=0, status=True):
"""
Design an FIR bandstop filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired stopband attenuation d_stop in dB for both stopbands,
all relative to a sampling rate of fs Hz.
Note: The passband ripple cannot be set independent of the
stopband attenuation.
Note: The filter order is forced to be even (odd number of taps)
so there is a center tap that can be used to form 1 - H_BPF.
Mark Wickert October 2016
"""
# First design a BPF starting from simple LPF equivalent
# The upper and lower stopbands are assumed to have
# the same attenuation level. The LPF equivalent critical
# frequencies:
f_pass = (f_pass2 - f_pass1) / 2
f_stop = (f_stop2 - f_stop1) / 2
# Continue to design equivalent LPF
wc = 2 * np.pi * (f_pass + f_stop) / 2 / fs
delta_w = 2 * np.pi * (f_stop - f_pass) / fs
# Find the filter order
M = np.ceil((d_stop - 8) / (2.285 * delta_w))
# Adjust filter order up or down as needed
M += N_bump
# Make filter order even (odd number of taps)
if ((M + 1) / 2.0 - int((M + 1) / 2.0)) == 0:
M += 1
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps, beta)
n = np.arange(N_taps)
b_k = wc / np.pi * np.sinc(wc / np.pi * (n - M / 2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF to BPF
f0 = (f_pass2 + f_pass1) / 2
w0 = 2 * np.pi * f0 / fs
n = np.arange(len(b_k))
b_k_bs = 2 * b_k * np.cos(w0 * (n - M / 2))
# Transform BPF to BSF via 1 - BPF for odd N_taps
b_k_bs = -b_k_bs
b_k_bs[int(M / 2)] += 1
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k_bs
|
d5fccbcb9721707f0653065ad7c27d904cc05b97
| 3,640,241
|
from typing import List
def extract_text(html_text) -> List[List[str]]:
"""
:param html_text: a parsed HTML document (e.g. a BeautifulSoup object exposing find/findAll)
:return: one list of whitespace-split tokens per non-empty <h3> line
"""
lines = [i.text.replace("\xa0", "") for i in html_text.find("div", attrs={"class": "contentus"}).findAll("h3")]
return [line.split(" ") for line in lines if line]
|
19dfdd513e48f2662dc51661bfeca4b1155762a3
| 3,640,242
|
from typing import Iterable
from typing import Tuple
from typing import Mapping
from typing import List
from typing import TypeVar
from collections import defaultdict
X = TypeVar("X")
Y = TypeVar("Y")
def multidict(pairs: Iterable[Tuple[X, Y]]) -> Mapping[X, List[Y]]:
"""Accumulate a multidict from a list of pairs."""
rv = defaultdict(list)
for key, value in pairs:
rv[key].append(value)
return dict(rv)
|
c94567169a8ea4e3d7fd9a8e5c2a990625181be8
| 3,640,243
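A one-line usage example (relying on the `defaultdict`/`TypeVar` imports added above):

```python
print(multidict([("a", 1), ("a", 2), ("b", 3)]))  # {'a': [1, 2], 'b': [3]}
```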
|
import glob
import cv2
def read_images(pathname):
"""
Read the images to a list given a path like 'images/cropped/*'
:param pathname: file path
:return: a list of color images and a list of corresponding file names
"""
images_path = sorted(glob.glob(pathname))
images = []
names = []
for path in images_path:
images.append(cv2.imread(path, cv2.IMREAD_COLOR))
name = path[-10:].split('.')[0]
names.append(name)
return images, names
|
9fd4644baa2ca5db204cd7ce442e85bc0d9e3166
| 3,640,244
|
import subprocess
def cov(path):
"""
Run a test coverage report.
:param path: Test coverage path
:return: Subprocess call result
"""
cmd = "py.test --cov-report term-missing --cov {0}".format(path)
return subprocess.call(cmd, shell=True)
|
521b78f712510b6438ad5bad45c951ddb0e1b623
| 3,640,245
|
import cv2
def visualize_camera_movement(image1, image1_points, image2, image2_points, is_show_img_after_move=False):
"""
Plot the camera movement between two consecutive image frames
:param image1: First image at time stamp t
:param image1_points: Feature vector for the first image
:param image2: Second image at time stamp t + 1
:param image2_points: Feature vector for the second image
:param is_show_img_after_move: Bool variable to plot movement or not
"""
image1 = image1.copy()
image2 = image2.copy()
for i in range(0, len(image1_points)):
# Coordinates of a point on t frame
p1 = (int(image1_points[i][0]), int(image1_points[i][1]))
# Coordinates of the same point on t+1 frame
p2 = (int(image2_points[i][0]), int(image2_points[i][1]))
cv2.circle(image1, p1, 5, (0, 255, 0), 1)
cv2.arrowedLine(image1, p1, p2, (0, 255, 0), 1)
cv2.circle(image1, p2, 5, (255, 0, 0), 1)
if is_show_img_after_move:
cv2.circle(image2, p2, 5, (255, 0, 0), 1)
if is_show_img_after_move:
return image2
else:
return image1
|
5f92bf44885e62ebdc502e8c98ba1466cb8d5279
| 3,640,246
|
import numpy as np
def get_rgb_masks(data, separate_green=False):
"""Get the RGGB Bayer pattern for the given data.
See `get_rgb_data` for description of data.
Args:
data (`numpy.array`): An array of data representing an image.
separate_green (bool, optional): If the two green channels should be separated,
default False.
Returns:
tuple(np.array, np.array, np.array): A 3-tuple of numpy arrays of `bool` type.
"""
r_mask = np.ones_like(data).astype(bool)
g1_mask = np.ones_like(data).astype(bool)
b_mask = np.ones_like(data).astype(bool)
if separate_green:
g2_mask = np.ones_like(data).astype(bool)
else:
g2_mask = g1_mask
if data.ndim == 2:
r_mask[1::2, 0::2] = False
g1_mask[1::2, 1::2] = False
g2_mask[0::2, 0::2] = False
b_mask[0::2, 1::2] = False
elif data.ndim == 3:
r_mask[..., 1::2, 0::2] = False
g1_mask[..., 1::2, 1::2] = False
g2_mask[..., 0::2, 0::2] = False
b_mask[..., 0::2, 1::2] = False
else:
raise Exception('Only 2D and 3D data allowed')
if separate_green:
return np.array([r_mask, g1_mask, g2_mask, b_mask])
else:
return np.array([r_mask, g1_mask, b_mask])
|
cf31103ca4248ccd96fb89181d0e48d5b71201c5
| 3,640,247
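A small sketch showing the mask shapes on dummy 2-D data (NumPy assumed, matching the `np` calls in the snippet); note that the function returns a stacked boolean `np.array` rather than the tuple mentioned in the docstring:

```python
data = np.zeros((4, 4))
masks = get_rgb_masks(data)
print(masks.shape)     # (3, 4, 4): one boolean mask each for R, combined G, and B
print(masks[0].sum())  # 12: the red mask is False only at the 4 positions this layout assigns to red
```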
|
def _parse_ax(*args, **kwargs):
""" Parse plotting *args, **kwargs for an AxesSubplot. This allows for
axes and colormap to be passed as keyword or position.
Returns AxesSubplot, colormap, kwargs with *args removed"""
axes = kwargs.pop('axes', None)
cmap = kwargs.get('cmap', None)
if not axes:
indicies = [idx for (idx, arg) in enumerate(args) if isinstance(arg, Subplot)]
if len(indicies) < 1:
axes = None
elif len(indicies) > 1:
raise UtilsError("Multiple axes not understood")
else:
args = list(args)
axes = args.pop(indicies[0])
if args and not cmap:
if len(args) > 1:
raise UtilsError("Please only pass a colormap and/or Axes"
" subplot to Canvas plotting")
elif len(args) == 1:
kwargs['cmap'] = args[0]
# If string, replace cmap with true cmap instance (used by show())
if 'cmap' in kwargs:
cmap = kwargs['cmap']
if isinstance(cmap, str):
if cmap != 'pbinary' and cmap != 'pbinary_r': #special canvas word
kwargs['cmap'] = cm.get_cmap(cmap)
return axes, kwargs
|
78658e2cf66fad184c057b9392b918ffb48406be
| 3,640,248
|
def _get_context():
"""Determine the most specific context that we're in.
Implementation from TensorBoard: https://git.io/JvObD.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
"""
# In Colab, the `google.colab` module is available, but the shell
# returned by `IPython.get_ipython` does not have a `get_trait`
# method.
try:
import google.colab  # noqa: F401
import IPython
except ImportError:
pass
else:
if IPython.get_ipython() is not None:
# We'll assume that we're in a Colab notebook context.
return _CONTEXT_COLAB
# In an IPython command line shell or Jupyter notebook, we can
# directly query whether we're in a notebook context.
try:
import IPython
except ImportError:
pass
else:
ipython = IPython.get_ipython()
if ipython is not None and ipython.has_trait("kernel"):
return _CONTEXT_IPYTHON
# Otherwise, we're not in a known notebook context.
return _CONTEXT_NONE
|
b81205aedbe2222019fa6b7e9dc5fb638536869f
| 3,640,249
|
import numpy as np
def loadSHSMFCCs(IDs):
"""
Load all of the 12-dim MFCC features
"""
IDDict = getSHSIDDict()
fin = open("SHSDataset/MFCC/bt_aligned_mfccs_shs.txt")
mfccs = {}
count = 0
while True:
ID = fin.readline().rstrip()
if not ID:
break
ID = IDDict[ID]
if count%1000 == 0:
print("Loaded mfccs for %i songs..."%count)
if not ID in IDs:
fin.readline()
count += 1
continue
x = fin.readline().rstrip()
x = x.split(",")
if len(x[-1]) == 0:
x = x[0:-1]
x = np.array([float(a) for a in x])
x = np.reshape(x, (len(x) // 12, 12))
mfccs[ID] = x
count += 1
fin.close()
return mfccs
|
04d82be79d6e89c5f9b6304be82d049bf6af63f5
| 3,640,250
|
def __createTransactionElement(doc,tran):
"""
Return a DOM element represents the transaction given (tran)
"""
tranEle = doc.createElement("transaction")
symbolEle = __createSimpleNodeWithText(doc, "symbol", tran.symbol)
buyEle = __createSimpleNodeWithText(doc, "buy", "true" if tran.buy else "false")
quantityEle = __createSimpleNodeWithText(doc, "quantity", str(tran.num_of_shares))
priceEle = __createSimpleNodeWithText(doc, "price", str(tran.price_per_share))
netAmountEle = __createSimpleNodeWithText(doc, "net_amount", str(tran.net_amount))
timeEle = __createSimpleNodeWithText(doc, "time", tran.getTimeStr())
accountTypeEle = __createSimpleNodeWithText(doc, "account_type", str(tran.account_type))
tranEle.appendChild(symbolEle)
tranEle.appendChild(buyEle)
tranEle.appendChild(quantityEle)
tranEle.appendChild(priceEle)
tranEle.appendChild(netAmountEle)
tranEle.appendChild(timeEle)
tranEle.appendChild(accountTypeEle)
return tranEle
|
c09fc6abf4cb9599be23bcf6c91c6cc60330df0a
| 3,640,251
|
def received_information(update: Update, context: CallbackContext) -> int:
"""Store info provided by user and ask for the next category."""
text = update.message.text
category = context.user_data['choice']
context.user_data[category] = text.lower()
del context.user_data['choice']
update.message.reply_text(
"Neat! Just so you know, this is what you already told me:"
f"{facts_to_str(context.user_data)}"
"You can tell me more, or change your opinion on something.",
reply_markup=markup,
)
return CHOOSING
|
e7b93516975a497f6da11383969a14aeb31e6278
| 3,640,252
|
def set_payout_amount():
"""
define amount of insurance payout
NB must match what was defined in contract constructor at deployment
"""
return 500000e18
|
30ff7b07cbbe28b3150be2f1f470236875c8d0e3
| 3,640,253
|
import pandas as pd
def process_season_data(*args) -> pd.DataFrame:
"""
Takes multiple season data frames, cleans each and combines into single dataframe.
"""
return pd.concat(
map(
lambda df: basketball_reference.process_df_season_summary(
df=df, url_type="season_summary_per_game"
),
[*args],
),
axis=0,
)
|
9c2d46ba2b491382e91613e0dc0a35b68e4188cf
| 3,640,254
|
def build_frame(station_num: int, snapshots_num: int):
"""Function to build citi_bike Frame.
Args:
station_num (int): Number of stations.
snapshot_num (int): Number of in-memory snapshots.
Returns:
CitibikeFrame: Frame instance for citi-bike scenario.
"""
matrices_cls = gen_matrices_node_definition(station_num)
class CitibikeFrame(FrameBase):
stations = FrameNode(Station, station_num)
# for adj frame, we only need 1 node to hold the data
matrices = FrameNode(matrices_cls, 1)
def __init__(self):
super().__init__(enable_snapshot=True, total_snapshot=snapshots_num)
return CitibikeFrame()
|
2fd09885f488a4b42f9a2a4a19dfdd5c10743ef9
| 3,640,255
|
def rucklidge(XYZ, t, k=2, a=6.7):
"""
The Rucklidge Attractor.
x0 = (0.1,0,0)
"""
x, y, z = XYZ
x_dt = -k * x + y * (a - z)
y_dt = x
z_dt = -z + y**2
return x_dt, y_dt, z_dt
|
9d10aa89fb684a95474d45399ae09a38b507913c
| 3,640,256
|
import os
def resolve(dirs, *paths):
"""
Joins `paths` onto each dir in `dirs` using `os.path.join` until one of the join results is found to exist and
returns the existent result.
:param dirs: A list of dir strings to resolve against
:param paths: Path components to join onto each dir in `dirs`
:return A path created by calling `os.path.join` on a dir in `dirs` with `*paths`.
:raises ValueError: If `dirs` is empty
:raises FileNotFoundError: If joining `paths` onto all dirs in `dirs` always resulted in non-existent paths.
"""
if len(dirs) == 0:
raise ValueError("dirs empty: cannot resolve paths against *no* dirs: dirs must contain at least one element")
ret = try_resolve(dirs, *paths)
if ret is not None:
return ret
elif len(dirs) == 1:
raise FileNotFoundError("{path}: No such file or directory".format(path=os.path.join(dirs[0], *paths)))
else:
attempted_paths = [os.path.join(d, *paths) for d in dirs]
path = os.path.join(*paths)
attempt_str = ", ".join(list(map(lambda p: "'" + p + "'", attempted_paths)))
raise FileNotFoundError("{path}: could not be found after trying {paths}".format(path=path, paths=attempt_str))
|
d9f8b8e4049091ab0354b56e283f322ddd3d37e0
| 3,640,257
|
def multiext(prefix, *extensions):
"""Expand a given prefix with multiple extensions (e.g. .txt, .csv, _peaks.bed, ...)."""
if any((r"/" in ext or r"\\" in ext) for ext in extensions):
raise WorkflowError(
r"Extensions for multiext may not contain path delimiters " r"(/,\)."
)
return [flag(prefix + ext, "multiext", flag_value=prefix) for ext in extensions]
|
39bda078a856cb14fc65174ff48be81909b9034a
| 3,640,258
|
def sum_range(n, total=0):
"""Sum the integers from 1 to n.
Obviously the same as n(n+1)/2, but this is a test, not a demo.
>>> sum_range(1)
1
>>> sum_range(100)
5050
>>> sum_range(100000)
5000050000L
"""
if not n:
return total
else:
raise TailCall(sum_range, n - 1, n + total)
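class TailCall(Exception):
    # Minimal trampoline sketch (an assumption; the project's real TailCall is not
    # shown here): raising TailCall(f, *args) asks a driver loop to call f(*args)
    # instead of growing the Python call stack.
    def __init__(self, func, *args):
        self.func = func
        self.args = args

def trampoline(func, *args):
    # Keep re-invoking until the function returns instead of raising TailCall.
    while True:
        try:
            return func(*args)
        except TailCall as tc:
            func, args = tc.func, tc.args

# e.g. trampoline(sum_range, 100000) -> 5000050000, without hitting the recursion limit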
|
6126dd1012346a388ddc37c5a8965f3662b8ad7d
| 3,640,259
|
import logging
import time
import pickle
def parallel_evaluation_mp(candidates, args):
"""
Evaluate the candidates in parallel using ``multiprocessing``.
This function allows parallel evaluation of candidate solutions.
It uses the standard multiprocessing library to accomplish the
parallelization. The function assigns the evaluation of each
candidate to its own job, all of which are then distributed to the
available processing units.
Args:
candidates: list the candidate solutions
args: a dictionary of keyword arguments
    Returns:
        list: the fitness value of each candidate, as produced by *mp_evaluator*
    Notes:
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable and will be unavailable to your function.
Required keyword arguments in args:
- *mp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *mp_nprocs* -- number of processors that will be used (default machine
cpu count)
"""
logger = logging.getLogger('optimModels')
try:
evaluator = args['mp_evaluator']
except KeyError:
logger.error('parallel_evaluation_mp requires \'mp_evaluator\' be defined in the keyword arguments list.')
raise
try:
nprocs = args['mp_nprocs']
except KeyError:
logger.error('parallel_evaluation_mp requires \'mp_nprocs\' be defined in the keyword arguments list.')
raise
start = time.time()
pickled_args = {}
for key in args:
try:
pickle.dumps(args[key])
pickled_args[key] = args[key]
except (TypeError, pickle.PickleError, pickle.PicklingError):
logger.debug('unable to pickle args parameter {0} in parallel_evaluation_mp'.format(key))
pass
# print("--- %s seconds ---" % (time.time() - start), 'end_pickled')
try:
pool = MyPool(processes=nprocs)
results = [pool.apply_async(evaluator, ([c], pickled_args)) for c in candidates]
pool.close()
pool.join()
except (OSError, RuntimeError) as e:
logger.error('failed parallel_evaluation_mp: {0}'.format(str(e)))
raise
else:
end = time.time()
print('completed parallel_evaluation_mp in {0} seconds'.format(end - start))
logger.debug('completed parallel_evaluation_mp in {0} seconds'.format(end - start))
# print("--- %s seconds ---" % (time.time() - start), 'end_pop')
return [r.get()[0] for r in results]
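# Hedged driver sketch: MyPool is not defined in this snippet, so assume a plain
# multiprocessing.Pool satisfies the interface used above; `toy_evaluator` is a
# hypothetical inspyred-style evaluator.
from multiprocessing import Pool as MyPool

def toy_evaluator(candidates, args):
    return [sum(c) for c in candidates]

if __name__ == "__main__":
    fitnesses = parallel_evaluation_mp(
        candidates=[[1, 2], [3, 4], [5, 6]],
        args={"mp_evaluator": toy_evaluator, "mp_nprocs": 2},
    )
    # fitnesses -> [3, 7, 11]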
|
4562a976306ec8c3684d079b307a4fb1a83d09ab
| 3,640,260
|
from tcrsampler.sampler import TCRsampler
def _default_tcrsampler_human_beta(default_background = None, default_background_if_missing=None):
"""
Responsible for providing the default human beta sampler 'britanova_human_beta_t_cb.tsv.sampler.tsv'
Returns
-------
t : tcrsampler.sampler.TCRsampler
"""
if default_background is None:
default_background = 'britanova_human_beta_t_cb.tsv.sampler.tsv'
if default_background_if_missing is None:
default_background_if_missing ='britanova_human_beta_t_cb.tsv.sampler.tsv.zip'
print(default_background)
try:
t = TCRsampler(default_background=default_background)
except OSError:
t = TCRsampler()
t.download_background_file(default_background_if_missing)
t = TCRsampler(default_background=default_background)
return t
|
15b682b3e14e9496514efaf287b10ae6acb12441
| 3,640,261
|
def _dblock_to_raw(
mkh5_f, dblock_path, garv_annotations=None, apparatus_yaml=None,
):
"""convert one mkh5 datablock+header into one mne.RawArray
Ingest one mkh5 format data block and return an mne.RawArray
populated with enough data and channel information to use mne.viz
and the mne.Epochs, mne.Evoked EEG pipeline.
Parameters
----------
dblock_path : str
        HDF5 slash path to an mkh5 data block, which is an h5py.Dataset
garv_annotations: None or dict
event_channel: str, channel name with events to annotate
start, stop: float relative to time lock event
unit: "ms" or "s"
apparatus_yaml: str, optional
filepath to YAML apparatus file with stream and sensor space info
to override native mkh5 hdr["apparatus"] if any.
Returns
-------
mne.RawArray
with channel locations from apparatus_yaml and mkh5 epochs tables
JSONified and tucked into the Info["description"]
Notes
-----
The raw_dblock returned from this can be stacked with
mne.concatenate_raws() though MNE behavior is to use first
info and just stack the datablocks. epochs metadata is
collected and returned per block so the complete record
can be tucked into the (one) info object when the dblocks
are stacked.
    The raw.set_eeg_reference(ref_channels=[]) at the end blocks MNE's
    default automatic average rereferencing later in the pipeline
    (sheesh).
"""
h5data = mkh5.mkh5(mkh5_f)
try:
assert dblock_path in h5data.dblock_paths, "please report this bug"
hdr, dblock = h5data.get_dblock(dblock_path)
except Exception as fail:
raise Mkh5DblockPathError(str(fail), mkh5_f, dblock_path)
info, montage = _hdr_dblock_to_info_montage(hdr, apparatus_yaml=apparatus_yaml)
# mne wants homogenous n_chans x nsamps, so stim, misc ints coerced
# to float ... sigh.
mne_data = np.ndarray(shape=(len(dblock.dtype.names), len(dblock)), dtype="f8")
# slice out and scale mkh5 native uV to mne FIFFV_UNIT_V
for jdx, stream in enumerate(dblock.dtype.names):
# true by construction unless tampered with
assert info["ch_names"][jdx] == stream
assert hdr["streams"][stream]["jdx"] == jdx
# CRITICAL ... mkh5 EEG are native uV, MNE are V
if "dig_chan" in hdr["streams"][stream]["source"]:
mne_data[jdx] = dblock[stream] * MKH5_EEG_UNIT
else:
mne_data[jdx] = dblock[stream] * 1.0
# create the raw object
raw_dblock = mne.io.RawArray(mne_data, info, copy="both")
raw_dblock.set_montage(montage)
# ------------------------------------------------------------
# add an MNE data column for each epochs_table in the mkh5 file
# - for each one, slice the timelock events at match_time == 0
# - copy the time-locked event code to the epochs column and
# the rest of the epochs tables columns to metadata.
epochs_table_names = mkh5.mkh5(mkh5_f).get_epochs_table_names()
epochs_table_descr = dict() # returned for mkh5 epoching from MNE Raw
log_evcodes, _ = raw_dblock["log_evcodes"] # for checking
if len(epochs_table_names) > 0:
for etn in epochs_table_names:
# fetch the epochs_table and slice for this mkh5 data block
print(f"{dblock_path} setting mkh5 epochs table {etn} events and metadata")
epochs_table = h5data.get_epochs_table(etn)
etn_dblock = (
epochs_table.query("dblock_path == @dblock_path and match_time==0")
).copy()
# CRITICAL: The mkh5 epoch table indexes HDF5 data by
# dblock_path, dblock_tick (row offset), the row sort
# order is undefined. MNE squawks if event array
# sample indexes are not monotonically increasing.
etn_dblock.sort_values("dblock_ticks", inplace=True)
# capture epochs table as data frame for later
epochs_table_descr[etn] = etn_dblock
# container for the new column of event codes
etn_evcodes = np.zeros(
(1, len(raw_dblock)), dtype=raw_dblock.get_data()[0].dtype
) # yes, (1, len) b.c. MNE wants chan x time
# CRITICAL: copy over log_evcodes at just the epoch event ticks
etn_evcodes[0, etn_dblock.dblock_ticks] = etn_dblock.log_evcodes
# true by construction of mkh5 except MNE is dtype float
assert all(
log_evcodes[0, etn_dblock.dblock_ticks]
== etn_evcodes[0, etn_dblock.dblock_ticks]
)
# clone the log_evcodes to get their MNE info attributes
etn_event_channel = raw_dblock.copy().pick(["log_evcodes"])
# rename and hack in the correct scanno, logno in case it matters
mne.rename_channels(etn_event_channel.info, {"log_evcodes": etn})
for field in ["scanno", "logno"]:
etn_event_channel.info["chs"][0][field] = (
raw_dblock.info["chs"][-1][field] + 1
)
# set the event code data values append the channel and confirm
# MNE agrees when asked in its native tongue.
etn_event_channel._data = etn_evcodes
raw_dblock.add_channels([etn_event_channel])
assert all(
raw_dblock["log_evcodes"][0][0, etn_dblock.dblock_ticks]
== raw_dblock[etn][0][0, etn_dblock.dblock_ticks]
)
# ------------------------------------------------------------
# seed the MNE annotations with the data block path at time == 0
raw_dblock.set_annotations(
mne.Annotations(onset=0.0, duration=0.0, description=dblock_path)
)
    # add log_evcodes garv annotations, if any; validated in _check_api_params
if garv_annotations:
print(f"annotating garv artifacts {garv_annotations}")
bad_garvs = get_garv_bads(raw_dblock, **garv_annotations)
raw_dblock.set_annotations(raw_dblock.annotations + bad_garvs)
return raw_dblock, epochs_table_descr
|
cee5823fa1ab4e3cd9be345cf83858bd61a01b51
| 3,640,262
|
def _get_positive_mask(positive_selection, cls_softmax, cls_gt):
"""Gets the positive mask based on the ground truth box classifications
Args:
positive_selection: positive selection method
(e.g. 'corr_cls', 'not_bkg')
cls_softmax: prediction classification softmax scores
cls_gt: ground truth classification one-hot vector
Returns:
positive_mask: positive mask
"""
# Get argmax for predicted class
classification_argmax = tf.argmax(cls_softmax, axis=1)
# Get the ground truth class indices back from one_hot vector
class_indices_gt = tf.argmax(cls_gt, axis=1)
# class_indices_gt = tf.Print(class_indices_gt, ['^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^line 88(pplp loss) : class_indices_gt =', class_indices_gt], summarize=1000)
# Mask for which predictions are not background
not_background_mask = tf.greater(class_indices_gt, 0)
# Combine the masks
if positive_selection == 'corr_cls':
# Which prediction classifications match ground truth
correct_classifications_mask = tf.equal(
classification_argmax, class_indices_gt)
positive_mask = tf.logical_and(
correct_classifications_mask, not_background_mask)
elif positive_selection == 'not_bkg':
positive_mask = not_background_mask
else:
raise ValueError('Invalid positive selection', positive_selection)
return positive_mask
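# Hedged usage sketch with hypothetical 3-class scores (assumes eager TensorFlow 2):
# two boxes predicted correctly as foreground, one ground-truth background box.
import tensorflow as tf

cls_softmax = tf.constant([[0.1, 0.8, 0.1],
                           [0.2, 0.2, 0.6],
                           [0.9, 0.05, 0.05]])
cls_gt = tf.constant([[0., 1., 0.],
                      [0., 0., 1.],
                      [1., 0., 0.]])
mask = _get_positive_mask('corr_cls', cls_softmax, cls_gt)
# mask -> [True, True, False]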
|
8a8e4317f99f691c038b40d7656656f532eba884
| 3,640,263
|
def max_min_index(name_index):
    """Return the maximum and minimum values of a column from df_copy, each paired with its country."""
    country_and_name = df_copy[["country", name_index]]
    countries_in_name_index = country_and_name.sort_values(name_index).dropna()
    min_value = [
        list(countries_in_name_index[name_index])[0],
        list(countries_in_name_index["country"])[0],
    ]
    max_value = [
        list(countries_in_name_index[name_index])[-1],
        list(countries_in_name_index["country"])[-1],
    ]
    return max_value, min_value
|
7bce42a4d05b52b8e6f0a6d91cecf7775a9484a4
| 3,640,264
|
def add_bold_line(latex: str, index: int) -> str:
"""Makes a provided line number bold
"""
lines = latex.splitlines()
cells = lines[index].split("&")
lines[index] = r'\bfseries ' + r'& \bfseries '.join(cells)
return '\n'.join(lines)
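# Minimal usage sketch with a hand-written tabular (hypothetical content):
# bold the header row of a small LaTeX table.
table = "\n".join([
    r"\begin{tabular}{ll}",
    r"name & score \\",
    r"alice & 3 \\",
    r"\end{tabular}",
])
print(add_bold_line(table, 1))
# the header row becomes: \bfseries name & \bfseries  score \\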
|
637338ea9ec576c780ccfa0b37d47a670465cdbb
| 3,640,265
|
import numpy as np
def square(V, resp, weight):
    """Computes the expansion coefficients with a least squares regression"""
    if np.any(weight):
        Vt = V.T
        v1 = Vt.dot(np.transpose(weight * Vt))
        v2 = Vt.dot(np.transpose(weight * resp.T))
        coef = np.linalg.solve(v1, v2)
    else:
        coef = np.linalg.lstsq(V, resp, rcond=None)[0]
    return coef
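# Hedged usage sketch (hypothetical data): fit quadratic coefficients with and
# without per-sample weights; V holds the basis functions evaluated at x.
import numpy as np

x = np.linspace(0.0, 1.0, 50)
V = np.vander(x, 3, increasing=True)          # columns: 1, x, x**2
resp = 1.0 + 2.0 * x - 0.5 * x ** 2
coef_plain = square(V, resp, weight=None)     # falls back to np.linalg.lstsq
coef_weighted = square(V, resp, weight=np.ones_like(x))
# both recover approximately [1.0, 2.0, -0.5]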
|
076f580a16c233a087edafb1efd57b9fa194a666
| 3,640,266
|
def _unwrap_function(func):
"""Unwrap decorated functions to allow fetching types from them."""
while hasattr(func, "__wrapped__"):
func = func.__wrapped__
return func
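# Minimal usage sketch: peel back a functools.wraps-decorated function to read
# the original's annotations (decorator and function are hypothetical).
import functools

def logged(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@logged
def add(a: int, b: int) -> int:
    return a + b

assert _unwrap_function(add) is add.__wrapped__
assert _unwrap_function(add).__annotations__ == {"a": int, "b": int, "return": int}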
|
8c7a9f5b08dc91b9ae2c8387fbd4860bb554d561
| 3,640,267
|
from json import loads
def get_data(exception: bool = True, key_form: str = "data") -> "dict or None":
    """Capture the data sent to a route.
    Data is captured when the route receives `JSON` or `multipart-form`; otherwise an exception is raised.
    Images are captured by the `get_files` function.
    `exception` is an optional boolean field that defines whether exceptions are raised. It defaults to True.
    Exceptions:
        `from app.errors.JSONNotFound` - Empty body.
    """
data = {}
if request.get_json():
data: dict = request.get_json()
elif request.form.get(key_form):
data: dict = loads(request.form.get(key_form))
if data.get("file"):
data.pop("file")
return data
|
15cd4257e7849d4400231d7e5c2a025b16ad7db5
| 3,640,268
|
def get_dec_log(uid):
"""Convenience method to look up inc_log for a uid."""
rv = query_db('select dec_log from user where uid = ?',
[uid], one=True)
return rv[0] if rv else None
|
8927ce657cbabfd1e0f4e852e6163702ab5b3841
| 3,640,269
|
def removeItem(request):
"""
    Removes an item from the logged-in customer's basket.
"""
if request.method == 'POST':
cust = User.objects.get(username=request.user.username)
item_id = request.POST["item_id"]
item = Item.objects.get(id=item_id)
b_item = Basket.objects.get(customer=cust, item=item)
        if b_item:
b_item.delete()
return HttpResponse(None)
return HttpResponseBadRequest(None)
|
80514a322f727478311b7a1b49bd9da8ac7b0d28
| 3,640,270
|
def read_file(repo, name):
    """Read a text file and return its lines with trailing whitespace stripped."""
    with open(repo + '/' + name + '.txt') as file:
        data = [d.rstrip() for d in file.readlines()]
    return data
|
4d91e4c68a4f132dc6ebb41cc51df66bd555107a
| 3,640,271
|
def add_end_slash(value: str):
""" Added a slash at the end of value """
if type(value) != str:
return value
return value if value.endswith("/") else value + "/"
|
bc8f41898c50120ad7ca8b814ff03d19c1c64c27
| 3,640,272
|
import numpy as np
def shift_fill(a, n, axis=0, fill=0.0, reverse=False):
    """ shift n spaces backward along axis 0, filling the vacated entries with `fill`. if n is negative, shifts forward. """
    shifted = np.roll(a, n, axis=axis)
    # overwrite the wrapped-around entries (np.roll wraps them instead of filling)
    if n >= 0:
        shifted[:n] = fill
    else:
        shifted[n:] = fill
    return shifted
|
5287eefe7491442e3192069bce4faf975e54344a
| 3,640,273
|
from typing import Union
def str2bool(v:Union[str, bool]) -> bool:
""" finished, checked,
converts a "boolean" value possibly in the format of str to bool
Parameters
----------
v: str or bool,
the "boolean" value
Returns
-------
b: bool,
`v` in the format of bool
References
----------
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
b = v
elif v.lower() in ("yes", "true", "t", "y", "1"):
b = True
elif v.lower() in ("no", "false", "f", "n", "0"):
b = False
else:
raise ValueError("Boolean value expected.")
return b
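# Hedged usage sketch: wiring str2bool into argparse, per the referenced
# Stack Overflow answer.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=str2bool, nargs="?", const=True, default=False)
assert parser.parse_args(["--verbose", "yes"]).verbose is True
assert parser.parse_args([]).verbose is False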
|
2f102239ce395ece25022320443ffc6d7183968e
| 3,640,274
|
def _none_or_int_or_list(val):
"""Input conversion - expecting None, int, or a list of ints"""
if val is None:
return None
elif isinstance(val, list):
return list(map(int, val))
else:
return int(val)
|
1958c64175a1cd63f8a42044b40b84d7cf8baed2
| 3,640,275
|
def hour_of_day(datetime_col):
"""Returns the hour from a datetime column."""
return datetime_col.dt.hour
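# Minimal usage sketch with a hypothetical pandas datetime column.
import pandas as pd

timestamps = pd.Series(pd.to_datetime(["2021-01-01 09:30", "2021-01-01 18:05"]))
hour_of_day(timestamps).tolist()  # -> [9, 18]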
|
18b2f6e16ccbcb488f3863968466fda14f669d8b
| 3,640,276
|
import numpy as np
def pad_lists(lists, pad_token, seq_lens_idx=[]):
"""
Pads unordered lists of different lengths to all have the same length (max length) and orders
length descendingly
Arguments:
lists : list of 1d lists with different lengths (list[list[int]])
pad_token : padding value (int)
seq_lens_idx : list of sorted indices (list[int])
Returns:
ordered_lists (list[list[int]]) : List of padded 1d lists with equal lengths, ordered descendingly
from original lengths
ordered_seq_lens (list[int]) : List of sequence lengths of corresponding lists (i.e. len(lst))
seq_lens_idx (list[int]) : Order of original indices in descending sequence length
"""
seq_lens = [len(lst) for lst in lists]
max_seq_len = max(seq_lens)
ordered_seq_lens = []
ordered_lists = []
if len(seq_lens_idx) == 0:
seq_lens_idx = np.flip(np.argsort(seq_lens), axis=0).tolist() # descending indices based on seq_lens
for idx in seq_lens_idx: # for every sample in batch
ordered_lists.append(lists[idx] + [pad_token] * (max_seq_len - len(lists[idx])))
ordered_seq_lens.append(seq_lens[idx])
return ordered_lists, ordered_seq_lens, seq_lens_idx
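# Minimal usage sketch (hypothetical ragged batch): pad with 0 and return the
# descending length order.
lists = [[5, 6], [1, 2, 3, 4], [9]]
padded, lengths, order = pad_lists(lists, pad_token=0)
# padded  -> [[1, 2, 3, 4], [5, 6, 0, 0], [9, 0, 0, 0]]
# lengths -> [4, 2, 1]
# order   -> [1, 0, 2]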
|
76a2a9934dfca478e7db43a93db7e56c181a3b3f
| 3,640,277
|
def sum_naturals(n):
"""Sum the first N natural numbers
>>> sum_naturals(5)
15
"""
total = 0
k = 1
while k <= n:
total += k
k += 1
return total
|
4c59057cd82083d615c72a59f682dd218a657ea0
| 3,640,278
|
def _gaussian_log_sf(x, mu, sigma):
"""Log SF of a normal distribution."""
if not isinstance(x, chainer.Variable):
x = chainer.Variable(x)
return _log_ndtr(-(x - mu) / sigma)
|
3f33918bf78fc3ab4064f05d038232df218416f6
| 3,640,279
|
def sort_list_files(list_patches, list_masks):
"""
Sorts a list of patches and masks depending on their id.
:param list_patches: List of name of patches in the folder, that we want to sort.
:param list_masks: List of name of masks in the folder, that we want to sort.
:return: List of sorted lists, respectively of patches and masks.
"""
return sorted(list_patches, key=lambda x: int(x[1])), sorted(list_masks, key=lambda x: int(x[1]))
|
91557475bf145862ea88ad9f86cef82135eddd6c
| 3,640,280
|
def get_model(name):
""" get_model """
if name not in __factory:
raise KeyError("unknown model:", name)
return __factory[name]
|
00fff4e3596aec487b16fd1114b7d026d6790568
| 3,640,281
|
import configparser
def defaults_to_cfg():
""" Creates a blank template cfg with all accepted fields and reasonable default values
Returns:
config (ConfigParser): configuration object containing defaults
"""
config = configparser.ConfigParser(allow_no_value=True)
config.add_section("General")
config.set("General", "prot_file")
config.set("General", "lig_file")
config.set("General", "min_rad", _option_constraints.get("general_min_rad_dflt"))
config.set("General", "max_rad", _option_constraints.get("general_max_rad_dflt"))
config.set("General", "constrain_radii", _option_constraints.get("general_constrain_radii"))
config.add_section("Specification")
config.set("Specification", "mode")
config.set("Specification", "coordinates")
config.set("Specification", "resid")
config.set("Specification", "lig_excl_rad")
config.set("Specification", "lig_incl_rad")
config.set("Specification", "min_volume", _option_constraints.get("specification_min_volume_dflt"))
config.add_section("Partitioning")
config.set("Partitioning", "subdivide", _option_constraints.get("partitioning_subdivide_dflt"))
config.set("Partitioning", "max_clusters")
config.set("Partitioning", "min_subpocket_rad", _option_constraints.get("partitioning_min_subpocket_rad_dflt"))
config.set("Partitioning", "max_subpocket_rad", )
config.set("Partitioning", "min_subpocket_surf_rad", )
config.set("Partitioning", "radial_sampling", )
config.set("Partitioning", "inclusion_radius_buffer")
config.set("Partitioning", "min_cluster_size")
config.add_section("Output")
config.set("Output", "project_dir")
config.set("Output", "prefix")
config.set("Output", "logger_stream_level", "INFO")
config.set("Output", "logger_file_level", "DEBUG")
config.add_section("PyMOL")
config.set("PyMOL", "protein")
config.set("PyMOL", "ligand")
config.set("PyMOL", "protein_only", "False")
config.set("PyMOL", "display_mode", "solid")
config.set("PyMOL", "palette")
config.set("PyMOL", "alpha", "0.85")
return config
|
f521d0caee3c9f062eb78e7d5c2562030a741aaf
| 3,640,282
|
def check_sentence_for_coins(sentence: str) -> str:
"""Returns the corresponding binance pair if a string contains any words refering to a followed coins
Args:
sentence (str): the sentence
Returns:
str: the binance pair if it contains a coins, otherwise returns 'NO_PAIR'
"""
coin = next((word for word in list(TRACKED_COINS.keys()) if word in sentence.lower()), 'NO_PAIR')
if coin != 'NO_PAIR':
return TRACKED_COINS[coin]
return coin
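# Hedged usage sketch with a hypothetical TRACKED_COINS mapping (the real one is
# defined elsewhere in the project).
TRACKED_COINS = {"bitcoin": "BTCUSDT", "ethereum": "ETHUSDT"}

assert check_sentence_for_coins("Bitcoin just hit a new high") == "BTCUSDT"
assert check_sentence_for_coins("nothing to see here") == "NO_PAIR"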
|
a9d041ff6c2bcca1c424b059956c33016f93300d
| 3,640,283
|
def to24Bit(color8Bit):
"""The method allows you to convert the 8-bit index created by the color.to8Bit method to 24-bit color value."""
# We ignore first one, so we need to shift palette indexing by one
return palette[color8Bit + 1]
|
b84d2262883a7b3415ae46f174418dc79ef800dc
| 3,640,284
|
import shlex
import sys
import json
def read_json_method(data):
"""
Reformatting Valve key/value format
to JSON and returning JSON.
"""
# default vars
parent = []
depth = 0
vdict = {}
# init json and add opening bracket
vjson = "{"
# replace tabs with spaces
    data = data.replace("\t", " ")
# split into lines
data = data.splitlines()
# loop through vdf
for index, line in enumerate(data):
# split line string
split = shlex.split(line)
count = len(split)
# set key vars
key = split[0]
# error if unexpected word count of current line
if count > 2:
print(
"The line: "
+ line
+ " contains an invalid number of words. This must be 1 or 2!"
)
sys.exit(1)
# parse next line if not last line
if index == (len(data) - 1):
# set next to false
nextkey = False
# flag this line as last
lastline = True
else:
# get next line
nextline = data[index + 1]
nextsplit = shlex.split(nextline)
nextkey = nextsplit[0]
# flag this line as not last
lastline = False
# check for object start lines
        if count == 1 and key not in ["{", "}"]:
# add colon to define object
line = line + " : "
# check for closing bracket and
if key == "}" and nextkey != "}" and not lastline:
# add colon to define object
line = line + ","
# check for key value lines
if count == 2:
# set value var
val = split[1]
# add colon between key/value
line = surround_quotes(key) + " : " + surround_quotes(val)
# check for brackets on next line
            if nextkey not in ["{", "}"]:
# add comma to line
line = line + ","
# add edited line to json dict
vjson = vjson + line
# add closing bracket
vjson = vjson + "}"
# parse json to dict
try:
vdict = json.loads(vjson)
except Exception as parse_error:
# print json parse error and set dict to false
        print(
            "The following error occurred while trying to parse the VDF app"
            + "info returned from steamcmd: \n > "
            + str(parse_error)
        )
vdict = False
# set error exit code
sys.exit(1)
return vdict
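def surround_quotes(token):
    # Sketch of the helper read_json_method relies on (an assumption; the original
    # is not shown here): wrap a token in double quotes unless it is already quoted.
    if token.startswith('"') and token.endswith('"'):
        return token
    return '"{}"'.format(token)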
|
f8b266145f44e99c83fd5e0dbe2ccd8e60e4feb2
| 3,640,285
|
def make_bag():
"""Create a bag."""
return from_sequence(
[1, 2], npartitions=2
).map(allocate_50mb).sum().apply(no_allocate)
|
8c939dd389ab09811dbbf77aaf479832ab7463d0
| 3,640,286
|
def get_testdata_files(pattern="*"):
""" Return test data files from dicom3d data root directory """
data_path = join(DATA_ROOT, 'test_files')
files = walk_data(
base=data_path, pattern=pattern,
search_files=True, search_dirs=False)
return [filename for filename in files if not filename.endswith('.py')]
|
9739db355914b288b2fcb29874fcc50d6a2b4487
| 3,640,287
|
def safe_text(obj):
"""Safely turns an object into a textual representation.
Calls str(), then on Python 2 decodes the result.
"""
result = qcore.safe_str(obj)
if isinstance(result, bytes):
try:
result = result.decode("utf-8")
except Exception as e:
result = "<n/a: .decode() raised %r>" % e
return result
|
4d20d5c42b79b6dbb6f8282d74bfd461ffd1dc75
| 3,640,288
|
from typing import List
def evaluate_blueprints(blueprint_q: mp.Queue,
input_size: List[int]) -> List[BlueprintGenome]:
"""
Consumes blueprints off the blueprints queue, evaluates them and adds them back to the queue if all of their
evaluations have not been completed for the current generation. If all their evaluations have been completed, add
them to the completed_blueprints list.
:param blueprint_q: A thread safe queue of blueprints
:param input_size: The shape of the input to each network
:return: A list of evaluated blueprints
"""
completed_blueprints: List[BlueprintGenome] = []
print(f'Process {mp.current_process().name} - epochs: {config.epochs_in_evolution}')
while blueprint_q.qsize() != 0:
blueprint = blueprint_q.get()
blueprint = evaluate_blueprint(blueprint, input_size)
if blueprint.n_evaluations == config.n_evals_per_bp:
completed_blueprints.append(blueprint)
else:
blueprint_q.put(blueprint)
return completed_blueprints
|
c87d0a37fe32d2af6594564afc44d16adf616737
| 3,640,289
|
import tempfile
import shlex
import tablib
from subprocess import PIPE, Popen
def run_noble_coder(text, noble_coder):
"""
Run Noble Coder
Args:
text: the text to feed into Noble Coder
noble_coder: the execution path of Noble Coder
Returns:
The perturbation agent
"""
pert_agent = None
with tempfile.TemporaryDirectory() as dirname:
with open("{}/tmp.txt".format(dirname), "w") as f:
f.write(text)
command = "java -jar {noble_coder} -terminology NCI_Thesaurus " \
"-input {dirname} -output {dirname}".format(noble_coder=noble_coder, dirname=dirname)
proc = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
out, err = [x.decode("utf-8") for x in proc.communicate()]
if proc.returncode != 0 or "[Errno" in err:
raise RuntimeError("Noble Coder failed to complete\nstdout: {}\n stderr {}".format(out, err))
data = tablib.Dataset().load(open("{}/RESULTS.tsv".format(dirname)).read())
for row in data:
if "Amino Acid" in row[4] and "Pharmacologic Substance" not in row[4]:
pert_agent = row[3]
break
return pert_agent
|
20bcee89ddda7c8abc108a42621867017a7e4d63
| 3,640,290
|
def xor(*args):
"""True if exactly one of the arguments of the iterable is True.
>>> xor(0,1,0,)
True
>>> xor(1,2,3,)
False
>>> xor(False, False, False)
False
>>> xor("kalimera", "kalinuxta")
False
>>> xor("", "a", "")
True
>>> xor("", "", "")
False
"""
return sum([bool(i) for i in args]) == 1
|
86bbe0350dd18a2508120cec9672661e1aa56ce0
| 3,640,291
|
from utils import check_dir
import coeff
from version import __version__
def proc(log_file, thread, circ_file, hisat_bam, rnaser_file, reads, outdir, prefix, anchor, lib_type):
"""
Build pseudo circular reference index and perform reads re-alignment
Extract BSJ and FSJ reads from alignment results
Returns
-----
str
output file name
"""
circ_dir = '{}/circ'.format(outdir)
check_dir(circ_dir)
circ_fasta = '{}/circ/{}_index.fa'.format(outdir, prefix)
circ_info = load_bed(circ_file)
if rnaser_file:
LOGGER.info('Loading RNase R results')
rnaser_exp, rnaser_stat = update_info(circ_info, rnaser_file)
# extract fasta file for reads alignment
generate_index(log_file, circ_info, circ_fasta)
# hisat2-build index
denovo_index = build_index(log_file, thread, circ_fasta, outdir, prefix)
LOGGER.debug('De-novo index: {}'.format(denovo_index))
# hisat2 de novo alignment for candidate reads
denovo_bam = denovo_alignment(log_file, thread, reads, outdir, prefix)
LOGGER.debug('De-novo bam: {}'.format(denovo_bam))
# Find BSJ and FSJ informations
cand_bsj = proc_denovo_bam(denovo_bam, thread, circ_info, anchor, lib_type)
bsj_reads, fsj_reads = proc_genome_bam(hisat_bam, thread, circ_info, cand_bsj, anchor, circ_dir)
total_reads, mapped_reads = bam_stat(hisat_bam)
circ_reads = sum([len(bsj_reads[i]) for i in bsj_reads]) * 2
sample_stat = (total_reads, mapped_reads, circ_reads)
sample_exp = expression_level(circ_info, bsj_reads, fsj_reads)
# circRNA annotation
header = [
'Sample: {}'.format(prefix),
'Total_Reads: {}'.format(total_reads),
'Mapped_Reads: {}'.format(mapped_reads),
'Circular_Reads: {}'.format(circ_reads),
]
out_file = '{}/{}.gtf'.format(outdir, prefix)
if rnaser_file:
tmp_header, circ_exp = coeff.correction(sample_exp, sample_stat, rnaser_exp, rnaser_stat)
header += tmp_header
else:
circ_exp = sample_exp
header += ['version: {}'.format(__version__), ]
gtf_info = index_annotation(utils.GTF)
format_output(circ_info, circ_exp, sample_stat, header, gtf_info, out_file)
return out_file
|
f0cbe9a93cf81a0177895602bc706c08841d30c0
| 3,640,292
|
import numpy as np
def zjitter(jitter=0.0, radius=5):
"""
scan jitter is in terms of the fractional pixel difference when
moving the laser in the z-direction
"""
psfsize = np.array([2.0, 1.0, 3.0])
# create a base image of one particle
s0 = init.create_single_particle_state(imsize=4*radius,
radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
sl = np.s_[s0.pad:-s0.pad,s0.pad:-s0.pad,s0.pad:-s0.pad]
# add up a bunch of trajectories
finalimage = 0*s0.get_model_image()[sl]
position = 0*s0.obj.pos[0]
    for i in range(finalimage.shape[0]):
offset = jitter*np.random.randn(3)*np.array([1,0,0])
s0.obj.pos[0] = np.array(s0.image.shape)/2 + offset
s0.reset()
finalimage[i] = s0.get_model_image()[sl][i]
position += s0.obj.pos[0]
position /= float(finalimage.shape[0])
# place that into a new image at the expected parameters
s = init.create_single_particle_state(imsize=4*radius, sigma=0.05,
radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
s.reset()
# measure the true inferred parameters
return s, finalimage, position
|
7fae2f750cd80708e7cd881a05d535a00b4ecb38
| 3,640,293
|
def append_column(rec, col, name=None, format=None):
"""
Append a column to the end of a records array.
Parameters
----------
rec : recarray
Records array.
col : array_like
Array or similar object which will be converted into the new column.
name : str, optional
Name of the column. If None col.dtypes.name will be used.
format : dtype, optional
Data type to convert the new column into before appending. Required if
col is not an ndarray.
Returns
-------
new_rec : recarray
New records array with column appended.
"""
N = len(rec.dtype.descr)
return insert_column(rec, N, col, name, format)
|
f851ef69946937cbb100d424ddb8502b906940bd
| 3,640,294
|
import copy
def dfa2nfa(dfa):
"""Copy DFA to an NFA, so remove determinism restriction."""
nfa = copy.deepcopy(dfa)
nfa.transitions._deterministic = False
nfa.automaton_type = 'Non-Deterministic Finite Automaton'
return nfa
|
eed8e651a51e71599a38288665604add3d8a0a3d
| 3,640,295
|
from django.db.models import Count, OuterRef, Subquery
from django.db.models.functions import Coalesce
def count_related_m2m(model, field):
"""Return a Subquery suitable for annotating a m2m field count."""
subquery = Subquery(model.objects.filter(**{"pk": OuterRef("pk")}).order_by().annotate(c=Count(field)).values("c"))
return Coalesce(subquery, 0)
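# Hedged usage sketch, assuming a hypothetical Book model with a many-to-many
# field named "authors": annotate each book with its author count without a
# GROUP BY on the outer queryset.
books_with_counts = Book.objects.annotate(author_count=count_related_m2m(Book, "authors"))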
|
e772c8d55a1d9778077eaf9cbebfc51948361e1c
| 3,640,296
|
def create_car(sql: Session, car: CarsCreate):
"""
Create a record of car with its Name & Price
"""
new_car = Cars(
Name=car.Name,
Price=car.Price
)
sql.add(new_car)
sql.commit()
sql.refresh(new_car)
return new_car
|
33fda0b653950989bd1b61ae1ee689db05576877
| 3,640,297
|
import time
def train(train_loader, model, criterion, optimizer, args, epoch):
"""Train process"""
'''Set up configuration'''
batch_time = AverageMeter()
data_time = AverageMeter()
am_loss = AverageMeter()
vec_loss = AverageMeter()
dis_loss = AverageMeter()
ske_loss = AverageMeter()
kps_loss = AverageMeter()
last = time.time()
model.train()
bar = Bar('\033[31m Train \033[0m', max=len(train_loader))
'''Start Training'''
for i, sample in enumerate(train_loader):
data_time.update(time.time() - last)
results, targets, loss = one_forward_pass(
sample, model, criterion, args, is_training=True
)
'''Update the loss after each sample'''
am_loss.update(
loss[0].item(), targets['batch_size']
)
vec_loss.update(
loss[1].item(), targets['batch_size']
)
dis_loss.update(
loss[2].item(), targets['batch_size']
)
ske_loss.update(
loss[3].item(), targets['batch_size']
)
kps_loss.update(
loss[4].item(), targets['batch_size']
)
''' backward and step '''
optimizer.zero_grad()
# loss[1].backward()
if epoch < 60:
loss[5].backward()
else:
loss[0].backward()
optimizer.step()
''' progress '''
batch_time.update(time.time() - last)
last = time.time()
bar.suffix = (
'({batch}/{size}) '
'l: {loss:.5f} | '
'lV: {lossV:.5f} | '
'lD: {lossD:.5f} | '
'lM: {lossM:.5f} | '
'lK: {lossK:.5f} | '
).format(
batch=i + 1,
size=len(train_loader),
loss=am_loss.avg,
lossV=vec_loss.avg,
lossD=dis_loss.avg,
lossM=ske_loss.avg,
lossK=kps_loss.avg
)
bar.next()
bar.finish()
return am_loss.avg
|
fb83156f3915f34dd92d4060b62b77644dd65f8b
| 3,640,298
|
import os
import yaml
def fetch_folder_config(path):
"""Fetch config file of folder.
Args:
path (str): path to the wanted folder of the config.
Returns:
dict. the loaded config file.
"""
config = {}
config_path = os.path.join(path, DOC_YML)
if os.path.exists(config_path):
with open(config_path) as config_file:
            config = yaml.safe_load(config_file)
return config
|
b78393e8d97147227c0648050f749e48e13a54b5
| 3,640,299
|