content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def run():
    """Runs the main script.

    Scrapes the Essex mobile-library routes index, then every route page and
    every stop page (caching each fetched page under ``../raw/essex/`` because
    the site appears to block rapid scraping), geocodes each stop's postcode
    via postcodes.io, and writes the collected stops out with
    ``create_mobile_library_file``.
    """
    mobiles = []
    route_list_html = requests.get(WEBSITE + ROUTES_LIST)
    route_list_soup = BeautifulSoup(route_list_html.text, 'html.parser')
    route_links = [anchor.get('href')
                   for anchor in route_list_soup.find_all('a')
                   if 'mobile-library-route' in anchor.get('href')]
    for link in route_links:
        # Route pages are fetched with a 10s pause to stay under rate limits.
        route_text = _read_page_cached(link, sleep_after=10)
        route_soup = BeautifulSoup(route_text, 'html.parser')
        stop_links = [WEBSITE + stop_link.get('href')
                      for stop_link in route_soup.find_all('table')[0].find_all('a')]
        for stop in stop_links:
            stop_text = _read_page_cached(stop, sleep_after=0)
            mobiles.append(_parse_stop_page(stop_text, stop))
    create_mobile_library_file('Essex', 'essex.csv', mobiles)


def _read_page_cached(url, sleep_after=0):
    """Return the HTML text for *url*, caching it under ``../raw/essex/``.

    The cache file name is the urlsafe base64 encoding of the url.  When a
    live fetch is needed, optionally sleep *sleep_after* seconds afterwards
    (web scraping seems to be getting blocked so it may take a few goes).
    """
    encoded = str(base64.urlsafe_b64encode(url.encode("utf-8")), 'utf-8')
    cache_file = '../raw/essex/' + encoded + '.txt'
    if path.exists(cache_file):
        # BUGFIX: this handle was previously opened without ever being closed.
        with open(cache_file, 'r') as cached:
            return cached.read()
    page_text = requests.get(url).text
    with open(cache_file, "w") as cached:
        cached.write(page_text)
    if sleep_after:
        time.sleep(sleep_after)
    return page_text


def _parse_stop_page(stop_text, stop):
    """Parse one stop page into the row list used for the output CSV."""
    stop_soup = BeautifulSoup(stop_text, 'html.parser')
    values = stop_soup.find_all('div', {"class": "pfont"})
    stop_name = stop_soup.find_all(
        'div', {"class": "yellow-wrapper"})[0].find("h1").text
    community = values[0].text.strip().splitlines()[0].strip()
    address = stop_name + ', ' + community
    postcode = values[0].text.strip().splitlines()[-1].strip()
    # Manual corrections for postcodes that are wrong on the source site.
    if postcode == 'CM133AS':
        postcode = 'CM132AS'
    if postcode == 'RM4 1ED':
        postcode = 'RM4 1LU'
    frequency = 'FREQ=WEEKLY;INTERVAL=' + values[1].text.strip()[:1]
    day = values[2].text.strip()
    times = values[3].text.strip()
    route_mobile = values[4].text.strip()
    route = 'Week ' + route_mobile.split('week')[1].strip() + ' ' + day
    mobile_library = route_mobile.split('week')[0].strip()
    start = values[6].text.strip()
    start = datetime.strptime(start, '%d %B %Y').strftime('%Y-%m-%d')
    arrival = _to_24_hour(times.split('to')[0])
    departure = _to_24_hour(times.split('to')[1])
    url = 'https://api.postcodes.io/postcodes/' + postcode
    postcode_data = json.loads(requests.get(url).text)
    latitude = postcode_data['result']['latitude']
    longitude = postcode_data['result']['longitude']
    return [mobile_library, route, community, stop_name, address, postcode,
            longitude, latitude, day, 'Public', arrival, departure, frequency,
            start, '', '', stop]


def _to_24_hour(time_str):
    """Convert a scraped time like ``'9.30am'``, ``'2pm'`` or ``'1030'`` to 'HH:MM'.

    Hours below 8 are assumed to be afternoon visits and shifted to the
    24-hour clock — TODO confirm no stop is visited before 8am.
    """
    digits = time_str.replace('am', '').replace('pm', '').strip().replace('.', '')
    hours = '00'
    minutes = '00'
    if len(digits) == 1:
        hours = digits.rjust(2, '0')
    elif len(digits) == 2:
        hours = digits
    elif len(digits) == 3:
        hours = digits[0:1].rjust(2, '0')
        minutes = digits[1:3]
    elif len(digits) == 4:
        hours = digits[0:2]
        minutes = digits[2:4]
    if int(hours) < 8:
        hours = int(hours) + 12
    return str(hours) + ':' + minutes
def walk_dataset_timit(dataset_path: Path, hp: Map):
    """Yield a preprocessed sample for every .WAV file under the TIMIT root.

    :param dataset_path: path to the TIMIT dataset directory
    :param hp: hyperparameters object
    :return: generator of data samples
    """
    wav_files = dataset_path.rglob("*.WAV")
    for wav_file in wav_files:
        yield preprocess_wav_timit(wav_file, hp)
def calc_wave_number(g, h, omega, relax=0.5, eps=1e-15):
    """Find the wave number k for a known angular frequency omega.

    Solves the linear dispersion relation ``omega**2 = g * k * tanh(k * h)``
    by under-relaxed Picard (fixed point) iteration.
    """
    # Deep-water wave number as the starting guess.
    k_prev = omega ** 2 / g
    for _ in range(100):
        k_next = omega ** 2 / g / tanh(k_prev * h)
        if abs(k_next - k_prev) < eps:
            break
        # Blend old and new iterates to keep the iteration stable.
        k_prev = relax * k_next + (1 - relax) * k_prev
    else:
        ocellaris_error(
            'calc_wave_number did not converge',
            'Input g=%r h=%r omega=%r, tolerance=%e' % (g, h, omega, eps),
        )
    return k_next
def test_publish(session: nox.Session) -> None:
    """Publish this project to test pypi.

    Thin nox session wrapper: delegates to ``publish`` with ``test=True`` so
    the upload targets the TestPyPI index instead of production.
    """
    publish(session, test=True)
def draw_contour_2d(points):
    """Draw the closed outline of a 2D figure by connecting the points in
    order; the last point is prepended so the contour closes on itself.

    :param points: list of numpy arrays describing nodes of the figure.
    """
    closed_loop = [points[-1]] + list(points)
    xs = [node[0] for node in closed_loop]
    ys = [node[1] for node in closed_loop]
    plt.plot(xs, ys, color="blue")
def emit_live_notification_for_model(obj, user, history, *, type:str="change", channel:str="events",
                                     sessionid:str="not-existing"):
    """
    Sends a model live notification to users.

    :param obj: model instance the notification refers to; must expose
        ``project`` and, depending on type, ``ref``/``subject``/``slug``/``name``
    :param user: recipient user (only ``user.id`` is read here)
    :param history: history entry; ``history.type`` selects created/changed/deleted
    :param type: notification type label (not read in this body)
    :param channel: routing channel name (not read in this body)
    :param sessionid: session id forwarded to ``emit_event``
    :return: result of ``emit_event``, or ``None`` when the object is being
        imported or its content type is not one of the handled ones
    """
    # Objects created by a bulk import should not spam notifications.
    if obj._importing:
        return None
    content_type = get_typename_for_model_instance(obj)
    # Each branch builds a title, an optional in-app url (None on delete),
    # and a short body line.  The literal strings are gettext msgids and
    # must stay as-is for translation extraction.
    if content_type == "userstories.userstory":
        if history.type == HistoryType.create:
            title = _("User story created")
            url = resolve("userstory", obj.project.slug, obj.ref)
        elif history.type == HistoryType.change:
            title = _("User story changed")
            url = resolve("userstory", obj.project.slug, obj.ref)
        else:
            title = _("User story deleted")
            url = None
        body = _("US #{} - {}").format(obj.ref, obj.subject)
    elif content_type == "tasks.task":
        if history.type == HistoryType.create:
            title = _("Task created")
            url = resolve("task", obj.project.slug, obj.ref)
        elif history.type == HistoryType.change:
            title = _("Task changed")
            url = resolve("task", obj.project.slug, obj.ref)
        else:
            title = _("Task deleted")
            url = None
        body = _("Task #{} - {}").format(obj.ref, obj.subject)
    elif content_type == "issues.issue":
        if history.type == HistoryType.create:
            title = _("Issue created")
            url = resolve("issue", obj.project.slug, obj.ref)
        elif history.type == HistoryType.change:
            title = _("Issue changed")
            url = resolve("issue", obj.project.slug, obj.ref)
        else:
            title = _("Issue deleted")
            url = None
        body = _("Issue: #{} - {}").format(obj.ref, obj.subject)
    elif content_type == "wiki.wiki_page":
        if history.type == HistoryType.create:
            title = _("Wiki Page created")
            url = resolve("wiki", obj.project.slug, obj.slug)
        elif history.type == HistoryType.change:
            title = _("Wiki Page changed")
            url = resolve("wiki", obj.project.slug, obj.slug)
        else:
            title = _("Wiki Page deleted")
            url = None
        body = _("Wiki Page: {}").format(obj.slug)
    elif content_type == "milestones.milestone":
        if history.type == HistoryType.create:
            title = _("Sprint created")
            url = resolve("taskboard", obj.project.slug, obj.slug)
        elif history.type == HistoryType.change:
            title = _("Sprint changed")
            url = resolve("taskboard", obj.project.slug, obj.slug)
        else:
            title = _("Sprint deleted")
            url = None
        body = _("Sprint: {}").format(obj.name)
    else:
        # Unsupported content type: emit nothing.
        return None
    return emit_event(
        {
            "title": title,
            "body": "Project: {}\n{}".format(obj.project.name, body),
            "url": url,
            "timeout": 10000,
            "id": history.id
        },
        "live_notifications.{}".format(user.id),
        sessionid=sessionid
    )
def is_chitoi(tiles):
    """
    Returns True if the hand satisfies chitoitsu (seven distinct pairs).

    :param tiles: sequence of hashable tile identifiers (14 for a full hand)
    """
    unique_tiles = set(tiles)
    # Seven distinct tiles, each appearing exactly twice.  The generator
    # (instead of a materialized list) lets all() short-circuit.
    return (len(unique_tiles) == 7 and
            all(tiles.count(tile) == 2 for tile in unique_tiles))
def get_hpo_ancestors(hpo_db, hpo_id):
    """
    Get HPO terms higher up in the hierarchy.

    Follows a ``replaced_by`` redirect when ``hpo_id`` is obsolete, then walks
    every ``is_a`` parent link recursively.  The result includes the term
    itself and is de-duplicated by primary id.

    :param hpo_db: database handle exposing ``hpo.find_one`` (mongo-style)
    :param hpo_id: HPO term id to start from
    :return: iterable of term documents (dict-like)
    """
    h = hpo_db.hpo.find_one({'id': hpo_id})
    if 'replaced_by' in h:
        # Not a primary id: swap in the primary record and continue from it.
        h = hpo_db.hpo.find_one({'id': h['replaced_by'][0]})
    hpo = [h]
    if 'is_a' not in h:
        return hpo
    for hpo_parent_id in h['is_a']:
        # Recurse up each parent branch; terms may have multiple parents.
        # (The previous itertools.chain() wrapper around this single
        # iterable was a no-op and has been removed.)
        hpo += list(get_hpo_ancestors(hpo_db, hpo_parent_id))
    # Remove duplicates reached via different branches, keyed by primary id.
    hpo = {term['id'][0]: term for term in hpo}.values()
    return hpo
async def download_category(soup, path, category):
    """Download all events listed for one category page.

    :param soup: BeautifulSoup of the category listing page
    :param path: working directory to change into before downloading
    :param category: category name, used as the target sub-directory
    """
    event_names = []
    event_links = []
    listing = soup.find("div", class_="list_wrapper row")
    for entry in listing.find_all("li"):
        year = entry.get("data-sid")
        event_names.append(year + " " + entry.div.a.picture.img.get("alt", ""))
        event_links.append(entry.div.a.get("href", ""))
    os.chdir(path)
    make_dir("moto")
    make_dir(category)
    print(f">>Downloading '{category}' category")
    for index, link in enumerate(event_links):
        # Names are "<YYYY> <event name>": split off the 4-digit year prefix.
        await download_event(link, event_names[index][:4], event_names[index][5:])
def mcBufAir(params: dict, states: dict) -> float:
    """
    Growth respiration

    Total growth respiration is the sum of the per-organ contributions of
    fruit, leaf and stem.

    Parameters
    ----------
    params : dict
        Parameters saved as model constants
    states : dict
        State variables of the model

    Returns
    -------
    float
        Growth respiration of the plant [mg m-2 s-1]
    """
    return sum(
        mcOrgAir_g(organ=organ, params=params, states=states)
        for organ in ("fruit", "leaf", "stem")
    )
def gene_calling (workflow, assembly_dir, assembly_extentsion, input_dir, extension, extension_paired,
                  gene_call_type, prokka_dir, prodigal_dir,
                  threads,
                  gene_file, gene_PC_file, protein_file, protein_sort,
                  gene_info, complete_gene, complete_protein):
    """
    This set of tasks will run gene-calling workflow.
    Args:
        workflow (anadama2.workflow): An instance of the workflow class.
        assembly_dir: The directory path of assembly results.
        sample_file: The sample list file.
        prokka_dir: The directory path of prokka results.
        prodigal_dir: The directory path of prodigal results.
        gene_file: The fasta file of gene nucleotide sequences.
        gene_PC_file: The fasta file of protein coding gene nucleotide sequences.
        protein_file: The fasta file of protein sequences.
        protein_sort: The sorted fasta file of protein sequences.
        gene_info: The summarized gene calling file.
        complete_gene: The fasta file of gene nucleotide sequences for complete ORFs.
        complete_protein: The fasta file of protein sequences for complete ORFs.
    Requires:
        prokka 1.14-dev: rapid prokaryotic genome annotation (recommend to close '-c' parameter in prodigal)
        prodigal v2.6: gene prediction
        usearch (tested with usearch v9.0.2132_i86linux64)
        assembled contig files
    Returns:
        string: name of gene files
    Example:
        from anadama2 import Workflow
        from MetaWIBELE.characterize import characterization
        # create an anadama2 workflow instance
        workflow=Workflow()
        # add gene calling tasks
        mygene, myprotein = preprocessing_tasks.gene_calling (workflow, assembly_dir, args.sample_file,
                                                              prokka_dir, prodigal_dir,
                                                              gene_file, gene_PC_file, protein_file, protein_sort,
                                                              gene_info, complete_gene, complete_protein)
        # run the workflow
        workflow.go()
    """
    config.logger.info("###### Start gene_calling module ######")
    # Grid scheduling limits come from the global config.
    time_equation = config.time # xxx hours defined in global config
    mem_equation = config.memory # xxx GB defined in global config
    # ================================================
    # collect sequences
    # ================================================
    # Derive sample names from the raw-input file names, then locate each
    # sample's assembled contig file under assembly_dir/<sample>/.
    if extension_paired:
        extension_paireds = extension_paired.split(",")
        sample_files = utilities.find_files(input_dir, extension_paireds[0], None)
        samples = utilities.sample_names(sample_files, extension_paireds[0], None)
    else:
        sample_files = utilities.find_files(input_dir, extension, None)
        samples = utilities.sample_names(sample_files, extension, None)
    sequence_files = []
    for mysample in samples:
        myfile = os.path.join(assembly_dir, mysample, mysample + "%s" % assembly_extentsion)
        sequence_files.append(myfile)
    # foreach sample
    filtered_contigs = sequence_files
    # ================================================
    # Gene calling
    # ================================================
    # fna = nucleotide genes, faa = proteins, gff = annotations.  The *_tmp
    # lists hold per-sample outputs; the plain lists hold the symlinked
    # copies collected in the tool's top-level output directory.
    fna_file = []
    faa_file = []
    gff_files = []
    fna_file_tmp = []
    faa_file_tmp = []
    gff_files_tmp = []
    ## Using Prodigal
    if gene_call_type == "prodigal" or gene_call_type == "both":
        os.system("mkdir -p " + prodigal_dir)
        for contig in filtered_contigs:
            contig_base = os.path.basename(contig).split(os.extsep)[0]
            annotation_dir = os.path.join(prodigal_dir, contig_base)
            os.system("mkdir -p " + annotation_dir)
            gff_file = os.path.join(annotation_dir, '%s.gff' % contig_base)
            cds_file = os.path.join(annotation_dir, '%s.fna' % contig_base)
            cds_aa = os.path.join(annotation_dir, '%s.faa' % contig_base)
            score = os.path.join(annotation_dir, '%s.gene_score.txt' % contig_base)
            stdout_log = os.path.join(annotation_dir, '%s.stdout.log' % contig_base)
            faa_file_tmp.append(cds_aa)
            # One gridable prodigal task per sample (metagenome mode).
            workflow.add_task_gridable('prodigal -m -p meta -i [depends[0]] '
                                       '-f gff -o [targets[0]] -d [targets[1]] -s [targets[3]] '
                                       '-a [targets[2]] '
                                       '>[args[0]] 2>&1',
                                       depends = [contig, TrackedExecutable("prodigal")],
                                       targets = [gff_file, cds_file, cds_aa, score],
                                       args = [stdout_log],
                                       cores = threads,
                                       mem = mem_equation,
                                       time = time_equation,
                                       name = contig_base + "__prodigal")
        # Symlink per-sample outputs into the flat prodigal_dir.
        for myfile in faa_file_tmp:
            myname = os.path.basename(myfile)
            myfile_new = os.path.join(prodigal_dir, myname)
            faa_file.append(myfile_new)
            workflow.add_task(
                "ln -fs [depends[0]] [targets[0]]",
                depends = [myfile],
                targets = [myfile_new],
                cores = 1,
                name = "ln__" + myname)
            myfna = re.sub(".faa", ".fna", myfile)
            myfna_new = re.sub(".faa", ".fna", myfile_new)
            if gene_call_type == "prodigal":
                # Prodigal-only run: its nucleotide genes are the final ones
                # and the downstream "prokka" directory is prodigal's.
                fna_file.append(myfna_new)
                prokka_dir = prodigal_dir
            workflow.add_task(
                "ln -fs [depends[0]] [targets[0]]",
                depends = [myfna],
                targets = [myfna_new],
                cores = 1,
                name = "ln__" + myname)
            mygff = re.sub(".faa", ".gff", myfile)
            mygff_new = re.sub(".faa", ".gff", myfile_new)
            workflow.add_task(
                "ln -fs [depends[0]] [targets[0]]",
                depends = [mygff],
                targets = [mygff_new],
                cores = 1,
                name = "ln__" + myname)
    if gene_call_type == "prokka" or gene_call_type == "both":
        ## Calling genes with Prokka
        os.system("mkdir -p " + prokka_dir)
        for contig in filtered_contigs:
            contig_base = os.path.basename(contig).split(os.extsep)[0]
            sample = os.path.basename(contig_base)
            annotation_dir = os.path.join(prokka_dir, sample)
            os.system("mkdir -p " + annotation_dir)
            stdout_log = os.path.join(annotation_dir, '%s.prokka.bacteria.stdout.log' % contig_base)
            score = os.path.join(annotation_dir, '%s.gene_score.txt' % contig_base)
            gene_nuc = os.path.join(annotation_dir, '%s.ffn' % contig_base)
            gene_aa = os.path.join(annotation_dir, '%s.faa' % contig_base)
            gff_file = os.path.join(annotation_dir, '%s.gff' % contig_base)
            fna_file_tmp.append(gene_nuc)
            gff_files_tmp.append(gff_file)
            workflow.add_task_gridable('prokka --prefix [args[0]] --addgenes --addmrna --force --metagenome '
                                       '--cpus [args[2]] '
                                       '--outdir [args[1]] [depends[0]] '
                                       '>[args[3]] 2>&1 ',
                                       depends = [contig, TrackedExecutable("prokka")],
                                       targets = [gene_nuc, gene_aa, gff_file],
                                       args = [sample, annotation_dir, threads, stdout_log],
                                       cores = threads,
                                       mem = mem_equation,
                                       time = time_equation,
                                       name = contig_base + "__prokka")
        for myfile in gff_files_tmp:
            myname = os.path.basename(myfile)
            myfile_new = os.path.join(prokka_dir, myname)
            gff_files.append(myfile_new)
        # Symlink per-sample outputs into the flat prokka_dir.
        for myfile in fna_file_tmp:
            myname = os.path.basename(myfile)
            myfile_new = os.path.join(prokka_dir, myname)
            fna_file.append(myfile_new)
            workflow.add_task(
                "ln -fs [depends[0]] [targets[0]]",
                depends = [myfile],
                targets = [myfile_new],
                cores = 1,
                name = "ln__" + myname)
            myfaa = re.sub(".ffn", ".faa", myfile)
            myfaa_new = re.sub(".ffn", ".faa", myfile_new)
            if gene_call_type == "prokka":
                # Prokka-only run: its proteins are the final ones and the
                # downstream "prodigal" directory is prokka's.
                faa_file.append(myfaa_new)
                prodigal_dir = prokka_dir
            workflow.add_task(
                "ln -fs [depends[0]] [targets[0]]",
                depends = [myfaa],
                targets = [myfaa_new],
                cores = 1,
                name = "ln__" + myname)
            mygff = re.sub(".ffn", ".gff", myfile)
            mygff_new = re.sub(".ffn", ".gff", myfile_new)
            workflow.add_task(
                "ln -fs [depends[0]] [targets[0]]",
                depends = [mygff],
                targets = [mygff_new],
                cores = 1,
                name = "ln__" + myname)
    # ================================================
    # Summarize sequences
    # ================================================
    #mem_equation = "50000"
    ### combine gene sequences ###
    # Prodigal emits .fna nucleotide files, prokka emits .ffn.
    nuc_type = "ffn"
    if gene_call_type == "prodigal":
        nuc_type = "fna"
    mylog = re.sub(".fna", ".log", gene_file)
    workflow.add_task('metawibele_combine_gene_sequences -p [args[0]] -e [args[1]] -o [targets[0]] > [args[2]] 2>&1 ',
                      depends = utilities.add_to_list(fna_file,TrackedExecutable("metawibele_combine_gene_sequences")),
                      targets = [gene_file],
                      args = [prokka_dir, nuc_type, mylog],
                      cores = 1,
                      name = "combine_gene_sequences")
    ### combine protein sequences ###
    ## collect sequences
    mylog = re.sub(".faa", ".log", protein_file)
    workflow.add_task('metawibele_format_protein_sequences -p [args[0]] -q [args[1]] -e faa -o [targets[0]] '
                      '-m [targets[1]] >[args[2]] 2>&1 ',
                      depends = utilities.add_to_list(faa_file, TrackedExecutable("metawibele_format_protein_sequences")) + gff_files,
                      targets = [protein_file, gene_info],
                      args = [prokka_dir, prodigal_dir, mylog],
                      cores = 1,
                      name = "format_protein_sequences")
    ## sort by length and filter out short-length sequence
    mylog = re.sub(".faa", ".log", protein_sort)
    workflow.add_task('usearch -sortbylength [depends[0]] '
                      '-fastaout [targets[0]] -minseqlength 0 >[args[0]] 2>&1 ',
                      depends = [protein_file, TrackedExecutable("usearch")],
                      targets = [protein_sort],
                      args = [mylog],
                      cores = 1,
                      name = "usearch__sorting")
    ## extract nucleotide sequence for protein coding genes
    mylog = re.sub(".fna", ".log", gene_PC_file)
    workflow.add_task(
        'metawibele_extract_protein_coding_genes -g [depends[0]] -p [depends[1]] -o [targets[0]] > [args[0]] 2>&1 ',
        depends = [gene_file, protein_sort, TrackedExecutable("metawibele_extract_protein_coding_genes")],
        targets = [gene_PC_file],
        args = [mylog],
        cores = 1,
        name = "extract_protein_coding_genes")
    ## extract sequences
    mylog = re.sub(".fna", ".log", complete_gene)
    workflow.add_task(
        'metawibele_extract_complete_ORF_seq -t complete -m [depends[0]] -i [depends[1]] -o [targets[0]] >[args[0]] 2>&1',
        depends = [gene_info, gene_PC_file, TrackedExecutable("metawibele_extract_complete_ORF_seq")],
        targets = [complete_gene],
        args = [mylog],
        cores = 1,
        name = 'extract_complete_ORF_seq')
    mylog = re.sub(".faa", ".log", complete_protein)
    workflow.add_task(
        'metawibele_extract_complete_ORF_seq -t complete -m [depends[0]] -i [depends[1]] -o [targets[0]] >[args[0]] 2>&1',
        depends = [gene_info, protein_sort, TrackedExecutable("metawibele_extract_complete_ORF_seq")],
        targets = [complete_protein],
        args = [mylog],
        cores = 1,
        name = 'extract_complete_ORF_seq')
    return complete_gene, complete_protein
def space_boundaries_re(regex):
    """Return *regex* wrapped so it only matches when delimited by whitespace
    or the start/end of the string; the inner pattern becomes group 1."""
    return r"(?:^|\s)(" + regex + r")(?:\s|$)"
def get_jobs():
    """
    this function will query USAJOBS api and return all open FEC jobs.
    if api call failed, a status error message will be displayed in the
    jobs.html session in the career page.
    it also query code list to update hirepath info. a hard-coded code list
    is used for backup if query failed.

    :return: ``{"jobData": [...]}`` on success, ``{"error": ...}`` when the
        search API call fails
    """
    # url = 'https://data.usajobs.gov/api/Search'
    # codes_url = 'https://data.usajobs.gov/api/codelist/hiringpaths'
    querystring = {}
    querystring["Organization"] = settings.USAJOBS_AGENCY_CODE
    querystring["WhoMayApply"] = settings.USAJOBS_WHOMAYAPPLY
    headers = {
        "authorization-key": settings.USAJOBS_API_KEY,
        "host": "data.usajobs.gov",
        "cache-control": "no-cache",
    }
    # query usajobs API for all open fec jobs
    response = requests.get(JOB_URL, headers=headers, params=querystring)
    if response.status_code != 200:
        return {"error": USAJOB_SEARCH_ERROR}
    responses = response.json()
    # query usajobs API for list of all hiring-path codes
    codes_response = requests.get(CODES_URL, headers=headers)
    if codes_response.status_code != 200:
        # fall back to the hard-coded backup list when the codelist API fails
        codes_responses = json.loads(CODE_LIST)
    else:
        codes_responses = codes_response.json()
    jobData = []
    search_results = responses.get("SearchResult", {})
    # iterate over returned job data
    if "SearchResultItems" in search_results:
        for result in search_results.get("SearchResultItems", None):
            matched_object_descriptor = result.get("MatchedObjectDescriptor", {})
            # JobGrade is a list; take the first entry's code when present
            if len(matched_object_descriptor.get("JobGrade", [])) > 0:
                job_grade = matched_object_descriptor.get("JobGrade", [])[0].get(
                    "Code", ""
                )
            else:
                job_grade = ""
            jobs_dict = {
                "position_title": matched_object_descriptor.get("PositionTitle", ""),
                "position_id": matched_object_descriptor.get("PositionID", ""),
                "position_uri": matched_object_descriptor.get("PositionURI", ""),
                "position_start_date": dateutil.parser.parse(
                    matched_object_descriptor.get("PositionStartDate", "")
                ),
                "position_end_date": dateutil.parser.parse(
                    matched_object_descriptor.get("PositionEndDate", "")
                ),
                "job_grade": job_grade,
                "low_grade": matched_object_descriptor.get("UserArea", {})
                .get("Details", {})
                .get("LowGrade", ""),
                "high_grade": matched_object_descriptor.get("UserArea", {})
                .get("Details", {})
                .get("HighGrade", ""),
            }
            # map hiring-path code(s) for each job to description(s)
            if len(codes_responses.get("CodeList", [])) > 0:
                hiring_path_codes = codes_responses.get("CodeList", [])[0].get(
                    "ValidValue", []
                )
            else:
                hiring_path_codes = []
            hiring_path = [
                item
                for item in result.get("MatchedObjectDescriptor", {})
                .get("UserArea", {})
                .get("Details", {})
                .get("HiringPath", [])
            ]
            hp = []
            for path in hiring_path:
                # Use the human-readable description when the code is known,
                # otherwise keep the raw path string.
                hpa = [
                    item for item in hiring_path_codes if item["Code"] == path.upper()
                ]
                if hpa:
                    hp.append(hpa[0].get("Value", ""))
                else:
                    hp.append(path)
            hiring_path_list = ", ".join(str(n) for n in hp)
            open_to = {"open_to": hiring_path_list}
            jobs_dict.update(open_to)
            jobData.append(jobs_dict)
    return {"jobData": jobData}
def artanh(x) -> ProcessBuilder:
    """
    Inverse hyperbolic tangent.

    Builds the openEO ``artanh`` process node for the given input.

    :param x: A number.
    :return: The computed angle in radians.
    """
    return _process('artanh', x=x)
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
    """
    Loads series data from text files.
    Assumes data are formatted as rows, where each record is a row
    of numbers separated by spaces e.g. 'v v v v v'. You can
    optionally specify a fixed number of initial items per row to skip / discard.
    Parameters
    ----------
    path : string
        Directory to load from, can be a URI string with scheme
        (e.g. 'file://', 's3n://', or 'gs://'), or a single file,
        or a directory, or a directory with a single wildcard character.
    ext : str, optional, default = 'txt'
        File extension.
    dtype : dtype or dtype specifier, default 'float64'
        Numerical type to use for data after converting from text.
    skip : int, optional, default = 0
        Number of items in each record to skip.
    shape : tuple or list, optional, default = None
        Shape of data if known, will be inferred otherwise.
    index : array, optional, default = None
        Index for records, if not provided will use (0, 1, ...)
    labels : array, optional, default = None
        Labels for records. If provided, should have length equal to number of rows.
    npartitions : int, default = None
        Number of partitions for parallelization (Spark only)
    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)
    credentials : dict, default = None
        Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
    """
    from thunder.readers import normalize_scheme, get_parallel_reader
    path = normalize_scheme(path, ext)
    if spark and isinstance(engine, spark):
        # Distributed path: parse each text line into a numeric array.
        def parse(line, skip):
            vec = [float(x) for x in line.split(' ')]
            return array(vec[skip:], dtype=dtype)
        lines = engine.textFile(path, npartitions)
        data = lines.map(lambda x: parse(x, skip))
        # Re-key each record by its position so the RDD is ordered.
        def switch(record):
            ary, idx = record
            return (idx,), ary
        rdd = data.zipWithIndex().map(switch)
        return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
    else:
        # Local path: read all files, one parsed row per text line.
        reader = get_parallel_reader(path)(engine, credentials=credentials)
        data = reader.read(path, ext=ext)
        values = []
        for kv in data:
            # Drop the trailing empty chunk produced by the final newline.
            for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
                values.append(fromstring(line, sep=' '))
        values = asarray(values)
        if skip > 0:
            values = values[:, skip:]
        if shape:
            values = values.reshape(shape)
        return fromarray(values, index=index, labels=labels)
def flop_gemm(n, k):
    """Number of floating-point adds and multiplies for the matrix product of
    an n-by-n matrix with an n-by-k matrix, accumulating into the output."""
    return 2 * k * n * n
def index() -> str:
    """Rest endpoint used as a health check for the server.

    Returns:
        str: A fixed greeting string proving the server is reachable.
    """
    greeting = 'DeChainy server greets you :D'
    return greeting
def get_git_hash() -> str:
    """Get the PyKEEN git hash.

    :return:
        The first 8 characters of the current HEAD commit hash, or 'UNHASHED'
        if git fails or is unavailable, signifying that the code is not
        installed in development mode.
    """
    with open(os.devnull, 'w') as devnull:
        try:
            ret = check_output(  # noqa: S603,S607
                ['git', 'rev-parse', 'HEAD'],
                cwd=os.path.dirname(__file__),
                stderr=devnull,
            )
        except (CalledProcessError, OSError):
            # OSError covers a missing `git` binary (FileNotFoundError) or a
            # bad cwd, both of which previously propagated out of this helper.
            return 'UNHASHED'
        else:
            return ret.strip().decode('utf-8')[:8]
def load_json_samples(path: AnyStr) -> List[str]:
    """
    Loads samples from a json file.

    :param path: Path to the target file
    :return: List of samples
    :raises RuntimeError: if the file's top-level JSON value is not a list
    """
    with open(path, "r", encoding="utf-8") as file:
        samples = json.load(file)
    if not isinstance(samples, list):
        # Was an f-string with no placeholders (F541); plain literal now.
        raise RuntimeError("File's content must be list-like")
    return samples
def openPort(path = SERIALPATH):
    """Open the serial port for the given path.

    :param path: filesystem path of the serial device
    :return: an open ``Serial`` object at 115200 baud; exits the process if
        the device cannot be opened
    """
    try:
        port = Serial(path, baudrate = 115200)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the same recovery for real errors only.
        print("No serial device on the given path :" + path)
        sys.exit()
    return port
def test_skasch():
    """
    Run `python -m pytest ./day-03/part-2/skasch.py` to test the submission.

    Feeds the Advent of Code 2021 day 3 sample diagnostic report to the
    solver and checks the part-2 answer (life support rating) is 230.
    """
    assert (
        SkaschSubmission().run(
            """
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010
""".strip()
        )
        == 230
    )
def decompress_lzma(data: bytes) -> bytes:
    """decompresses lzma-compressed data

    Expects a 5-byte header (1 properties byte + 4-byte little-endian
    dictionary size) followed by a raw LZMA1 stream — i.e. an LZMA-alone
    header without the 8-byte uncompressed-size field.

    :param data: compressed data
    :type data: bytes
    :raises _lzma.LZMAError: Compressed data ended before the end-of-stream marker was reached
    :return: uncompressed data
    :rtype: bytes
    """
    props_byte, dict_size = struct.unpack("<BI", data[:5])
    # The properties byte packs (pb * 5 + lp) * 9 + lc.
    lc = props_byte % 9
    remainder = props_byte // 9
    lp = remainder % 5
    pb = remainder // 5
    filters = [
        {
            "id": lzma.FILTER_LZMA1,
            "dict_size": dict_size,
            "lc": lc,
            "lp": lp,
            "pb": pb,
        }
    ]
    decompressor = lzma.LZMADecompressor(format=lzma.FORMAT_RAW, filters=filters)
    return decompressor.decompress(data[5:])
async def on_guild_join(guild):
    """When the bot joins a server send a webhook with detailed information as well as print out some basic information.

    :param guild: the ``discord.Guild`` the bot was just added to
    """
    embed = discord.Embed(
        title = "Joined a server!",
        timestamp = datetime.datetime.utcnow(),
        color = 0x77DD77
    )
    # Member count excludes the bot itself (hence the -1).
    embed.add_field(name = "Server Name", value = guild.name)
    embed.add_field(name = "Server Members", value = len(guild.members) - 1)
    embed.add_field(name = "Server ID", value = guild.id)
    embed.add_field(name = "Server Owner", value = f"{guild.owner.name}#{guild.owner.discriminator}")
    embed.add_field(name = "Server Owner ID", value = guild.owner.id)
    embed.set_footer(text = f"I am now in {len(bot.guilds)} servers", icon_url = guild.icon_url)
    # Post the embed to the configured join/leave logging webhook.
    async with aiohttp.ClientSession() as session:
        webhook = discord.Webhook.from_url(Config.JL_WEBHOOK, adapter = discord.AsyncWebhookAdapter(session))
        await webhook.send(embed = embed, username = "Joined a server")
    print(f"{Style.BRIGHT}{Fore.LIGHTGREEN_EX}[JOINED-SERVER]{Fore.WHITE} Joined {Fore.YELLOW}{guild.name}{Fore.WHITE} with {Fore.YELLOW}{len(guild.members) - 1}{Fore.WHITE} members.")
def visualize_data_correlation():
    """Large correlation plot.

    Reads ``sp500_joined_closes.csv`` (one column of closing prices per
    ticker), computes the pairwise correlation matrix and shows it as a
    red-yellow-green heatmap with ticker labels on both axes.
    """
    df = pd.read_csv('sp500_joined_closes.csv')
    #df['AAPL'].plot()
    #plt.show()
    df_corr = df.corr()
    #print(df_corr.head())
    data = df_corr.values
    fig = plt.figure()
    # plt.rcParams['figure.figsize'] = [4,2]
    ax = fig.add_subplot(1,1,1)
    heatmap = ax.pcolor(data, cmap=plt.cm.RdYlGn)
    fig.colorbar(heatmap)
    # Center the tick marks on each heatmap cell.
    ax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)
    ax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)
    # Matrix-style orientation: origin at top-left, x labels along the top.
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    column_labels = df_corr.columns
    row_labels = df_corr.index
    ax.set_xticklabels(column_labels)
    ax.set_yticklabels(row_labels)
    plt.xticks(rotation=90)
    # Correlations live in [-1, 1]; pin the color scale to that range.
    heatmap.set_clim(-1,1)
    plt.tight_layout()
    plt.show()
def save_dict_to_json(dictionary, filename):
    """
    Saves a python dictionary to a json file.

    :param dictionary: dictionary object to serialize
    :param filename: full path of the json file to write (created or
        truncated)
    :return: None
    """
    # 'w' (was 'w+') is sufficient — the file is only written, never read —
    # and an explicit encoding keeps the output platform-independent.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(dictionary, f)
def AddCreateFlags(parser):
    """Adds all flags needed for the create command.

    Registers the description flag plus a required argument group holding
    the specific-SKU reservation properties (allocation requirement, VM
    count, CPU platform, machine type, local SSDs and accelerators).

    Args:
        parser: argparse-style parser the flags are added to.
    """
    GetDescriptionFlag().AddToParser(parser)
    group = base.ArgumentGroup(
        'Manage the specific SKU reservation properties to create', required=True)
    group.AddArgument(GetRequireSpecificAllocation())
    group.AddArgument(GetVmCountFlag())
    group.AddArgument(GetMinCpuPlatform())
    group.AddArgument(GetMachineType())
    group.AddArgument(GetLocalSsdFlag())
    group.AddArgument(GetAcceleratorFlag())
    group.AddToParser(parser)
def generate_summary_table(bib):
    """Description of generate_summary_table

    Parse dl4m.bib to create a simple and readable ReadMe.md table.
    Rewrites README.md in place: the existing article table rows are
    replaced by a freshly generated table, and the summary-count bullet
    lines (papers, researchers, tasks, datasets, architectures) are
    regenerated from the bib data.

    :param bib: parsed bibliography the counts and article list come from
    """
    nb_articles = str(get_nb_articles(bib))
    nb_authors = str(get_authors(bib))
    nb_tasks = str(get_field(bib, "task"))
    nb_datasets = str(get_field(bib, "dataset"))
    nb_archi = str(get_field(bib, "architecture"))
    articles = generate_list_articles(bib)
    readme_fn = "README.md"
    readme = ""
    # Ensures the regenerated table is inserted only once, in place of the
    # first markdown table row encountered; subsequent rows are dropped.
    pasted_articles = False
    with open(readme_fn, "r", encoding="utf-8") as filep:
        for line in filep:
            # "| x" at the start (but not "|  ") marks an old table row.
            if "| " in line[:2] and line[2] != " ":
                if not pasted_articles:
                    readme += articles
                    pasted_articles = True
            elif "papers referenced" in line:
                readme += "- " + nb_articles + " papers referenced. "
                readme += "See the details in [dl4m.bib](dl4m.bib).\n"
            elif "unique researchers" in line:
                readme += "- " + nb_authors + " unique researchers. "
                readme += "See the list of [authors](authors.md).\n"
            elif "tasks investigated" in line:
                readme += "- " + nb_tasks + " tasks investigated. "
                readme += "See the list of [tasks](tasks.md).\n"
            elif "datasets used" in line:
                readme += "- " + nb_datasets + " datasets used. "
                readme += "See the list of [datasets](datasets.md).\n"
            elif "architecture used" in line:
                readme += "- " + nb_archi + " architectures used. "
                readme += "See the list of [architectures](architectures.md).\n"
            else:
                readme += line
    with open(readme_fn, "w", encoding="utf-8") as filep:
        filep.write(readme)
    print("New ReadMe generated")
def validate(request):
    """
    Validate actor name exists in database before searching.
    If more than one name fits the criteria, selects the first one
    and returns the id.
    Won't render.

    Returns a 404 JsonResponse listing every invalid field, or a 200
    JsonResponse with the resolved actor ids.
    """
    search_for = request.GET.get('search-for', default='')
    start_from = request.GET.get('start-from', default='')
    errors = {}
    search_for_actor = get_actor(search_for)
    start_from_actor = get_actor(start_from)
    # BUGFIX: previously each failure reassigned data['errors'] with a fresh
    # dict, so when both names were invalid only the second error survived.
    # Collect into one dict so the client sees every invalid field.
    if not search_for_actor:
        errors['search-for'] = 'Not a valid name'
    if not start_from_actor:
        errors['start-from'] = 'Not a valid name'
    if errors:
        data = {'errors': errors, 'status': 'false'}
        return JsonResponse(data, status=404)
    data = {
        'search-for': search_for_actor.id,
        'start-from': start_from_actor.id,
    }
    return JsonResponse(data)
def merge(left, right, on=None, left_on=None, right_on=None):
    """Merge two DataFrames using explicit-comms.

    An explicit-comms counterpart of Dask's ``DataFrame.merge()`` that
    supports "inner" joins only.  Requires an active client.

    Notice
    ------
    As a side effect, all partitions located on the same worker are
    concatenated, so the returned dataframe has one partition per worker.

    Parameters
    ----------
    left: dask.dataframe.DataFrame
    right: dask.dataframe.DataFrame
    on : str or list of str
        Column or index level names to join on. These must be found in both
        DataFrames.
    left_on : str or list of str
        Column to join on in the left DataFrame.
    right_on : str or list of str
        Column to join on in the right DataFrame.

    Returns
    -------
    df: dask.dataframe.DataFrame
        Merged dataframe
    """

    def listify(columns):
        # Normalize a truthy "on"-style argument to a list of column names.
        return [columns] if isinstance(columns, str) else list(columns)

    if on:
        on = listify(on)
    if left_on:
        left_on = listify(left_on)
    if right_on:
        right_on = listify(right_on)

    # Fall back to "on" for whichever side was not given explicitly.
    if left_on is None:
        left_on = on
    if right_on is None:
        right_on = on

    if not (left_on and right_on):
        raise ValueError(
            "Some combination of the on, left_on, and right_on arguments must be set"
        )

    return submit_dataframe_operation(
        comms.default_comms(),
        local_df_merge,
        df_list=(left, right),
        extra_args=(left_on, right_on),
    )
def extract_directory(directory):
    """Extract the bz2-compressed archive for `directory` if it has not
    been extracted yet (i.e. `directory` does not yet exist as a directory).
    """
    if os.path.isdir(directory):
        return
    # Unpack next to the target so the archive's top-level folder lands
    # at `directory`.
    parent, _ = os.path.split(os.path.abspath(directory))
    print('Unzipping...', end=' ', flush=True)
    unpack_archive(directory + '.tar.bz2', extract_dir=parent)
def estimate_fs(t):
    """Estimate the sampling rate of timestamp vector ``t``.

    The raw rate (median of 1/dt) is snapped to the nearest entry in a
    list of common hardware sampling rates.
    """
    common_rates = (
        2000, 1250, 1000, 600, 500, 300, 250, 240,
        200, 120, 75, 60, 50, 30, 25,
    )
    raw_estimate = np.median(1 / np.diff(t))
    return min(common_rates, key=lambda rate: abs(rate - raw_estimate))
def get_case_number(caselist):
    """Count the non-comment lines in the file ``caselist``.

    A line is a comment when its stripped form starts with ``#``.
    Blank lines are counted, matching the original behaviour.

    :param caselist: path of the case-list file.
    :return: number of non-comment lines.
    """
    with open(caselist, 'r') as casefile:
        # `not ...startswith(...)` replaces the unidiomatic
        # `...startswith('#') is False`, and sum() the manual counter.
        return sum(1 for line in casefile
                   if not line.strip().startswith('#'))
def test_bam_to_h5_h5(expected_fixture, dir_out, sample):
    """
    Test :py:const:`riboviz.workflow_r.BAM_TO_H5_R` H5 files for
    equality. See :py:func:`riboviz.h5.equal_h5`.

    :param expected_fixture: Expected data directory
    :type expected_fixture: str or unicode
    :param dir_out: Output directory
    :type dir_out: str or unicode
    :param sample: Sample name
    :type sample: str or unicode
    """
    file_name = h5.H5_FORMAT.format(sample)
    # Mirror the output directory's basename under the expected-data tree.
    out_dir_name = os.path.basename(os.path.normpath(dir_out))
    expected_file = os.path.join(
        expected_fixture, out_dir_name, sample, file_name)
    actual_file = os.path.join(dir_out, sample, file_name)
    h5.equal_h5(expected_file, actual_file)
def build_param_float_request(*, scenario: str, value: float, **kwargs: Any) -> HttpRequest:
    """Send a post request with header values "scenario": "positive", "value": 0.07 or "scenario":
    "negative", "value": -3.0.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword scenario: Send a post request with header values "scenario": "positive" or "negative".
    :paramtype scenario: str
    :keyword value: Send a post request with header values 0.07 or -3.0.
    :paramtype value: float
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    url = "/header/param/prim/float"
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Table-driven header serialization: (header key, serializer name,
    # value, wire format).  Insertion order matches the original.
    for key, ser_name, val, fmt in (
        ("scenario", "scenario", scenario, "str"),
        ("value", "value", value, "float"),
        ("Accept", "accept", "application/json", "str"),
    ):
        header_parameters[key] = _SERIALIZER.header(ser_name, val, fmt)
    return HttpRequest(method="POST", url=url, headers=header_parameters, **kwargs)
def async_entries_for_config_entry(
    registry: DeviceRegistry, config_entry_id: str
) -> List[DeviceEntry]:
    """Return entries that match a config entry."""
    return list(
        filter(
            lambda device: config_entry_id in device.config_entries,
            registry.devices.values(),
        )
    )
def closure_js_binary(**kwargs):
    """Invokes the actual closure_js_binary with defaults suitable for
    compiling non-test JS files.
    """
    defaults = {
        "compilation_level": "ADVANCED",
        "dependency_mode": "STRICT",
        "language": "ECMASCRIPT5_STRICT",
        "defs": [
            "--assume_function_wrapper",
            "--rewrite_polyfills=false",
            "--new_type_inf",
            "--export_local_property_definitions",
            "--language_out=ES5_STRICT",
            "--isolation_mode=IIFE",
            "--generate_exports",
            "--jscomp_warning=newCheckTypes",
            "--jscomp_off=newCheckTypesExtraChecks",
            "--hide_warnings_for=closure/goog",
        ],
    }
    # Callers' explicit settings win; only fill in what is missing.
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    _closure_js_binary_alias(**kwargs)
def load_material(name: str) -> Material:
    """Load a material from the materials library.

    Args:
        name (str): Name of material

    Returns:
        Material: Loaded material, or ``None`` if no material named
        ``name`` exists.  (The original docstring claimed a
        FileNotFoundError was raised, but the code catches it and only
        prints a message.)
    """
    try:
        with open(
                Path(__file__).parent.joinpath(f"resources/{name}.yaml"),
                "r") as f:
            matdict = yaml.full_load(f)
        try:
            # Thermal diffusivity lives in a separate CSV; a missing CSV
            # is tolerated and simply reported.
            matdict["properties"]["alpha"] = get_alpha(name)
        except FileNotFoundError:
            print(f"{name}.csv does not exist!")
        return create_material(**matdict["properties"])
    except FileNotFoundError:
        print(f"{name} is not an available material!")
def classify_checker(Classifier):
    """Type-check the classifier input.

    :param Classifier: object expected to be a
        :class:`classifier.BaseClassifier` whose ``classify()`` has run.
    :raises TypeError: if ``Classifier`` is not a ``BaseClassifier``.
    :raises ValueError: if ``Classifier.classify()`` has not been run.
    """
    if not isinstance(Classifier, classifier.BaseClassifier):
        # The original passed '%s' plus an extra argument to TypeError
        # (logging-style), so the message was never interpolated;
        # format it properly here.
        raise TypeError('Classifier must be a BaseClassifier, '
                        'was: %s' % type(Classifier))
    if Classifier.classified is None:
        raise ValueError('`Classifier.classify()` method must be run first.')
def reportBusniessModelSummaryView(request):
    """Report: summary of applicant counts per business model across all
    competition stages (register / screen / interview / site visit).

    (Translated from the original Thai docstring:
    รายงาน สรุปจำนวนผู้สมัครตามสถานะธุรกิจ ทุกขั้นตอน)
    """
    # Per-business-model counts.  The `state__in` lists encode which
    # workflow states count toward each stage.
    # NOTE(review): the state sets differ slightly between annotations
    # and the totals below (e.g. step_sitevisit uses [6, 8, 9, 10] while
    # total_summary uses [6, 8, 9, 10, 11]) — confirm this is intentional.
    queryset = SmeCompetition.objects \
        .values('enterpise__business_model', 'enterpise__business_model__name') \
        .annotate(step_register=Count('enterpise__business_model')) \
        .annotate(step_screen=Count('enterpise__business_model', filter=Q(state__in=[2,4,5, 6, 7, 8, 9, 10, 11]))) \
        .annotate(step_interview=Count('enterpise__business_model', filter=Q(state__in=[4, 6, 7, 8, 9, 10, 11]))) \
        .annotate(step_sitevisit=Count('enterpise__business_model', filter=Q(state__in=[6, 8, 9, 10]))) \
        .order_by('enterpise__business_model__name')
    # NOTE(review): total_sme is computed but never placed in the context
    # below — confirm whether it is still needed.
    total_sme = SmeCompetition.objects.filter(state__in=[6, 8, 9, 10, 11]).count()
    total_register = SmeCompetition.objects.filter(active=True).count()
    total_screen = SmeCompetition.objects.filter(active=True, state__in=[2,4,5,6,7,8,9,10,11]).count()
    total_interview = SmeCompetition.objects.filter(active=True, state__in=[4, 6, 7, 8, 9, 10, 11]).count()
    total_summary = SmeCompetition.objects.filter(active=True, state__in=[6, 8, 9, 10, 11]).count()
    context = {'queryset': queryset, 'total_register':total_register, 'total_screen':total_screen \
        ,'total_interview':total_interview, 'total_summary':total_summary}
    return render(request, 'app_sme12/report/businessmodel_summary.html', context)
def _get_announce_url(rjcode: str) -> str:
    """Build the DLsite announce URL for the given RJ code."""
    announce_url = _ANNOUNCE_URL.format(rjcode)
    return announce_url
def get_r_adv(x, decoder, it=1, xi=1e-1, eps=10.0):
    """Compute the virtual adversarial perturbation for input ``x``.

    Virtual Adversarial Training, https://arxiv.org/abs/1704.03976

    :param x: input tensor (detached internally; ``x`` is not modified).
    :param decoder: module producing class logits for ``x``.
    :param it: number of power-iteration steps estimating the direction.
    :param xi: finite-difference step size used during estimation.
    :param eps: norm of the returned perturbation.
    :return: perturbation tensor ``r_adv`` with the same shape as ``x``.
    """
    x_detached = x.detach()
    # Reference prediction; no gradients needed for it.
    with torch.no_grad():
        pred = F.softmax(decoder(x_detached), dim=1)
    # Random start direction in [-0.5, 0.5), normalized to unit L2 norm.
    d = torch.rand(x.shape).sub(0.5).to(x.device)
    d = _l2_normalize(d)
    for _ in range(it):
        d.requires_grad_()
        pred_hat = decoder(x_detached + xi * d)
        logp_hat = F.log_softmax(pred_hat, dim=1)
        adv_distance = F.kl_div(logp_hat, pred, reduction='batchmean')
        adv_distance.backward()
        # Follow the KL gradient toward the most sensitive direction,
        # then re-normalize for the next iteration.
        d = _l2_normalize(d.grad)
        decoder.zero_grad()
    return d * eps
def fetch_brats(datasetdir):
    """ Fetch/prepare the Brats dataset for pynet.

    Expects the MICCAI BraTS 2019 training data to already be downloaded
    under ``datasetdir``; converts the NIfTI volumes to two ``.npy``
    arrays (inputs and one-hot segmentations) plus a TSV of metadata,
    caching the result so subsequent calls are cheap.

    Parameters
    ----------
    datasetdir: str
        the dataset destination folder.

    Returns
    -------
    item: namedtuple
        a named tuple containing 'input_path', 'output_path', and
        'metadata_path'.
    """
    logger.info("Loading brats dataset.")
    def _crop(arr):
        # Fixed bounding box that trims empty margins from BraTS volumes.
        return arr[45: 195, 30: 220, 10: 145]
    def _norm(arr):
        # Z-score normalization computed over non-zero (brain) voxels only.
        logical_mask = (arr != 0)
        mean = arr[logical_mask].mean()
        std = arr[logical_mask].std()
        return ((arr - mean) / std).astype(np.single)
    traindir = os.path.join(datasetdir, "MICCAI_BraTS_2019_Data_Training")
    mapping_path = os.path.join(traindir, "name_mapping.csv")
    if not os.path.isfile(mapping_path):
        raise ValueError(
            "You must first download the Brats data in the '{0}' folder "
            "following the 'https://www.med.upenn.edu/sbia/brats2018/"
            "registration.html' instructions.".format(datasetdir))
    desc_path = os.path.join(datasetdir, "pynet_brats.tsv")
    input_path = os.path.join(datasetdir, "pynet_brats_inputs.npy")
    output_path = os.path.join(datasetdir, "pynet_brats_outputs.npy")
    # The TSV acts as the cache marker: only rebuild when it is missing.
    if not os.path.isfile(desc_path):
        df = pd.read_csv(mapping_path, sep=",")
        arr = df[["BraTS_2019_subject_ID", "Grade"]].values
        input_dataset = []
        output_dataset = []
        nb_subjects = len(arr)
        with progressbar.ProgressBar(max_value=nb_subjects,
                                     redirect_stdout=True) as bar:
            for cnt, (sid, grade) in enumerate(arr):
                logger.debug("Processing {0}...".format(sid))
                datadir = os.path.join(traindir, grade, sid)
                # Stack one normalized, cropped volume per modality.
                # NOTE(review): nib's get_data() is a deprecated API —
                # confirm the pinned nibabel version still supports it.
                data = []
                for mod in MODALITIES:
                    path = os.path.join(
                        datadir, "{0}_{1}.nii.gz".format(sid, mod))
                    data.append(_norm(_crop(nib.load(path).get_data())))
                data = np.asarray(data)
                input_dataset.append(data)
                path = os.path.join(datadir, "{0}_seg.nii.gz".format(sid))
                _arr = nib.load(path).get_data()
                # One-hot encode the segmentation; BraTS labels are
                # 0 (background), 1, 2 and 4.
                data = []
                for value in (0, 1, 2, 4):
                    data.append(_crop(_arr == value))
                data = np.asarray(data)
                output_dataset.append(data)
                bar.update(cnt)
        input_dataset = np.asarray(input_dataset)
        np.save(input_path, input_dataset)
        output_dataset = np.asarray(output_dataset)
        np.save(output_path, output_dataset)
        dataset_desc = pd.DataFrame(
            arr, columns=["participant_id", "grade"])
        dataset_desc.to_csv(desc_path, sep="\t")
    return Item(input_path=input_path, output_path=output_path,
                metadata_path=desc_path)
def init_app(app):
    """
    Loads the entity modules.

    Currently this only wires up the URL routes; the pylint disable
    mentioned in the original refers to registration not performing any
    further action yet.

    Parameters
    ----------
    app (Flask): The flask application.
    """
    init_routes(app)
def get_factors(shoppers, n_components=4, random_state=903, **kwargs):
    """
    Mine factors that represent the shopper-level features in a
    compressed space.  The factors map simplified user input from the
    application onto the full feature space used in modeling.

    Args:
        shoppers (pd.DataFrame): full set of shoppers in feature data (train + test)
        n_components (int): number of factors to mine.  Defaults to 4 and should
            stay that way (the application UI is based on these 4 factors).
        random_state (int): random state for the factor analysis algorithm.
        kwargs: additional keyword arguments for sklearn.decomposition.FactorAnalysis

    Returns:
        pd.DataFrame: n_components rows by n_features columns; its values map
        factors onto the full feature set (on the standard-normal scale).
    """
    # Identifier / target columns must not participate in factor analysis.
    ignored = [col for col in ('user_id', 'n_orders', 'label')
               if col in shoppers.columns]
    features = shoppers.drop(columns=ignored)
    # Columns are on incommensurate scales, so standardize before fitting.
    feature_names = features.columns
    scaled = preprocessing.scale(features)
    fa = FactorAnalysis(n_components, random_state=random_state, **kwargs)
    fa.fit(scaled)
    return pd.DataFrame(fa.components_, columns=feature_names)
def cols_to_array(*cols, remove_na: bool = True) -> Column:
    """
    Build a column of ArrayType() from the user-supplied column list.

    Args:
        cols: columns to collect into the array.
        remove_na (optional): Remove nulls from array. Defaults to True.

    Returns:
        Column of ArrayType()
    """
    array_col = F.array(*cols)
    if not remove_na:
        return array_col
    return F.filter(array_col, lambda element: element.isNotNull())
def suppress() -> None:
    """Suppress output within context.

    NOTE(review): this is a generator function clearly intended for use
    as a context manager — it appears to rely on a
    ``@contextlib.contextmanager`` decorator that is not visible here;
    confirm it is present at the definition site.
    NOTE(review): only *stderr* is redirected, despite the name
    suggesting all output — confirm stdout should remain untouched.
    """
    with open(os.devnull, "w") as null:
        with redirect_stderr(null):
            yield
def _git_repo_status(repo):
    """Get current git repo status.

    :param repo: Path to directory containing a git repo
    :type repo: :class:`pathlib.Path()`
    :return: Repo status
    :rtype: dict
    """
    repo_status = {
        'path': repo
    }
    options = ['git', '-C', str(repo), 'status', '-s']
    changes = _run_cmd(options).stdout.decode()
    # bool() replaces the unidiomatic `True if len(changes) else False`.
    repo_status['uncommited changes'] = bool(changes)
    local, remote = _git_get_branches(repo)
    # NOTE(review): this computes *remote minus local*, i.e. branches
    # that exist only on the remote, despite the 'local only' key name —
    # confirm which semantics are intended before renaming anything.
    repo_status['local only branches'] = bool(set(remote) - set(local))
    repo_status['ahead of origin'] = _git_get_ahead(repo)
    return repo_status
def _parseList(s):
"""Validation function. Parse a comma-separated list of strings."""
return [item.strip() for item in s.split(",")] | 5,333,047 |
def true_segments_1d(segments,
                     mode=SegmentsMode.CENTERS,
                     max_gap=0,
                     min_length=0,
                     name=None):
  """Labels contiguous True runs in segments.

  NOTE(review): relies on TF1-era ops (``tf.sparse_to_dense``,
  ``tf.segment_sum``, ``tf.to_int32``, graph-mode ``tf.cond``) — porting
  to TF2 requires ``tf.compat.v1``.

  Args:
    segments: 1D boolean tensor.
    mode: The SegmentsMode. Returns the start of each segment (STARTS), or the
      rounded center of each segment (CENTERS).
    max_gap: Fill gaps of length at most `max_gap` between true segments. int.
    min_length: Minimum length of a returned segment. int.
    name: Optional name for the op.

  Returns:
    run_centers: int32 tensor. Depending on `mode`, either the start of each
      True run, or the (rounded) center of each True run.
    run_lengths: int32; the lengths of each True run.
  """
  with tf.name_scope(name, "true_segments", [segments]):
    segments = tf.convert_to_tensor(segments, tf.bool)
    run_starts, run_lengths = _segments_1d(segments, mode=SegmentsMode.STARTS)
    # Take only the True runs. After whichever run is True first, the True runs
    # are every other run.
    first_run = tf.cond(
        # First value is False, or all values are False. Handles empty segments
        # correctly.
        tf.logical_or(tf.reduce_any(segments[0:1]), ~tf.reduce_any(segments)),
        lambda: tf.constant(0),
        lambda: tf.constant(1))
    num_runs = tf.shape(run_starts)[0]
    run_nums = tf.range(num_runs)
    is_true_run = tf.equal(run_nums % 2, first_run % 2)
    # Find gaps between True runs that can be merged.  A mergeable gap is a
    # False run strictly between two True runs (never leading or trailing).
    is_gap = tf.logical_and(
        tf.not_equal(run_nums % 2, first_run % 2),
        tf.logical_and(
            tf.greater(run_nums, first_run), tf.less(run_nums, num_runs - 1)))
    fill_gap = tf.logical_and(is_gap, tf.less_equal(run_lengths, max_gap))
    # Segment the consecutive runs of True or False values based on whether they
    # are True, or are a gap of False values that can be bridged. Then, flatten
    # the runs of runs.
    runs_to_merge = tf.logical_or(is_true_run, fill_gap)
    run_of_run_starts, _ = _segments_1d(runs_to_merge, mode=SegmentsMode.STARTS)
    # Get the start of every new run from the original run starts.
    merged_run_starts = tf.gather(run_starts, run_of_run_starts)
    # Make an array mapping the original runs to their run of runs. Increment
    # the number for every run of run start except for the first one, so that
    # the array has values from 0 to num_run_of_runs.
    merged_run_inds = tf.cumsum(
        tf.sparse_to_dense(
            sparse_indices=tf.cast(run_of_run_starts[1:, None], tf.int64),
            output_shape=tf.cast(num_runs[None], tf.int64),
            sparse_values=tf.ones_like(run_of_run_starts[1:])))
    # Sum the lengths of the original runs that were merged.
    merged_run_lengths = tf.segment_sum(run_lengths, merged_run_inds)
    if mode is SegmentsMode.CENTERS:
      # Center = start + floor((length - 1) / 2), i.e. the rounded-down middle.
      merged_starts_or_centers = (
          merged_run_starts + tf.floordiv(merged_run_lengths - 1, 2))
    else:
      merged_starts_or_centers = merged_run_starts
    # If there are no true values, increment first_run to 1, so we will skip
    # the single (false) run.
    first_run += tf.to_int32(tf.logical_not(tf.reduce_any(segments)))
    merged_starts_or_centers = merged_starts_or_centers[first_run::2]
    merged_run_lengths = merged_run_lengths[first_run::2]
    # Only take segments at least min_length long.
    is_long_enough = tf.greater_equal(merged_run_lengths, min_length)
    is_long_enough.set_shape([None])
    merged_starts_or_centers = tf.boolean_mask(merged_starts_or_centers,
                                               is_long_enough)
    merged_run_lengths = tf.boolean_mask(merged_run_lengths, is_long_enough)
    return merged_starts_or_centers, merged_run_lengths
def get_user_language_keyboard(user):
    """Compile the inline keyboard for picking the user's language."""
    # One row per supported language.
    rows = [
        [
            InlineKeyboardButton(
                language,
                callback_data=f'{CallbackType.user_change_language.value}:{user.id}:{language}',
            )
        ]
        for language in supported_languages
    ]
    # Link to the i18n repo so users can contribute missing languages.
    github_url = 'https://github.com/Nukesor/ultimate-poll-bot/tree/master/i18n'
    add_language_text = i18n.t('keyboard.add_new_language', locale=user.locale)
    rows.append([InlineKeyboardButton(text=add_language_text, url=github_url)])
    rows.append([get_back_to_settings_button(user)])
    return InlineKeyboardMarkup(rows)
def splitDataSet(dataSet, index, value):
    """Split the data set, keeping rows whose ``index`` feature equals ``value``.

    The matched feature column itself is removed from each returned row.
    (Docstring translated from the original Chinese.)

    :param dataSet: data set to split (list of feature lists).
    :param index: index of the feature column to split on.
    :param value: feature value a row must have to be kept.
    :return: rows with ``featVec[index] == value``, minus column ``index``.
    """
    # List concatenation replaces the slice-then-extend dance.
    return [featVec[:index] + featVec[index + 1:]
            for featVec in dataSet
            if featVec[index] == value]
def weights_init(module, nonlinearity="relu"):
    """Initialize a module and all its descendents.

    Parameters
    ----------
    module : nn.Module
        module to initialize.
    nonlinearity : str
        name of the nonlinearity passed to the Kaiming initializers
        (controls the gain), e.g. "relu".
    """
    # loop over direct children (not grand children)
    for m in module.children():
        # all standard layers
        if isinstance(m, torch.nn.modules.conv._ConvNd):
            # used in https://github.com/brain-research/realistic-ssl-evaluation/
            nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity=nonlinearity)
            try:
                nn.init.zeros_(m.bias)
            except AttributeError:
                # bias=False convolutions have no bias to zero.
                pass
        elif isinstance(m, nn.Linear):
            nn.init.kaiming_uniform_(m.weight, nonlinearity=nonlinearity)
            try:
                nn.init.zeros_(m.bias)
            except AttributeError:
                pass
        elif isinstance(m, nn.BatchNorm2d):
            try:
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            except AttributeError:  # affine = False
                pass
        elif hasattr(m, "reset_parameters"):  # if has a specific reset
            m.reset_parameters()
            #! don't go in grand children because you might have specifc weights you don't want to reset
        else:
            # No specific reset available: recurse into this child's children.
            weights_init(m, nonlinearity=nonlinearity)
def spam(a, b, c):
    """Return ``a * b + c``."""
    product = a * b
    return product + c
def show_rules(cli, nick, chan, rest):
    """Displays the rules.

    :param cli: IRC client connection used to send the reply.
    :param nick: nick that issued the command.
    :param chan: channel the command came from.
    :param rest: remainder of the command line (unused here).
    """
    reply(cli, nick, chan, var.RULES)
def in_skill_product_response(handler_input):
    """Get the In-skill product response from monetization service.

    :param handler_input: HandlerInput for the current request.
    :return: InSkillProductsResponse or Error from the monetization
        service, queried for the request's locale.
    """
    # The original had a second bare string literal here (a no-op
    # statement); its type information now lives in the docstring.
    locale = handler_input.request_envelope.request.locale
    monetization = handler_input.service_client_factory.get_monetization_service()
    return monetization.get_in_skill_products(locale)
def ipaddr(
    value: typing.Union[str, int],
    query: typing.Optional[str] = None,
) -> str:
    """Filter IP addresses and networks.

    .. versionadded:: 1.1

    Implements Ansible `ipaddr filter
    <https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters_ipaddr.html>`_.

    :param value: IP address or network to filter (string or integer form).
    :param query: optional ipaddr query string; semantics are delegated
        to ``_utils.ip_filter``.
    :return: the filter result produced by ``_utils.ip_filter``.
    """
    return _utils.ip_filter(value, query=query)
def Run3dTests(tally):
    """Run all of the 3d tests on a single Globe."""
    unpacker = Unpacker("../test_data/test.glb", False)
    # Same tests, same order as before — just driven by a table.
    globe_tests = (
        TestGlobeImageryPacket,
        TestGlobeTerrainPacket,
        TestGlobeVectorPacket,
        TestGlobeQtPacket,
        TestGlobeDbRoot,
        TestGlobeFile,
    )
    for test in globe_tests:
        RunTest(tally, test, unpacker)
def npoints_between(lon1, lat1, depth1, lon2, lat2, depth2, npoints):
    """
    Find a list of specified number of points between two given ones that are
    equally spaced along the great circle arc connecting given points.

    :param float lon1, lat1, depth1:
        Coordinates of a point to start from. The first point in a resulting
        list has these coordinates.
    :param float lon2, lat2, depth2:
        Coordinates of a point to finish at. The last point in a resulting
        list has these coordinates.
    :param npoints:
        Integer number of points to return. First and last points count,
        so if there have to be two intervals, ``npoints`` should be 3.
    :returns:
        Tuple of three 1d numpy arrays: longitudes, latitudes and depths
        of resulting points respectively.

    Finds distance between two reference points and calls
    :func:`npoints_towards`.
    """
    horizontal_distance = geodetic_distance(lon1, lat1, lon2, lat2)
    vertical_distance = depth2 - depth1
    heading = azimuth(lon1, lat1, lon2, lat2)
    lons, lats, depths = npoints_towards(
        lon1, lat1, depth1, heading,
        horizontal_distance, vertical_distance, npoints
    )
    # Pin the final point exactly to the requested end coordinates so
    # rounding inside npoints_towards cannot drift it.
    lons[-1] = lon2
    lats[-1] = lat2
    depths[-1] = depth2
    return lons, lats, depths
def maxPoolLayer(x, kHeight, kWidth, strideX, strideY, name, padding = "SAME"):
    """Max-pooling over the two spatial axes of an NHWC tensor.

    :param x: input tensor (batch, height, width, channels), per the
        ``[1, h, w, 1]`` ksize layout below.
    :param kHeight, kWidth: pooling window size.
    :param strideX, strideY: window strides.  NOTE(review): strideX lands
        on the height axis and strideY on the width axis here — confirm
        the naming matches callers' intent.
    :param name: op name.
    :param padding: "SAME" or "VALID".
    """
    return tf.nn.max_pool(x, ksize = [1, kHeight, kWidth, 1],
                          strides = [1, strideX, strideY, 1], padding = padding, name = name)
def coo2st(coo):
    """Convert a scipy sparse COO matrix to a torch sparse FloatTensor.

    INPUT
        coo - matrix in sparse coo_matrix format
    OUTPUT
        the same matrix as a torch.sparse tensor
    """
    # Stack (row, col) into the 2 x nnz index matrix torch expects.
    row_col = np.vstack((coo.row, coo.col))
    indices = torch.LongTensor(row_col)
    values = torch.FloatTensor(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
def test_stability(
    percentage_to_remove,
    x_features,
    y_labels,
    outer_cv_splits,
    inner_cv_splits,
    hyperparameter_space,
    model_name,
    max_iter=1000,
    export=True
):
    """Train model after removing specified features.

    Train elastic net model within a defined hyperparameter space via a nested cross validation after removing a
    percentage of the data set.

    NOTE(review): the feature-removal step is not implemented yet (see
    the TODOs below) — ``percentage_to_remove`` is currently unused and
    the full feature matrix is trained on.

    :param int percentage_to_remove: percentage of pathways that will be removed [0,100]
    :param numpy.array x_features: 2D matrix of pathway scores and samples
    :param list y_labels: class labels of samples
    :param int outer_cv_splits: number of folds for cross validation split in outer loop
    :param int inner_cv_splits: number of folds for cross validation split in inner loop
    :param list hyperparameter_space: list of hyperparameters for l1 and l2 priors
    :param str model_name: name of the model
    :param int max_iter: default to 1000 to ensure convergence
    :param bool export: forwarded to ``train_elastic_net_model``
    :return: the results object produced by ``train_elastic_net_model``
    """
    # TODO: Remove X% of the x_features
    results, _ = train_elastic_net_model(
        x_features,
        y_labels,
        outer_cv_splits,
        inner_cv_splits,
        hyperparameter_space,
        model_name,
        max_iter=max_iter,
        export=export
    )
    # TODO: Return the features that were removed
    return results
def make_dataset(data, nafc):
    """Create a PsiData object from column based input.

    Parameters
    ----------
    data : sequence on length 3 sequences
        Psychometric data in colum based input,
        e.g.[[1, 1, 5], [2, 3, 5] [3, 5, 5]].
        Each row is (stimulus intensity, correct count, trial count).
    nafc : int
        Number of alternative choices in forced choice procedure.

    Returns
    -------
    data: PsiData
        Dataset object.
    """
    # Transpose rows -> columns: data[0] intensities, data[1] correct
    # counts, data[2] trial counts.
    data = np.array(data).T
    x = sfr.vector_double(map(float, data[0]))
    k = sfr.vector_int(map(int, data[1]))
    N = sfr.vector_int(map(int, data[2]))
    return sfr.PsiData(x,N,k,nafc)
def TorchFFTConv2d(a, K):
    """
    FFT tensor convolution of image a with kernel K

    Args:
        a (torch.Tensor): 1-channel Image as tensor with at least 2 dimensions.
                          Dimensions -2 & -1 are spatial dimensions and all other
                          dimensions are assumed to be batch dimensions
        K (torch.Tensor): 1-channel kernel as tensor with at least 2 dimensions.
    Return:
        Absolute value of the convolution of image a with kernel K, moved
        to the CPU.
    """
    # Pointwise multiplication in the frequency domain == spatial
    # convolution.  Keep distinct names instead of clobbering the
    # input parameters with their transforms.
    K_freq = torch_fft.rfft2(K)
    a_freq = torch_fft.rfft2(a)
    img_conv = torch_fft.irfft2(TorchComplexMul(K_freq, a_freq))
    # `.abs()` replaces the roundabout `(x ** 2).sqrt()`.
    return img_conv.abs().cpu()
def pad(obj, pad_length):
    """
    Return a copy of the object whose piano-roll is zero-padded at the
    end along the time axis.

    Parameters
    ----------
    pad_length : int
        The length to pad along the time axis with zeros.
    """
    if not isinstance(obj, Track):
        raise TypeError("Support only `pypianoroll.Track` class objects")
    padded = deepcopy(obj)
    padded.pad(pad_length)
    return padded
def main():
    """Load possessions from pbpstats for all seasons on file, and store them"""
    # Enumerate every (league, year, season_type) triple already on disk.
    season_info = seasons_on_file()
    for league, year, season_type in zip(
        season_info["league"], season_info["year"], season_info["season_type"]
    ):
        logger.info(f"Loading season data for {league} {year} {season_type} from file")
        season = season_from_file(league, year, season_type)
        logger.info(
            f"Parsing possessions data for {league} {year} {season_type} from pbpstats"
        )
        possessions = possessions_from_season(season)
        logger.info(f"Saving possessions data for {league} {year} {season_type}")
        save_possessions(possessions)
    logger.info("Complete")
def validate_server_object(server_object):
    """
    Validate an OpenAPI server object.

    :param server_object: mapping with a "url" key and optional
        "description" / "variables" keys.
    :raises ValidationError: on a non-dict input, an unknown field, an
        invalid or missing url, or an invalid variables object.
    """
    spec_url = "http://swagger.io/specification/#serverObject"
    if not isinstance(server_object, dict):
        raise ValidationError("Invalid server object. See {url}".format(url=spec_url))
    for field, field_value in server_object.items():
        if field not in ("url", "description", "variables"):
            raise ValidationError(
                'Invalid server object. Unknown field "{field}". See {url}'.format(field=field, url=spec_url)
            )
        if field == "variables":
            validate_server_variables_object(field_value)
        elif field == "url" and not validate_url(field_value):
            raise ValidationError("Invalid url. See {url}".format(url=spec_url))
    if "url" not in server_object:
        raise ValidationError('Invalid server object. Missing field "url"')
def stat_mtime(stat):
    """Returns the mtime field from the results returned by os.stat().

    Index 8 is ``stat.ST_MTIME``; indexing (rather than ``.st_mtime``)
    keeps this working for plain 10-tuples as well as ``os.stat_result``.
    """
    return stat[8]
def from_yaml_dictionary(yaml_gra_dct, one_indexed=True):
    """ read the graph from a yaml dictionary

    :param yaml_gra_dct: mapping with 'atoms' and 'bonds' sub-mappings as
        produced by the corresponding yaml writer.
    :param one_indexed: whether the input uses one-indexed atom keys;
        if so, the result is converted back to zero-indexing.
    :return: the reconstructed graph.
    """
    atm_dct = yaml_gra_dct['atoms']
    bnd_dct = yaml_gra_dct['bonds']
    # Collapse each atom's property mapping into the canonical property tuple.
    atm_dct = dict_.transform_values(
        atm_dct, lambda x: tuple(map(x.__getitem__, ATM_PROP_NAMES)))
    # Bond keys are serialized as 'i-j' strings; restore frozenset pairs.
    bnd_dct = dict_.transform_keys(
        bnd_dct, lambda x: frozenset(map(int, x.split('-'))))
    bnd_dct = dict_.transform_values(
        bnd_dct, lambda x: tuple(map(x.__getitem__, BND_PROP_NAMES)))
    gra = _create.from_atoms_and_bonds(atm_dct, bnd_dct)
    if one_indexed:
        # revert one-indexing if the input is one-indexed
        atm_key_dct = {atm_key: atm_key-1 for atm_key in atom_keys(gra)}
        gra = relabel(gra, atm_key_dct)
    return gra
def _check_no_current_table(new_obj, current_table):
    """ Raises exception if we try to add a relation or a column
    with no current table. """
    if current_table is not None:
        return
    msg = 'Cannot add {} before adding table'
    if isinstance(new_obj, Relation):
        raise NoCurrentTableException(msg.format('relation'))
    if isinstance(new_obj, Column):
        raise NoCurrentTableException(msg.format('column'))
def classifier_train(
    X_train,
    y_train,
    X_val,
    y_val,
    clf,
    k_fold_no=10,
    uo_sample_method=None,
    imbalance_ratio=1,
    print_results=False,
    train_on_all=False,
):
    """Trains a sklearn classifier using k-fold cross-validation. Returns the ROC_AUC score, with other
    parameters in a pandas df.

    :param X_train: training feature matrix (indexable by fold indices).
    :param y_train: training labels.
    :param X_val: validation feature matrix (used only when train_on_all).
    :param y_val: validation labels (used only when train_on_all).
    :param clf: unfitted sklearn estimator; cloned per fold.
    :param k_fold_no: number of stratified CV folds.
    :param uo_sample_method: under/over-sampling method.  NOTE(review):
        currently unused — the sampling call below is commented out.
    :param imbalance_ratio: sampling ratio; also unused at present.
    :param print_results: print per-fold and summary metrics.
    :param train_on_all: additionally refit on the full training set and
        score on (X_val, y_val).
    :return: (result_dict of aggregated metrics, fitted classifier).
        NOTE(review): when train_on_all is False, the returned classifier
        is whichever clone was fitted on the *last* fold — confirm that
        is acceptable to callers.

    To-Do:
     - When using SVM, only use under sampling when feature count over a certain size,
     otherwise will blow up
    """
    skfolds = StratifiedKFold(n_splits=k_fold_no, shuffle=True, random_state=42)
    # below code is modified from 'Hands on Machine Learning' by Geron (pg. 196)
    roc_auc_results = []
    auc_results = []
    precision_results = []
    recall_results = []
    f1_results = []
    if print_results == True:
        # print definitions of precision / recall (ANSI bold escapes)
        print(
            "\033[1m",
            "Precision:",
            "\033[0m",
            "What proportion of positive identifications were actually correct?",
        )
        print(
            "\033[1m",
            "Recall:",
            "\033[0m",
            "What proportion of actual positives were identified correctly?",
        )
    # implement cross-validation with
    for train_index, test_index in skfolds.split(X_train, y_train):
        # use clone to do a deep copy of model without copying attached data
        # https://scikit-learn.org/stable/modules/generated/sklearn.base.clone.html
        clone_clf = clone(clf)
        X_train_fold = X_train[train_index]
        y_train_fold = y_train[train_index]
        # get the test folds
        X_test_fold = X_train[test_index]
        y_test_fold = y_train[test_index]
        # # add over/under sampling (do this after scaling)
        # X_train_fold, y_train_fold = under_over_sampler(
        #     X_train_fold, y_train_fold, method=uo_sample_method, ratio=imbalance_ratio
        # )
        clone_clf.fit(X_train_fold, y_train_fold)
        final_clf = clone_clf
        (
            auc_score,
            roc_score,
            precision_result,
            recall_result,
            f1_result,
        ) = calculate_scores(clone_clf, X_test_fold, y_test_fold)
        auc_results.append(auc_score)
        precision_results.append(precision_result)
        recall_results.append(recall_result)
        f1_results.append(f1_result)
        roc_auc_results.append(roc_score)
        if print_results == True:
            print(
                "ROC: {:.3%} \t AUC: {:.3%} \t Pr: {:.3%} \t Re: {:.3%} \t F1: {:.3%}".format(
                    roc_score, auc_score, precision_result, recall_result, f1_result
                )
            )
    if print_results == True:
        print("\033[1m", "\nFinal Results:", "\033[0m")
        print(
            "ROC: {:.3%} \t AUC: {:.3%} \t Pr: {:.3%} \t Re: {:.3%} \t F1: {:.3%}".format(
                np.sum(roc_auc_results) / k_fold_no,
                np.sum(auc_results) / k_fold_no,
                np.sum(precision_results) / k_fold_no,
                np.sum(recall_results) / k_fold_no,
                np.sum(f1_results) / k_fold_no,
            )
        )
        # standard deviations
        print(
            "Std: {:.3%} \t Std: {:.3%} \t Std: {:.3%} \t Std: {:.3%} \t Std: {:.3%}".format(
                np.std(roc_auc_results),
                np.std(auc_results),
                np.std(precision_results),
                np.std(recall_results),
                np.std(f1_results),
            )
        )
    # Aggregate mean / std / min / max of each metric across the folds.
    result_dict = {
        "roc_auc_score_train": np.sum(roc_auc_results) / k_fold_no,
        "roc_auc_std_train": np.std(roc_auc_results),
        "roc_auc_min_train": np.min(roc_auc_results),
        "roc_auc_max_train": np.max(roc_auc_results),
        "auc_score_train": np.sum(auc_results) / k_fold_no,
        "auc_std_train": np.std(auc_results),
        "auc_min_train": np.min(auc_results),
        "auc_max_train": np.max(auc_results),
        "f1_score_train": np.sum(f1_results) / k_fold_no,
        "f1_std_train": np.std(f1_results),
        "f1_min_train": np.min(f1_results),
        "f1_max_train": np.max(f1_results),
        "precision_train": np.sum(precision_results) / k_fold_no,
        "precision_std_train": np.std(precision_results),
        "precision_min_train": np.min(precision_results),
        "precision_max_train": np.max(precision_results),
        "recall_train": np.sum(recall_results) / k_fold_no,
        "recall_std_train": np.std(recall_results),
        "recall_min_train": np.min(recall_results),
        "recall_max_train": np.max(recall_results),
    }
    # when to use ROC vs. precision-recall curves, Jason Brownlee http://bit.ly/38vEgnW
    # https://stats.stackexchange.com/questions/113326/what-is-a-good-auc-for-a-precision-recall-curve
    if train_on_all == True:
        # now scale and fit the data on the entire training set
        new_clf = clone(clf)
        new_clf.fit(X_train, y_train)
        (
            auc_score_val,
            roc_score_val,
            precision_result_val,
            recall_result_val,
            f1_result_val,
        ) = calculate_scores(new_clf, X_val, y_val)
        result_dict['auc_score_val'] = auc_score_val
        result_dict['roc_auc_score_val'] = roc_score_val
        final_clf = new_clf
    return result_dict, final_clf
def test_no_common_points():
    """Tests relative position with no common points"""
    cases = (
        # Circles far apart (externally disjoint).
        (Circle(Point(2.0, 1.0), 1.0), Circle(Point(6.0, 4.0), 2.0)),
        # One circle entirely inside the other (internally disjoint).
        (Circle(Point(2.0, 1.0), 9.0), Circle(Point(6.0, 4.0), 20.0)),
    )
    for first, second in cases:
        assert first.find_relative_position(second) == RelativePosition.NO_COMMON_POINTS
def html_code_envir(envir, envir_spec):
    """
    Return html tags that can be used to wrap formatted code

    This method was created to enhance modularization of code. See latex_code_envir in latex.py

    :param tuple[str, str, str] envir: code blocks arguments e.g. ('py','cod','-h')
    :param str envir_spec: optional typesetting of code blocks
    :return: tuple of html tags, e.g. ('<pre>','</pre>')
    :rtype: [str, str]
    """
    # Execution-only blocks ('-e') are never wrapped, and output blocks
    # ('-out') are dropped when configured to ignore output.
    hide = (envir_spec.endswith('-e')
            or (envir_spec.endswith('-out') and option('ignore_output')))
    if hide:
        return '', ''
    return '<pre>', '</pre>'
def get_data_from_epndb(pulsar):
    """
    Searches the EPN database and returns all information of the chosen pulsar

    Parameters:
    -----------
    pulsar: string
        The name of the pulsar to search for

    Returns:
    --------
    pulsar_dict: dictionary
        A dictionary in which each value is a list corresponding to a
        different entry on the database. Keys:

        Ix, Iy, Qx, Qy, Ux, Uy, Vx, Vy: list
            Phase (x) and amplitude (y) values of the Stokes I, Q, U, V
            profiles. The Q/U/V lists are empty when the observation does
            not include that component.
        freq: float
            The frequency of the observation in MHz
        dm: float
            The measured dispersion measure
        rm: float
            The measured rotation measure. 0.0 when no rm is recorded
        site: string
            The location the pulsar was observed at

    Raises:
    -------
    NoEPNDBError:
        If the pulsar has no entries with frequency information.
    """
    # find all directories in epndb; the pulsar name is the second-to-last
    # path component of each json file
    all_json_dirs = glob.glob(EPNDB_LOC + "/json/*/*/*.json")
    json_pulsar_dirs = [d for d in all_json_dirs if d.split("/")[-2] == pulsar]
    logger.debug("All json directories: {}".format(len(all_json_dirs)))
    logger.debug("Positive json directories: {}".format(len(json_pulsar_dirs)))
    pulsar_dict = {"Ix": [], "Qx": [], "Ux": [], "Vx": [], "Iy": [], "Qy": [], "Uy": [], "Vy": [],
                   "freq": [], "dm": [], "rm": [], "site": []}
    for file_path in json_pulsar_dirs:
        with open(file_path) as json_file:
            data = json.load(json_file)
        header = data["hdr"]
        series = data["series"]
        # Ignore anything without frequency info
        if "freq" not in header:
            continue
        pulsar_dict["freq"].append(float(header["freq"]))
        pulsar_dict["dm"].append(float(header["dm"]))
        # Bug fix: some entries lack an "rm" key. The documented contract is
        # to report 0.0 in that case, but the original raised KeyError.
        pulsar_dict["rm"].append(float(header.get("rm", 0.0)))
        pulsar_dict["site"].append(header["site"])
        # Each series entry is a list of (x, y) pairs; split into the
        # parallel <Stokes>x / <Stokes>y lists, empty when absent.
        for stokes in ("I", "Q", "U", "V"):
            if stokes in series:
                pulsar_dict[stokes + "x"].append([k[0] for k in series[stokes]])
                pulsar_dict[stokes + "y"].append([k[1] for k in series[stokes]])
            else:
                pulsar_dict[stokes + "x"].append([])
                pulsar_dict[stokes + "y"].append([])
    # sort by frequency
    if len(pulsar_dict["freq"]) > 0:
        pulsar_dict = sort_pulsar_dict(pulsar_dict)
    else:
        raise NoEPNDBError("Pulsar not on the EPNDB!")
    return pulsar_dict
def apply_templates(X, templates):
    """
    Generate features for an item sequence by applying feature templates.

    A feature template is a tuple of (name, offset, pattern) triples, where
    name and offset specify a field name and a position offset from which
    the template extracts a value, and pattern is a compiled regex that must
    match that value. Generated features are appended to the 'F' field of
    each item in the sequence (mutated in place).

    @type X: list of mapping objects
    @param X: The item sequence.
    @type templates: iterable of tuples of (str, int, pattern)
    @param templates: The feature templates.
    """
    n_items = len(X)
    for template in templates:
        feature_name = '|'.join('%s[%d]' % (field, offset)
                                for field, offset, _ in template)
        for pos in range(n_items):
            matched_patterns = []
            for field, offset, pattern in template:
                target = pos + offset
                # Offset falls outside the sequence: this template cannot
                # apply at this position.
                if not 0 <= target < n_items:
                    matched_patterns = []
                    break
                if pattern.search(X[target][field]) is None:
                    # One pattern failed to match, so the whole template fails.
                    matched_patterns = None
                    break
                matched_patterns.append(pattern.pattern)
            # Only emit a feature when every triple matched and at least one
            # value was collected (empty templates produce nothing).
            if matched_patterns:
                X[pos]['F'].append('%s=%s' % (feature_name, '|'.join(matched_patterns)))
def _variants_fields(fields, exclude_fields, info_ids):
    """Decide which fields to extract when loading variants.

    When no fields are requested, all standard variant fields plus every
    declared INFO field are extracted. Requested fields that are neither
    standard nor declared in the INFO header are still extracted, but a
    warning is printed to stderr for each one.
    """
    if fields is None:
        # no fields specified by user: default to everything
        fields = config.STANDARD_VARIANT_FIELDS + info_ids
    else:
        # warn about fields not declared anywhere, but still allow them
        for f in fields:
            known = (f in config.STANDARD_VARIANT_FIELDS) or (f in info_ids)
            if not known:
                print('WARNING: no INFO definition found for field %s' % f,
                      file=sys.stderr)
    # apply exclusions, if any
    if exclude_fields is not None:
        fields = [f for f in fields if f not in exclude_fields]
    return tuple(fields)
def make_rta_basecalls_1460(intensities_dir):
    """
    Construct an artificial RTA BaseCalls directory for testing.

    Creates <intensities_dir>/BaseCalls (if missing) and copies the test
    fixture 'rta_basecalls_config.xml' into it as 'config.xml'.

    Returns the path of the BaseCalls directory.
    """
    target_dir = os.path.join(intensities_dir, 'BaseCalls')
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    fixture_config = os.path.join(TESTDATA_DIR, 'rta_basecalls_config.xml')
    shutil.copy(fixture_config, os.path.join(target_dir, 'config.xml'))
    return target_dir
def calc_very_restricted_wage_distribution(df):
    """Compute per-period mean and std of wages for agents under two choice restrictions.

    Bug fix: the original query was
    ``Policy == 'veryrestricted' and Choice == 'a' or Choice == 'b'``.
    Because ``and`` binds tighter than ``or``, every row with Choice 'b'
    matched regardless of Policy. The intended filter is the
    'veryrestricted' policy with Choice 'a' or 'b'.

    :param df: DataFrame with at least 'Policy', 'Choice', 'Period', 'Wage'.
    :return: DataFrame indexed by Period with 'mean' and 'std' of Wage.
    """
    return (
        df.query("Policy == 'veryrestricted' and Choice in ['a', 'b']")
        .groupby(["Period"])["Wage"]
        .describe()[["mean", "std"]]
    )
def eda2(data: pd.DataFrame):
    """
    Visualisation helper (version 2) for the data produced by eda_metric.

    Prints the label/query counts, adds a descending-rank column per method
    (mutating ``data`` in place), and prints each positive-label row with
    its text and the integer ranks of every method.

    :param data: DataFrame with 'label', 'text' and one score column per
        method in SWEM/BERT/SBERT/Ensemble.
    :return: None
    """
    print(f"Label Count: {data['label'].sum()}, Query Count: {len(data)}")
    methods = ["SWEM", "BERT", "SBERT", "Ensemble"]
    for method in methods:
        data[f"{method}_rank"] = data[method].rank(ascending=False)
    positives = data[data.label == 1]
    rows = zip(positives["text"], positives["SWEM_rank"],
               positives["BERT_rank"], positives["SBERT_rank"],
               positives["Ensemble_rank"])
    for text, swem, bert, sbert, ensemble in rows:
        print(text.replace("\n", "[SEP]"), int(swem), int(bert), int(sbert), int(ensemble))
def compute_boundary_distance(idx_row, params, path_out=''):
    """ compute nearest distance between two segmentation contours

    Loads an image and its annotated segmentation, computes a SLIC
    superpixel segmentation of the image, and measures boundary distances
    between the annotation contours and the superpixel contours.

    :param (int, str) idx_row: pair of (index, row) as produced by
        DataFrame.iterrows(); the row provides 'path_image' and 'path_segm'
    :param dict params: configuration with 'img_type', 'slic_size',
        'slic_regul' and 'slico' keys
    :param str path_out: output folder; when it is an existing directory, a
        visualisation figure is saved there as '<name>.jpg'
    :return (str, float): image name (without extension) and the mean
        boundary distance
    """
    _, row = idx_row
    # image name without extension, reused for the output figure filename
    name = os.path.splitext(os.path.basename(row['path_image']))[0]
    img = load_image(row['path_image'], params['img_type'])
    segm = load_image(row['path_segm'], '2d_segm')
    logging.debug('segment SLIC...')
    slic = seg_spx.segment_slic_img2d(img, params['slic_size'], params['slic_regul'], params['slico'])
    _, dists = seg_lbs.compute_boundary_distances(segm, slic)
    # Visualisation is optional: only when an existing output dir was given.
    if os.path.isdir(path_out):
        logging.debug('visualise results...')
        fig = tl_visu.figure_segm_boundary_dist(segm, slic)
        fig.savefig(os.path.join(path_out, name + '.jpg'))
        plt.close(fig)
    return name, np.mean(dists)
def distrib_one_v_max(
    adata: anndata,
    celltype: str,
    ax,
    gene_highlight: Iterable[str],
    partition_key: str = "CellType",
):
    """
    Strip-plot the one-vs-max values of a cell type, highlighting marker
    genes in red.

    Parameters
    ----------
    adata
        The corrected expression data.
    celltype
        Celltype to be plotted
    ax
        Matplotlib axes the strip plot is drawn on.
    gene_highlight
        List of genes to highlight on the plot, usually marker genes
    partition_key
        The key in adata.obs corresponding to the annotations to be used.
    Returns
    -------
    The seaborn axes object containing the plot.
    """
    one_v_max = one_v_max_matrix(adata, partition_key=partition_key)
    to_plot = one_v_max.loc[celltype]
    to_plot_trunc = to_plot[
        to_plot > 1
    ]  # The only relevant one_v_max are the ones >1
    # Only keep highlight genes that survive the >1 truncation above.
    gene_highlight = list(set(gene_highlight) & set(list(to_plot_trunc.index)))
    if gene_highlight:
        to_plot_highlight = to_plot_trunc[gene_highlight]
        # Drop highlighted genes from the base series so every gene is drawn
        # exactly once: red for highlights, default colour for the rest.
        to_plot_trunc = to_plot_trunc.drop(gene_highlight)
        out = sns.stripplot(
            y=to_plot_trunc, ax=ax, orient="v", s=3, linewidth=0.25
        )
        out = sns.stripplot(
            y=to_plot_highlight, ax=ax, orient="v", color="red", linewidth=0.5
        )
        out.axes.set_xticks([])
        if len(gene_highlight) == 1:
            # Single highlighted gene: report its rounded ratio in the title.
            spe_ovm = round(to_plot_highlight[0], 2)
            out.axes.set_title(
                label=f"{gene_highlight[0]} \n "
                f"{to_plot_trunc.name} onevmax={spe_ovm}",
                loc="center",
                fontsize="small",
            )
        else:
            out.axes.set_title(
                label=f"{to_plot_trunc.name}", loc="center", fontsize="small"
            )
        out.axes.set_ylabel(ylabel="")
    else:
        # No genes to highlight: plot everything in the default colour.
        out = sns.stripplot(
            y=to_plot_trunc, ax=ax, orient="v", s=3, linewidth=0.25
        )
        out.axes.set_title(
            label=f"{to_plot_trunc.name}", loc="center", fontsize="small"
        )
        out.axes.set_ylabel(ylabel="")
    return out
def create_angler(request, report_a_tag=False):
    """This view is used to create a new tag reporter / angler.

    When we create a new angler, we do not want to duplicate entries with
    the same first and last name by default. If there are already anglers
    with the same first and last name, add them to the response we return
    with the form and ask the user to confirm that this new user really
    does have the same name as an existing (but different) angler.

    :param request: incoming HTTP request.
    :param report_a_tag: when True, a successful creation redirects into
        the report-a-tag flow instead of the plain angler-reports view.
    """
    if request.method == "POST":
        form = CreateJoePublicForm(request.POST)
        if form.is_valid():
            angler = form.save()
            if report_a_tag:
                return redirect("tfat:report_a_tag_angler_reports", angler_id=angler.id)
            else:
                return redirect("tfat:angler_reports", angler_id=angler.id)
        else:
            # Invalid form: look up anglers already sharing this name so the
            # template can ask the user to confirm the apparent duplicate.
            first_name = form.cleaned_data.get("first_name")
            last_name = form.cleaned_data.get("last_name")
            anglers = JoePublic.objects.filter(
                first_name__iexact=first_name, last_name__iexact=last_name
            ).all()
            if len(anglers):
                return render(
                    request,
                    "tfat/angler_form.html",
                    {
                        "form": form,
                        "anglers": anglers,
                        "report_a_tag": report_a_tag,
                        "action": "Create New ",
                    },
                )
    else:
        form = CreateJoePublicForm()
    # GET requests and invalid POSTs without a name collision fall through
    # to re-render the (possibly bound) form here.
    return render(
        request,
        "tfat/angler_form.html",
        {"form": form, "report_a_tag": report_a_tag, "action": "Create New "},
    )
def override_template(view, template):
    """Override the template to be used.

    Use override_template in a controller method in order to change the template
    that will be used to render the response dictionary dynamically.

    The ``view`` argument is the actual controller method for which you
    want to replace the template.

    The ``template`` string passed in requires that
    you include the template engine name, even if you're using the default.

    So you have to pass in a template id string like::

       "genshi:myproject.templates.index2"

    future versions may make the `genshi:` optional if you want to use
    the default engine.
    """
    # Bug fix: the original used a bare ``except:`` here, which also
    # swallowed KeyboardInterrupt/SystemExit. Only the absence of the
    # decoration attributes should make this a no-op.
    try:
        engines = view.decoration.engines
    except AttributeError:
        return

    for content_type, content_engine in engines.items():
        # Split 'engine:template.path' and carry over the remaining engine
        # configuration entries unchanged.
        tmpl = template.split(':', 1)
        tmpl.extend(content_engine[2:])
        try:
            override_mapping = request._override_mapping
        except AttributeError:
            override_mapping = request._override_mapping = {}
        override_mapping.setdefault(default_im_func(view), {}).update({content_type: tmpl})
def peak_1d_binary_search_iter(nums):
    """Return the index of a peak element via iterative binary search.

    A peak is an element not smaller than its neighbours; when several
    exist, any one of their indices may be returned.

    Time complexity: O(log n).
    Space complexity: O(1).
    """
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if nums[mid] < nums[mid + 1]:
            # Rising to the right: a peak must exist in the right half.
            lo = mid + 1
        elif nums[mid] < nums[mid - 1]:
            # Rising to the left: a peak must exist in the left half.
            hi = mid - 1
        else:
            # Not smaller than either neighbour: mid is a peak.
            return mid
    # lo == hi: the search has converged on a peak.
    return lo
def add_site_users_sheet(ws, cols, lnth):
    """
    Populate a per-site users worksheet with cross-sheet Excel formulas.

    Header row and the first two columns mirror 'Total_Sites_MNO'; the
    remaining columns compute users per site as
    (Users_km2 * Area) / Total_Sites_MNO, guarded with IFERROR.

    :param ws: openpyxl worksheet to fill in.
    :param cols: sequence of column letters to populate.
    :param lnth: one past the last data row index.
    :return: the modified worksheet.
    """
    # Header row: straight references into 'Total_Sites_MNO'.
    for col in cols:
        cell = "{}1".format(col)
        ws[cell] = "='Total_Sites_MNO'!{}".format(cell)
    # First two columns (label columns): also straight references.
    for col in cols[:2]:
        for i in range(2, lnth):
            cell = "{}{}".format(col, i)
            ws[cell] = "='Total_Sites_MNO'!{}".format(cell)
    # Data columns: users per site, falling back to 0 on formula errors.
    for col in cols[2:]:
        for i in range(2, lnth):
            cell = "{}{}".format(col, i)
            part1 = "=IFERROR((Users_km2!{}*Area!{})/Total_Sites_MNO!{},0)".format(cell,cell,cell)
            ws[cell] = part1
            # Mark the formula as an array formula anchored on its own cell.
            ws.formula_attributes[cell] = {'t': 'array', 'ref': "{}:{}".format(cell, cell)}
    columns = ['C','D','E','F','G','H','I','J','K','L']
    ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)
    set_border(ws, 'A1:L{}'.format(lnth-1), "thin", "000000")
    return ws
def process(document, rtype=None, api=None):
    """Extract named entities from a texterra-annotated document.

    With ``rtype='entity'`` each result is a (text, tag) pair; otherwise
    each result is a (start, end, text, tag) tuple. Documents without the
    relevant annotation layer yield an empty list.
    """
    results = []
    annotations = document['annotations']
    if annotationName not in annotations:
        return results
    for token in annotations[annotationName]:
        span_text = document['text'][token['start']: token['end']]
        tag = token['value']['tag']
        if rtype == 'entity':
            results.append((span_text, tag))
        else:  # rtype == 'full'
            results.append((token['start'], token['end'], span_text, tag))
    return results
def get_out_hmm_path(new_afa_path):
    """Define an HMM file path for a given aligned fasta file path.

    '.afaa' (amino acid alignment) maps to '.hmm' and '.afna' (nucleotide
    alignment) maps to '.nhmm'.

    :param new_afa_path: path to the aligned fasta file.
    :return: path with the extension replaced by the matching HMM extension.
    :raises ValueError: for an unrecognised extension. (The original used
        an assert whose message referenced an undefined name ``query_file``,
        so the failure path raised NameError; assert is also stripped
        under ``python -O``.)
    """
    base, old_exten = new_afa_path.rsplit('.', 1)
    extension_map = {'afaa': 'hmm', 'afna': 'nhmm'}
    if old_exten not in extension_map:
        raise ValueError(
            'HMM extension could not be determined for input file: %s'
            % new_afa_path)
    return base + '.' + extension_map[old_exten]
def log_predictions(logger, config):
    """
    Log network predictions

    Builds a VGGish-based SSD model, restores its weights from checkpoint,
    and logs predictions for 40 samples drawn from the VOC validation set.

    :param logger: logger instance
    :param config: dictionary with configuration options (expects the keys
        'vggish_model_configuration', 'categories', 'model_checkpoint_path',
        'voc' and 'size_factor')
    """
    network = net.ml.VGGishNetwork(
        model_configuration=config["vggish_model_configuration"],
        categories_count=len(config["categories"]))

    session = tf.keras.backend.get_session()

    model = net.ml.SSDModel(session, network)
    model.load(config["model_checkpoint_path"])

    validation_samples_loader = net.data.VOCSamplesDataLoader(
        data_directory=config["voc"]["data_directory"],
        data_set_path=config["voc"]["validation_set_path"],
        categories=config["categories"],
        size_factor=config["size_factor"])

    # Default (anchor) boxes are shared across every logged prediction.
    default_boxes_factory = net.ssd.DefaultBoxesFactory(model_configuration=config["vggish_model_configuration"])

    iterator = iter(validation_samples_loader)

    # Log a fixed number (40) of predictions from the validation iterator.
    for _ in tqdm.tqdm(range(40)):

        log_single_prediction(
            logger=logger,
            model=model,
            default_boxes_factory=default_boxes_factory,
            samples_iterator=iterator,
            config=config)
def transformer_decoder_block(name,
                              n_layers,
                              x,
                              x_mask,
                              output_size,
                              init,
                              **kwargs):
    """A transformation block composed of transformer decoder layers.

    Args:
      name: variable scope.
      n_layers: number of transformer layers.
      x: input to transformation.
      x_mask: mask.
      output_size: output dimensionality.
      init: data-dependent init for weightnorm parameters.
      **kwargs: Contains hparams, disable_dropout, encoder_output,
        encoder_decoder_attention_bias and decoder_self_attention_bias

    Returns:
      outputs: Tensor of shape [batch_size, length, output_size].
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        hparams = kwargs.pop("hparams")
        disable_dropout = kwargs.pop("disable_dropout")
        if disable_dropout:
            # Zero all dropout rates on a copy so the caller's hparams
            # object is left untouched.
            hparams = copy.deepcopy(hparams)
            hparams.attention_dropout = 0.0
            hparams.layer_prepostprocess_dropout = 0.0
            hparams.relu_dropout = 0.0
        n_channels = common_layers.shape_list(x)[-1]
        if n_channels != hparams.hidden_size:
            # Align the transformer hidden size with the input channels,
            # again on a copy to avoid mutating shared hparams.
            hparams = copy.deepcopy(hparams)
            hparams.hidden_size = n_channels
        outputs = common_attention.add_timing_signal_1d(x)
        with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
            for layer_idx in range(n_layers):
                outputs = transformer_decoder_layer(
                    decoder_input=outputs,
                    layer_idx=layer_idx,
                    hparams=hparams,
                    **kwargs)
            outputs = common_layers.layer_preprocess(outputs, hparams)
        # Project to the requested output size with a weight-normalized
        # dense layer (initialized to output zeros via init_scale=0.0).
        outputs = dense_weightnorm(
            "h2o", outputs, output_size, x_mask, init_scale=0.0, init=init)
        return outputs
def compute_rolling_norm(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Compute a smooth moving-average norm (valid for p_moment >= 1).

    The underlying moving average coincides with compute_ema when
    min_depth == max_depth == 1; the norm is the p-th root of the rolling
    p-th moment.
    """
    rolling_moment = compute_rolling_moment(
        signal, tau, min_periods, min_depth, max_depth, p_moment
    )
    return rolling_moment ** (1.0 / p_moment)
def msort(liste, indice):
    """
    Return a list with the rows of `liste` sorted by the value at index
    `indice` in each row. Indices start from 0.

    Bug fix: the original decorate-sort-undecorate compared the full rows
    whenever two key values tied, which raises TypeError when the rows
    contain uncomparable values (e.g. dicts). Sorting with a key function
    is stable and never compares the rows themselves.
    """
    return sorted(liste, key=lambda row: row[indice])
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
    """
    Forward pass: prepare inputs and run the net forward.

    Take
    blobs: list of blobs to return in addition to output blobs.
    kwargs: Keys are input blob names and values are blob ndarrays.
            For formatting inputs for Caffe, see Net.preprocess().
            If None, input is taken from data layers.
    start: optional name of layer at which to begin the forward pass
    end: optional name of layer at which to finish the forward pass (inclusive)

    Give
    outs: {blob name: blob ndarray} dict.
    """
    if blobs is None:
        blobs = []

    # Resolve the start/end layer names to indices; default to the whole net.
    if start is not None:
        start_ind = list(self._layer_names).index(start)
    else:
        start_ind = 0

    if end is not None:
        end_ind = list(self._layer_names).index(end)
        # When stopping early, return the end layer's blob plus any
        # explicitly requested blobs instead of the net outputs.
        outputs = set([end] + blobs)
    else:
        end_ind = len(self.layers) - 1
        outputs = set(self.outputs + blobs)

    if kwargs:
        if set(kwargs.keys()) != set(self.inputs):
            raise Exception('Input blob arguments do not match net inputs.')
        # Set input according to defined shapes and make arrays single and
        # C-contiguous as Caffe expects.
        for in_, blob in kwargs.items():
            if blob.ndim != 4:
                raise Exception('{} blob is not 4-d'.format(in_))
            if blob.shape[0] != self.blobs[in_].num:
                raise Exception('Input is not batch sized')
            self.blobs[in_].data[...] = blob

    self._forward(start_ind, end_ind)

    # Unpack blobs to extract
    return {out: self.blobs[out].data for out in outputs}
def get_job_details():
    """Read metadata about the assets used by the algorithm.

    Pulls asset DIDs and the algorithm DID from environment variables and
    resolves the DDO json files under /data/ddos to per-asset input file
    paths under /data/inputs.

    :return: dict with keys 'dids', 'metadata', 'files', 'algo', 'secret'.
    """
    job = dict()
    # Bug fix: json.loads(None) raises TypeError when the DIDS env var is
    # unset; treat a missing/empty value as "no dids".
    dids_env = os.getenv("DIDS")
    job["dids"] = json.loads(dids_env) if dids_env else None
    job["metadata"] = dict()
    job["files"] = dict()
    job["algo"] = dict()
    job["secret"] = os.getenv("secret", None)
    algo_did = os.getenv("TRANSFORMATION_DID", None)
    if job["dids"] is not None:
        for did in job["dids"]:
            # get the ddo from disk
            filename = "/data/ddos/" + did
            # Bug fix: the original printed a placeholder-free f-string
            # ("(unknown)") instead of the actual filename.
            print(f"Reading json from {filename}")
            with open(filename) as json_file:
                ddo = json.load(json_file)
            # search for metadata service and map each listed file to its
            # mounted input path
            for service in ddo["service"]:
                if service["type"] == "metadata":
                    job["files"][did] = list()
                    index = 0
                    for file in service["attributes"]["main"]["files"]:
                        job["files"][did].append(
                            "/data/inputs/" + did + "/" + str(index)
                        )
                        index = index + 1
    if algo_did is not None:
        job["algo"]["did"] = algo_did
        job["algo"]["ddo_path"] = "/data/ddos/" + algo_did
    return job
def test_stack_hasnt_script_mandatory_key(sdk_client_fs: ADCMClient):
    """Test upload bundle with action without script field

    Uploading a bundle whose action definition omits the mandatory
    "script" key must be rejected with INVALID_OBJECT_DEFINITION.
    """
    stack_dir = utils.get_data_dir(__file__, "script_mandatory_key")
    # The upload itself is expected to fail with an API error message.
    with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
        sdk_client_fs.upload_from_fs(stack_dir)
    with allure.step("Check error: has no mandatory"):
        errorcodes.INVALID_OBJECT_DEFINITION.equal(e, 'There is no required key "script" in map.')
def main():
    """
    Entry point: create the sqlite3 history database and its schema
    (COUNT and HISTORY tables).
    """
    # DB file name
    dbname = 'history.sqlite3'
    # connect and create both tables, then persist and close
    connection = sqlite3.connect(dbname)
    cursor = connection.cursor()
    schema_scripts = (
        """
    CREATE TABLE COUNT (
    reading TEXT, count INTEGER NOT NULL,
    PRIMARY KEY(reading));
    """,
        """
    CREATE TABLE HISTORY (
    word TEXT,
    PRIMARY KEY(word));
    """,
    )
    for script in schema_scripts:
        cursor.executescript(script)
    connection.commit()
    connection.close()
def mre(actual: np.ndarray, predicted: np.ndarray, benchmark: np.ndarray = None):
    """Mean Relative Error: average of the element-wise relative errors."""
    relative_errors = _relative_error(actual, predicted, benchmark)
    return np.mean(relative_errors)
def _get_fusion_kernel(patch_size, fusion='gaussian', margin=0):
"""
Return a 3D kernel with the same size as a patch
that will be used to assign weights to each voxel of a patch
during the patch-based predictions aggregation.
:param patch_size: int or tuple; size of the patch
:param fusion: string; type of fusion.
:param margin: int; if margin > 0, mask the prediction at
a distance less margin to the border. Default value is 0.
If you the 'gaussian' mode you should not need to use
margin > 0.
"""
if isinstance(patch_size, int):
shape = (patch_size, patch_size, patch_size)
else:
shape = patch_size
assert np.all(np.array(shape) > 2 * margin), \
"Margin %d is too large for patch size %s" % (margin, str(shape))
# Create the kernel
if fusion == 'uniform': # Uniform kernel (default)
kernel = np.ones(shape)
elif fusion == 'gaussian': # Gaussian kernel
# Define the gaussian kernel
sigma = 1.
dist_border_center = 3 * sigma
x, y, z = np.meshgrid(
np.linspace(-dist_border_center, dist_border_center, shape[1]),
np.linspace(-dist_border_center, dist_border_center, shape[0]),
np.linspace(-dist_border_center, dist_border_center, shape[2]),
)
d = x*x + y*y + z*z
kernel = np.exp(-d / (2. * sigma**2))
else:
error_msg = "Only the fusion strategy %s are supported. Received %s" % \
(str(SUPPORTED_FUSION), fusion)
raise ArgumentError(error_msg)
# (optional) Set the contribution of voxels at distance less than margin
# to the border of the patch to 0
if margin > 0:
kernel[:margin, :, :] = 0.
kernel[-margin:, :, :] = 0.
kernel[:, :margin, :] = 0.
kernel[:, -margin:, :] = 0.
kernel[:, :, :margin] = 0.
kernel[:, :, -margin:] = 0.
return kernel | 5,333,095 |
def __validate_node__(node: dict):
    """ Validate that a given node's parameters are valid.

    Expected signature (note: the code checks the key 'init_args', not
    'args' as a previous version of this docstring claimed):
    {'class_name': 'NewWorker', 'parent': 'SpectrumCoordinator',
     'init_args': (), 'kwargs': {}}

    Args:
        - node: node signature to validate.

    Raises:
        TypeError: if any of the checked values has the wrong type.
        KeyError: implicitly, if any required key is missing from `node`.
    """
    if not isinstance(node['class_name'], str):
        raise TypeError('Class name given {} is not type str.'
                        .format(node['class_name']))
    if not isinstance(node['parent'], str):
        raise TypeError('Parent given {} is not of type str.'
                        .format(node['parent']))
    if not isinstance(node['init_args'], (list, tuple)):
        raise TypeError('Init_args {} is not of type list or tuple.'
                        .format(node['init_args']))
    if not isinstance(node['kwargs'], dict):
        raise TypeError('Kwargs {} is not of type dict.'
                        .format(node['kwargs']))
def transforma(
    vetor: list, matriz_linha: bool = True, T: callable = lambda x: transposta(x)
) -> list:
    """Turn a vector into a row matrix, or a column matrix via `T`.

    With matriz_linha=True the vector is wrapped as a single-row matrix;
    otherwise the wrapped row is transposed with `T` to yield a column.
    """
    wrapped = [vetor]
    if matriz_linha:
        return wrapped
    return T(wrapped)
def trace_partition_movement(
        points,
        distance,
        interval,
        break_interval=None,
        include_inaccurate=True):
    """Wrapper to optionally split stops at data gaps greater than
    break_interval, yielding movement segments from each resulting group.

    Bug fix: the original used the ``cond and a or b`` idiom. When
    trace_split_sparse returned an empty (falsy) list, that expression
    silently fell back to ``[points]``, undoing the split; an explicit
    if/else keeps the split result even when it is empty.
    """
    if break_interval:
        point_groups = trace_split_sparse(points, break_interval)
    else:
        point_groups = [points]
    for group in point_groups:
        for segment in trace_partition_movement_nobreak(
                group, distance, interval, include_inaccurate):
            yield segment
def post(event, context):
    """
    Process an AWS Log event and post it to a Slack Channel.

    :param event: AWS Log Event.
    :param context: Object to determine runtime info of the Lambda function.
        See http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
        for more info on context.
    :return: the string "Success" (both when posted and when suppressed).
    """
    WEBHOOK = read_webhook()
    event_processed = process_subscription_notification(event)
    for log_event in event_processed['logEvents']:
        message = log_event['message']
        # Suppress messages matching any ignore pattern.
        # NOTE(review): this returns on the first NO_ALERT match, so any
        # remaining log events in the same batch are also skipped — confirm
        # that is intended.
        for entry in NO_ALERT:
            if entry in message:
                return "Success"
        headers = {
            "content-type": "application/json"}
        # Slack attachment payload: colour, account alias and region give
        # the reader context for the raw log message.
        datastr = json.dumps({
            'attachments': [
                {
                    'color': determine_message_color(event_processed, message),
                    'pretext': get_account_alias(),
                    'author_name': determine_region(),
                    'text': message
                }
            ]})
        request = Request(WEBHOOK, headers=headers, data=datastr)
        uopen = urlopen(request)
        rawresponse = ''.join(uopen)
        uopen.close()
        assert uopen.code == 200
    return "Success"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.