content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import os
def check_url_note(url, metadata):
    """Decide whether a URL needs additional-file processing.

    Currently only newspapers.com clippings are recognised. A URL needs
    processing when it matches a known prefix and the corresponding local
    file (named by the mapped metadata key) is missing or not recorded.

    :param url: the URL noted for the entry
    :param metadata: dict that may hold the local file path under the
        processor's key (e.g. "clipping")
    :return: True if the url needs to be processed, False otherwise
    """
    # Maps a recognisable URL prefix to the metadata key holding the file.
    processors = {"https://www.newspapers.com/clip/": "clipping"}
    for prefix, meta_key in processors.items():
        if prefix not in url:
            continue
        if meta_key not in metadata:
            # Recognised URL but no file recorded yet -> process it.
            return True
        if not os.path.isfile(metadata[meta_key]):
            # Recorded file has gone missing -> process again.
            return True
    return False
def getSets(variable):
    """Return a mapping of adjective name -> that adjective's set for *variable*."""
    return {name: adjective.set
            for name, adjective in variable.adjectives.items()}
import random
def roll_unweighted_die(weights=None):
    """Roll a die whose faces occur according to ``weights``.

    A single uniform random number is drawn and mapped onto the cumulative
    weight ranges, so face ``i`` (1-based) comes up with probability
    ``weights[i - 1]``.

    Args:
        weights: (float list) the percentage chance of each face. The number
            of sides is ``len(weights)`` and the weights must sum to 1.0.

    Returns:
        int: the 1-based index of the rolled face.

    Raises:
        ValueError: weights is None or does not sum (within tolerance) to 1.0.
    """
    # Compare with a small tolerance instead of exact float equality:
    # e.g. sum([0.1] * 10) is 0.9999999999999999 in binary floating point
    # and the old exact check wrongly rejected it.
    if weights is None or abs(sum(weights) - 1.0) > 1e-9:
        raise ValueError('Weights (%s) do not add up to 1.0' % (weights,))
    # Roll the die
    fair_roll = random.random()
    # Which range did the selected value fall into?
    res = 0
    for weight in weights:
        res = res + 1
        if fair_roll <= weight:
            return res
        # If weights are [0.3, 0.2, 0.5], and 0.45 is rolled, removing the
        # weight from the roll lets us compare it against the next section
        # of the cumulative range.
        fair_roll -= weight
    # Floating point inaccuracy can leave a tiny residue; report the last
    # face if we fall out of the loop.
    return res
import hashlib
def md5checksum(afilepath):
    """Return the hexadecimal MD5 digest of the file at ``afilepath``.

    The file is read in 8 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(afilepath, 'rb') as handle:
        for chunk in iter(lambda: handle.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
def build_logfile_path(logname, logType, threadId, log_num=1):
    """ Helper to create a logfile path incorporating the thread ID in the filename before the file type suffix.

    @param logname: The valid base path without the thread Id.
    @type logname: Str
    @param logType: The type of log (e.g. fuzzing)
    @type logType: Str
    @param threadId: The thread id to be inserted into the path.
    @type threadId: Int
    @param log_num: The current log number of this type. Used when chunking logs
    @type log_num: Int

    @return: Formatted logfile path
    @rtype : Str
    """
    typeStart = logname.rfind(".txt")
    if typeStart == -1:
        # No ".txt" suffix present; the old slice-on-(-1) silently mangled
        # the name. Just append the qualifiers at the end instead.
        return f"{logname}.{logType}.{threadId!s}.{log_num!s}"
    return f"{logname[:typeStart]}.{logType}.{threadId!s}.{log_num!s}{logname[typeStart:]}"
def move_to_end(x, dim):
    """Permute the tensor so that axis ``dim`` becomes the last axis.

    Negative ``dim`` values are interpreted relative to the end, as usual.
    """
    ndim = len(x.shape)
    target = dim if dim >= 0 else ndim + dim
    order = [axis for axis in range(ndim) if axis != target]
    order.append(target)
    return x.permute(order)
def wrap_string(s, startstring= "(u'",
                endstring = "')", wrap=67):
    """Line-wrap a unicode string literal definition.

    Single quotes in *s* are escaped; the line is broken just before an
    escape's backslash whenever the current column exceeds *wrap*, and the
    continuation line re-opens the literal aligned under *startstring*.
    """
    continuation = "'\n" + ' ' * (len(startstring) - 2) + "u'"
    pieces = [startstring]
    col = len(startstring)
    for ch in s.replace("'", r"\'"):
        col += 1
        if ch == '\\' and col > wrap:
            # Break only at a backslash so escape sequences stay intact.
            col = len(startstring)
            pieces.append(continuation)
        pieces.append(ch)
    pieces.append(endstring)
    return ''.join(pieces)
import re
def count_match(file, pattern="$.*^"):
    """Count matches of *pattern* (compiled with re.M) in the file's contents.

    :param file: an open file-like object; its entire contents are read
    :param pattern: regular expression; the default matches nothing useful
    :return: number of non-overlapping matches
    """
    regex = re.compile(pattern, re.M)
    return sum(1 for _ in regex.finditer(file.read()))
def _find_t(e, annotations):
"""
Given an "E" annotation from an .ann file, find the "T" annotation.
Because "E" annotations can be nested, the search should be done on deeper
levels.
:param e: (string) the "E" annotation we want to find the target of.
:param annotations: (dict) the dict of annotations.
:return: the keys of "T" annotations this e annotation points to.
"""
e = e.split()
keys = []
if len(e) > 1:
targetkeys = [y for y in [x.split(":")[1] for x in e[1:]]]
for key in targetkeys:
if key[0] == "E":
keys.append(annotations['E'][key[1:]].split()[0].split(":")[1])
if key[0] == "T":
keys.append(key)
return keys | b10c097ed98548d6a51447e3dd23140deef44818 | 41,117 |
def screenRegion(gfx, region=(0.0, 0.0, 1.0, 1.0)):
    """(gfx, 4-tuple of floats) -> (4-tuple of ints)

    Determine the absolute coordinates of a screen region from its
    relative coordinates (each in 0.0..1.0).

    @param gfx: object exposing ``getSize() -> (w, h)``
    @param region: (x1, y1, x2, y2) relative coordinates
    @return: absolute (x1, y1, x2, y2); rounded to ints when the surface
        size is integral, left as floats otherwise
    """
    w, h = gfx.getSize()
    x1 = (w - 1) * region[0]
    y1 = (h - 1) * region[1]
    x2 = (w - 1) * region[2]
    y2 = (h - 1) * region[3]
    # isinstance instead of the old `type(w) == type(1)` identity-style check.
    if isinstance(w, int):
        # Round to the nearest pixel for integer-sized surfaces.
        x1 = int(x1 + 0.5)
        y1 = int(y1 + 0.5)
        x2 = int(x2 + 0.5)
        y2 = int(y2 + 0.5)
    return (x1, y1, x2, y2)
def one_vs_all_func(classes, table, TP, TN, FP, FN, class_name):
    """
    One-vs-all mode handler.

    Collapse a multi-class confusion matrix into a 2x2 matrix for
    *class_name* vs everything else ("~").

    :param classes: confusion matrix classes
    :type classes: list
    :param table: input confusion matrix
    :type table: dict
    :param TP: true positive per class
    :type TP: dict
    :param TN: true negative per class
    :type TN: dict
    :param FP: false positive per class
    :type FP: dict
    :param FN: false negative per class
    :type FN: dict
    :param class_name: target class name for one-vs-all mode
    :type class_name: any valid type
    :return: [classes, table] as list
    """
    try:
        target = str(class_name)
        collapsed = {
            target: {target: TP[class_name], "~": FN[class_name]},
            "~": {target: FP[class_name], "~": TN[class_name]},
        }
        return [[target, "~"], collapsed]
    except Exception:
        # On any lookup failure fall back to the untouched inputs.
        return [classes, table]
def split(iterable, function):
    """
    Partition an iterable into two lists according to a test function.

    The input is consumed exactly once, so one-shot iterators are fine.

    :param iterable iterable: iterable of values to be split
    :param function function: decision function ``value => bool``
    :returns: tuple(
        list with values for which function is `True`,
        list with values for which function is `False`,)

    Example
    _______
    >>> split([1,2,3,4,5,6], lambda x: x<3)
    ([1, 2], [3, 4, 5, 6])
    """
    buckets = ([], [])
    for item in iterable:
        buckets[0 if function(item) else 1].append(item)
    return buckets
def total_coeffs_up_to(L):
    """Total number of coefficients up to a maximum degree L.

    Closed form of ``sum(num_coeffs(l) for l in range(1, L+1))``:
    L*(L + 2) == (L + 1)**2 - 1.
    """
    return (L + 1) ** 2 - 1
def split_dictionary(output_dict):
    """Split a dictionary into its file-path and ID lists.

    Parameters
    ----------
    output_dict : dict
        Must contain the keys 'id_llamado' and 'fullpath'.

    Returns
    -------
    tuple
        (fullpath values, id_llamado values) -- note paths come first.
    """
    return output_dict['fullpath'], output_dict['id_llamado']
def get_state(pins):
    """Return the current state, which is simply the pin collection itself."""
    return pins
from pathlib import Path
import re
import click
def ensure_valid_name(node_dir: Path, node_type: str, node_name: str) -> str:
    """Validate *node_name* and verify no node script already uses it.

    Raises ``click.exceptions.UsageError`` when the name is malformed or
    when ``node_dir/node_type/<node_name>.py`` already exists; otherwise
    returns the name unchanged.
    """
    # Must start with a letter, contain only word chars or '-', and must
    # not end in an underscore or punctuation.
    if re.match(r"^[a-zA-Z][\w\-]*[^\W_]$", node_name) is None:
        raise click.exceptions.UsageError("Invalid node name!")
    script_path = node_dir / node_type / f"{node_name}.py"
    if script_path.exists():
        raise click.exceptions.UsageError("Node name already exists!")
    return node_name
def get_content(html_code):
    """ Separate the message section from the content section.

    Each line of *html_code* is classified: lines ending with '#' are
    discarded, lines starting (but not ending) with '#' are messages,
    everything else is content.

    :parameters
        html_code (str) the downloaded html code
    :returns
        tuple (lists) two lists of strings, one line per item:
        (message lines, content lines)
    """
    messages, contents = [], []
    for raw_line in html_code.split('\n'):
        if raw_line.endswith('#'):
            continue  # lines terminated with '#' are dropped entirely
        if raw_line.startswith('#'):
            messages.append(raw_line)
        else:
            contents.append(raw_line)
    return messages, contents
import copy
def division_algorithm(first_polynomial, second_polynomial):
    """
    Return the quotient and remainder of the division of two polynomials.

    :param first_polynomial: dividend, a coefficient vector (highest degree
        first) with degree >= that of the divisor
    :param second_polynomial: divisor, a coefficient vector (highest degree
        first)
    :return quotient, remainder: coefficient vectors; None if the
        requirements are not met (empty input or dividend degree too small)
    """
    if not first_polynomial or not second_polynomial or len(first_polynomial) < len(second_polynomial):
        print("[ERROR]: Requirements are not reached.")
        return None
    quotient = [0] * (len(first_polynomial) - len(second_polynomial) + 1)
    polynomial = copy.deepcopy(first_polynomial)
    for index in range(len(quotient)):
        quotient[index] = polynomial[index] / second_polynomial[0]
        for offset, coefficient in enumerate(second_polynomial):
            polynomial[index + offset] -= quotient[index] * coefficient
    # The remainder is exactly the tail after the quotient-covered prefix.
    # The old code filtered out ALL zero coefficients, which also dropped
    # interior zeros and corrupted the remainder's degree structure
    # (e.g. remainder x**2 + 1 became [1, 1] instead of [1, 0, 1]).
    remainder = polynomial[len(quotient):]
    while len(remainder) > 1 and remainder[0] == 0:
        remainder.pop(0)  # strip leading zeros only
    if not remainder:
        remainder = [0]
    return quotient, remainder
def read_words_from_file(path):
    """
    Read the content of a file and return it split on the space character.

    :param path: The path of the file.
    :return: A list of words in this file.
    """
    # Context manager guarantees the handle is closed (the old version
    # leaked it).
    with open(path, "r") as file_obj:
        content = file_obj.read()
    return content.split(" ")
import os
def find_images(filenames: list, folder: str, formats: list):
    """Return folder-joined paths of files whose (case-insensitive)
    extension is one of *formats*."""
    wanted = tuple(formats)
    return [os.path.join(folder, name)
            for name in filenames
            if name.lower().endswith(wanted)]
def generate_query_id(query_client, branch):
    """
    Build the query body for a branch, create the query, and return its ID.
    """
    model_name, explore_name, fields, _starting_field_count = branch
    body = {
        'limit': '1',  # cap rows: only the query ID is needed here
        'model': model_name,
        'view': explore_name,
        'fields': fields,
    }
    return query_client.create_query(body=body).id
import sys
def check_packed_shards_logical(fs_format, min_unpacked_rev):
    """Abort (exit code 1) if a logical-addressing repo has packed shards.

    Returns False when the repository is safe to process (not logical
    addressing, or nothing is packed yet); otherwise prints a diagnostic
    and terminates the process.
    """
    if fs_format[2] != "logical" or min_unpacked_rev <= 0:
        return False
    sys.stdout.write("\n")
    sys.stdout.flush()
    sys.stderr.write("Packed shards with logical addressing cannot be analyzed or unpacked.\n")
    sys.stderr.flush()
    sys.exit(1)
import torch
def beams(ctc_probs, beam_size: int, alphabet: str, blank_index: int):
    """
    Return top-beam_size beams using CTC prefix beam search,
    adapted from https://github.com/wenet-e2e/wenet/blob/829d2c85e0495094636c2e7ab7a24c29818e1eff/wenet/transformer/asr_model.py#L329

    Each hypothesis tracks two log-probabilities: ending in blank (pb) and
    ending in a non-blank symbol (pnb); the two are combined with logsumexp
    when ranking.

    :param ctc_probs: iterable of per-frame log-probability vectors
        (assumed indexable tensors with .topk -- TODO confirm shape/dtype)
    :param beam_size: number of hypotheses kept after each frame
    :param alphabet: maps a symbol index to its character
    :param blank_index: index of the CTC blank symbol
    :return: list of (prefix, (blank_score, non_blank_score)), best first
    """
    # cur_hyps: (prefix, (blank_ending_score, non_blank_ending_score))
    cur_hyps: list[tuple[str, tuple[float, float]]] = [("", (float(0.0), -float('inf')))]
    for logp in ctc_probs:
        # key: prefix, value (pb, pnb), default value(-inf, -inf)
        next_hyps: dict[str, tuple[float, float]] = {}
        neginf = (-float('inf'), -float('inf'))
        # Only the beam_size most likely symbols of this frame are expanded.
        _, top_k_indices = logp.topk(beam_size)
        for s in top_k_indices:
            s = int(s.item())
            ps = logp[s].item()
            for prefix, (pb, pnb) in cur_hyps:
                last = prefix[-1] if len(prefix) > 0 else ""
                if s == blank_index:
                    # Blank never extends the prefix; both endings fold into pb.
                    n_pb, n_pnb = next_hyps.get(prefix, neginf)
                    n_pb = torch.tensor([n_pb, float(pb + ps), float(pnb + ps)]).logsumexp(dim=0).item()
                    next_hyps[prefix] = (n_pb, n_pnb)
                elif alphabet[s] == last:
                    # Update *ss -> *s
                    n_pb, n_pnb = next_hyps.get(prefix, neginf)
                    n_pnb = torch.tensor([n_pnb, float(pnb + ps)]).logsumexp(dim=0).item()
                    next_hyps[prefix] = (n_pb, n_pnb)
                    # Update *sεs -> *ss
                    n_prefix = prefix + alphabet[s]
                    n_pb, n_pnb = next_hyps.get(n_prefix, neginf)
                    n_pnb = torch.tensor([n_pnb, float(pb + ps)]).logsumexp(dim=0).item()
                    next_hyps[n_prefix] = (n_pb, n_pnb)
                else:
                    # New symbol: extend the prefix; it now ends non-blank.
                    n_prefix = prefix + alphabet[s]
                    n_pb, n_pnb = next_hyps.get(n_prefix, neginf)
                    n_pnb = torch.tensor([n_pnb, float(pb + ps), float(pnb + ps)]).logsumexp(dim=0).item()
                    next_hyps[n_prefix] = (n_pb, n_pnb)
        # Rank prefixes by total probability (pb (+) pnb) and keep the best.
        next_hyps_list: list[tuple[str, tuple[float, float]]] = list(next_hyps.items())
        _, indices = torch.tensor([float(torch.tensor([float(hyp[1][0]), float(hyp[1][1])]).logsumexp(dim=0))
                                   for hyp in next_hyps_list]).topk(beam_size)
        cur_hyps = [next_hyps_list[index] for index in indices]
    return cur_hyps
def asset_icon_name(asset_type_name: str) -> str:
    """Icon class name for an asset type, for use in UI html templates.

    ui.__init__ exposes this as the Jinja filter "asset_icon". For example:

        <i class={{ asset_type.name | asset_icon }}></i>

    becomes (for a battery):

        <i class="icon-battery"></i>
    """
    # Power asset exception: any EVSE variant shares one icon.
    if "evse" in asset_type_name.lower():
        return "icon-charging_station"
    # Weather and aggregation exceptions, then the generic fallback.
    special_icons = {
        "radiation": "wi wi-horizon-alt",
        "temperature": "wi wi-thermometer",
        "wind_direction": "wi wi-wind-direction",
        "wind_speed": "wi wi-strong-wind",
        "renewables": "icon-wind",
    }
    return special_icons.get(asset_type_name, f"icon-{asset_type_name}")
def should_trade(strategy, date, previous_trade_date):
    """Determine whether a trade should happen for the strategy on *date*.

    All three gates must pass: not already traded today (the whole value
    is invested, so at most one trade per day), the strategy is active,
    and both price points are known.
    """
    def _day(ts):
        # Truncate to the calendar day (microseconds follow `second=0`'s
        # original behavior of being left untouched -- both sides match).
        return ts.replace(hour=0, minute=0, second=0)

    if previous_trade_date and _day(previous_trade_date) == _day(date):
        return False
    if strategy["action"] == "hold":
        return False
    return bool(strategy["price_at"] and strategy["price_eod"])
import re
def parser(filename):
    """
    Parse a text log file for battery current readings.

    :param filename: file to be parsed
    :return: the maximum current value found, or 0 if the file contains none
    """
    # Raw string (the old '\d' is an invalid escape -> SyntaxWarning on
    # Python 3.12) and compiled once instead of per line.
    current_re = re.compile(r'current: (\d+\.\d*)')
    maximum = 0
    # Context manager closes the handle; the old version leaked it.
    with open(filename, 'r') as log_data:
        for line in log_data:
            m = current_re.search(line)
            if m is None:
                continue  # no battery information in this line
            current = float(m.group(1))  # renamed from the misleading `voltage`
            if current > maximum:
                maximum = current
    return maximum
from functools import reduce
import operator
def prod(a, start=1):
    """Return the product of the elements of *a*, beginning from ``start``.

    ``start`` defaults to the int 1, so an all-int input yields an int.

    Examples
    ========

    >>> prod(range(3))
    0
    >>> prod([1, 2], 3)
    6
    """
    result = start
    for item in a:
        result = result * item
    return result
import argparse
def parseCommandLineArguments():
    """
    Parse the arguments provided through the command line.

    The only user-facing option is ``--all_arguments/-a`` (required); the
    remaining options are hidden (help=argparse.SUPPRESS) bookkeeping slots
    that the pipeline fills in itself at runtime.

    Launch python find_y2h_seq_candidates.py --help for more details.
    """
    parser = argparse.ArgumentParser(prog="NGPINT.py",description="This pipeline can be used to find potential interactors of the bait. Please make sure that the following softwares are available in your path. We recommend that you trim adapters from your libraries and then supply them as selected and/or background files.")
    # Groups are created but never populated; kept for parity with the
    # original parser object.
    optional_arg = parser.add_argument_group("Optional Arguments")
    required_arg = parser.add_argument_group("Required Arguments")
    parser.add_argument("--all_arguments","-a",help="Enter the csv file which has all the information. No other argument is needed. Do NOT change the order in which the entries appear in this file. Do not add or remove any entry. For optional arguments the last column can be left blank.",default=None,required=True)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    # Hidden internal slots, registered in the original declaration order.
    hidden_options = (
        "selected_filename",
        "selected_path",
        "selected_sample_adapter_trimmed",
        "selected_sample_adapter_trimmed_error_file",
        "selected_sample_N_removed",
        "selected_sample_STAR_prefix_round1",
        "selected_sample_STAR_round1_output",
        "selected_sample_STAR_round1_error",
        "selected_sample_STAR_genome_filename_round1",
        "selected_sample_STAR_transcriptome_bamfilename_round1",
        "selected_sample_STAR_prefix_round2",
        "selected_sample_STAR_round2_output",
        "selected_sample_STAR_round2_error",
        "selected_sample_STAR_genome_filename_round2",
        "selected_sample_STAR_transcriptome_bamfilename_round2",
        "selected_sample_STAR_transcriptome_bamfilename_round2_fusion_reads",
        "selected_sample_trimming_stats",
        "selected_sample_all_reads_vector_trimmed",
        "selected_sample_fusion_reads",
        "selected_sample_genome_browser",
        "selected_sample_genome_browser_per_replicate",
        "selected_sample_salmon_counts_outputfile",
        "selected_sample_salmon_counts_error",
        "selected_sample_transcriptome_coverage_bed_all_reads",
        "selected_sample_transcriptome_coverage_bed_fusion_reads",
        "selected_sample_transcriptome_coverage_bed_all_reads_splits",
        "selected_sample_transcriptome_coverage_bed_fusion_reads_splits",
        "selected_sample_idxstats_filename_all_reads",
        "selected_sample_idxstats_filename_fusion_reads",
        "selected_sample_graph_info_filename",
        "selected_sample_for_cross_library_analysis",
        "selected_sample_amplicon_filename",
        "selected_sample_transcript_read_coverage",
        "selected_sample_per_read_log",
        "background_sample",
        "background_path",
        "background_sample_adapter_trimmed",
        "background_sample_adapter_trimmed_error_file",
        "background_sample_N_removed",
        "background_sample_STAR_prefix_round1",
        "background_sample_STAR_round1_output",
        "background_sample_STAR_round1_error",
        "background_sample_STAR_genome_bamfilename_round1",
        "background_sample_STAR_genome_filename_round1",
        "background_sample_STAR_prefix_round2",
        "background_sample_STAR_round2_output",
        "background_sample_STAR_round2_error",
        "background_sample_STAR_genome_filename_round2",
        "background_sample_STAR_transcriptome_bamfilename_round2",
        "background_sample_STAR_transcriptome_bamfilename_round2_fusion_reads",
        "background_sample_trimming_stats",
        "background_sample_all_reads_vector_trimmed",
        "background_sample_fusion_reads",
        "background_sample_salmon_counts_outputfile",
        "background_sample_salmon_counts_error",
        "background_sample_per_read_log",
        "temp_output_directory",
        "transcript_to_gene_map",
        "transcriptome",
        "transcriptome_index",
        "salmon_normalized_counts",
        "salmon_DGE_filename",
        "salmon_gene_counts_matrix",
        "deseq2_normalized_counts",
        "deseq2_DGE_output",
        "os",
        "combined_graph_final",
        "run_details_info_csv",
        "record_time",
        "design_primers_for_transcript",
    )
    for option in hidden_options:
        parser.add_argument(f"--{option}", help=argparse.SUPPRESS)
    return parser.parse_args()
import random
def form_batches(batch_size, idx):
    """Shuffle *idx* and chunk it into minibatches of ``batch_size``
    (the final batch may be smaller)."""
    pool = list(idx)
    random.shuffle(pool)
    return [pool[start:start + batch_size]
            for start in range(0, len(pool), batch_size)]
def get_translation(pp):
    """Separate the intrinsic matrix from the translation of a projection
    matrix and return both as plain lists.

    :param pp: 3x4 projection matrix (array-like supporting 2-D slicing)
    :return: (intrinsics as nested list, [t1, t2, t3])
    """
    intrinsics = pp[:, :-1]
    focal_x = intrinsics[0, 0]
    focal_y = intrinsics[1, 1]
    cx, cy = intrinsics[2, 0:2]
    u, v, t3 = pp[0:3, 3]
    translation = [
        float((u - cx * t3) / focal_x),
        float((v - cy * t3) / focal_y),
        float(t3),
    ]
    return intrinsics.tolist(), translation
def is_multiline(s):
    """Return True if *s* consists of more than one line.

    A trailing newline alone does not count as a second line
    (``"a\\n"`` is single-line), matching ``str.splitlines`` semantics.

    Args:
        s (str): the string to check.

    Returns:
        bool
    """
    return len(s.splitlines()) >= 2
def _agg_scores_by_key(scores, key, agg_mode='mean'):
"""
Parameters
----------
scores: list or dict
list or dict of {'precision': ..., 'recall': ..., 'f1': ...}
"""
if len(scores) == 0:
return 0
if isinstance(scores, list):
sum_value = sum(sub_scores[key] for sub_scores in scores)
else:
sum_value = sum(sub_scores[key] for _, sub_scores in scores.items())
if agg_mode == 'sum':
return sum_value
elif agg_mode == 'mean':
return sum_value / len(scores) | f289ddcd298b4ce6dbe701e8e729e37fc6d8ea69 | 41,148 |
def get_rows_to_keep(mode, df, grp, samp_grps, qthreshold, min_child_non_leaf, min_child_nsamp, min_peptides,
                     min_pep_nsamp):
    """
    Use checking to find the rows (taxonomic or functional terms) that satisfy all of the filtering conditions for
    the specified group
    :param mode: either 'f', 't', or 'ft'
    :param df: data frame of functional and taxonomic terms. missing values are represented as 0.
    :param grp: grp to check conditions for
    :param samp_grps: SampleGroups() object
    :param qthreshold: minimum number of quantitations per grp
    :param min_child_non_leaf: minimum number of children for terms that are not leaves
    :param min_child_nsamp: minimum number of samples with sample children greater than min_child_non_leaf
    :param min_peptides: minimum number of peptides for each term
    :param min_pep_nsamp: minimum number of samples where the number of peptides has to be larger than
    min_peptides
    :return: boolean Series with rows to keep as True
    """
    # intensity: at least `qthreshold` samples in the group must have a
    # nonzero intensity (0 encodes "missing" per the docstring).
    intcols = samp_grps.sample_names[grp]
    keep_int = (df[intcols] > 0).apply(sum, axis=1) >= qthreshold
    # peptides: note the strictly-greater comparison, unlike the >= used
    # for the children filter below.
    peptide_cols = samp_grps.n_peptide_names_dict[grp]
    peptide_keep_series = (df[peptide_cols] > min_peptides)
    if min_pep_nsamp == "all":
        # the string "all" means every sample must pass
        keep_peptide = peptide_keep_series.all(axis=1)
    else:
        keep_peptide = peptide_keep_series.apply(sum, axis=1) >= int(min_pep_nsamp)
    if mode != 'ft':
        # child non leaf: a term passes if it has enough children OR is a
        # leaf (0 children) -- the child filter only targets non-leaves.
        child_cols = samp_grps.samp_children_names_dict[grp]
        child_keep_series = (df[child_cols] >= min_child_non_leaf) | (df[child_cols] == 0)
        if min_child_nsamp == "all":
            keep_child = child_keep_series.all(axis=1)
        else:
            keep_child = child_keep_series.apply(sum, axis=1) >= int(min_child_nsamp)
        all_keep = keep_int & keep_child & keep_peptide
    else:
        # combined function-taxonomy mode has no child hierarchy to check
        all_keep = keep_int & keep_peptide
    return all_keep
def send(r, stream=False):
    """Dispatch the prepared request via its own ``send`` method and return
    the response it records on itself.

    :param r: request-like object exposing ``send(stream=...)`` and a
        ``response`` attribute populated by that call
    :param stream: forwarded to ``r.send``
    """
    r.send(stream=stream)
    return r.response
from pathlib import Path
from datetime import datetime
def unique_path(parent: Path, stem: str, suffix: str, seps=('_', '-'), n: int = 1, add_date: bool = True) -> Path:
    """Return a path under *parent* whose file name does not exist yet.

    The first candidate is ``stem[+date]suffix``; while a candidate exists,
    ``n`` (incremented each try) is appended before the suffix.

    :param parent: Directory in which a unique file name should be created
    :param stem: File name without extension
    :param suffix: File extension, including `.`
    :param seps: Separators between stem and date / stem and n, respectively
    :param n: First number to try
    :param add_date: If True, today's date (YYYY-MM-DD) is always appended
    :return: Path whose name does not currently exist in *parent*
    """
    date_sep, num_sep = seps
    base = stem
    if add_date:
        base = f"{base}{date_sep}{datetime.now().strftime('%Y-%m-%d')}"
    candidate = parent / (base + suffix)
    while candidate.exists():
        candidate = parent / f"{base}{num_sep}{n}{suffix}"
        n += 1
    return candidate
def remove_indices_from_range(ixs, max_ix):
    """Remove the indices in *ixs* (assumed sorted ascending) from the
    range 0..max_ix inclusive.

    Returns the surviving stretches as a list of [start] singletons and
    [start, stop) half-open pairs.
    """
    segments = []
    start = 0
    for removed in ixs:
        end = removed - 1
        if end == start:
            segments.append([start])            # single surviving index
        elif end > start:
            segments.append([start, end + 1])   # half-open surviving run
        # end < start: nothing survives before `removed`
        start = removed + 1
    # Tail after the last removed index, up to max_ix inclusive.
    if start == max_ix:
        segments.append([start])
    elif start < max_ix:
        segments.append([start, max_ix + 1])
    return segments
def maybe_append(df1, df2):
    """
    Append df2 to df1 when both frames are available; otherwise return
    whichever one is not None.

    NOTE(review): relies on ``DataFrame.append``, which was removed in
    pandas 2.0 -- confirm the pinned pandas version still provides it.
    """
    if df1 is not None and df2 is not None:
        return df1.append(df2)
    return df2 if df1 is None else df1
def max_subarray(nums):
    """
    Find the contiguous subarray (containing at least one number) with the
    largest sum, using Kadane's algorithm.

    For example, given [-2,1,-3,4,-1,2,1,-5,4], the contiguous subarray
    [4,-1,2,1] has the largest sum = 6.

    Args:
        nums: list[int], non-empty

    Returns:
        int: the maximum subarray sum
    """
    # Seed with the first element, then scan the REST of the list. The
    # original iterated over all of nums after seeding with nums[0],
    # double-counting it (e.g. [5] returned 10 and [2, 3] returned 7).
    max_so_far = max_end_here = nums[0]
    for x in nums[1:]:
        # DP, optimal substructure:
        max_end_here = max(max_end_here + x, x)
        max_so_far = max(max_so_far, max_end_here)
    return max_so_far
def pandas_table_to_nested_list(df):
    """
    Convert pandas table *df* to a nested list: the first row holds the
    column names, subsequent rows hold the cell values.
    """
    n_cols = df.shape[1]
    header = [df.columns[j] for j in range(n_cols)]
    rows = [[df.iat[i, j] for j in range(n_cols)]
            for i in range(df.shape[0])]
    return [header] + rows
def viz_white_wrap(body, char, bgcolor="white"):
    """Wrap *body* with hidden text for graphviz.

    *char* is rendered in *bgcolor* so it blends into the background,
    padding the visible label without showing extra characters.
    All three arguments are interpolated explicitly (the previous
    `.format(**locals())` hid the data flow and broke under renames).
    """
    return (
        f'<FONT COLOR="{bgcolor}">'
        f'  <FONT COLOR="black">{body}</FONT>'
        f'{char}</FONT>'
    )
def cescape(string):
    """Escapes special characters needed for color codes.
    Replaces the following symbols with their equivalent literal forms:
    ===== ======
    ``@`` ``@@``
    ``}`` ``}}``
    ===== ======
    Parameters:
        string (str): the string to escape
    Returns:
        (str): the string with color codes escaped
    """
    # One C-level pass over the string instead of chained replace() calls.
    return str(string).translate(str.maketrans({'@': '@@', '}': '}}'}))
import os
def get_svgs(names: list, root: str, folders: list) -> list:
    """Get svgs from multiple folders.
    :param names: The names of the svg files.
    :param root: The root folder that the folders are in.
    :param folders: A list of folder names; if empty, *root* itself is searched.
    :return: list of full paths to the matching files
    """
    # Set gives O(1) membership tests instead of scanning `names` per entry.
    wanted = set(names)
    # An empty folder list means "search the root directory itself"; this
    # collapses the two previously duplicated branches into one loop.
    search_dirs = ([os.path.join(root, folder) for folder in folders]
                   if folders else [root])
    svgs = []
    for directory in search_dirs:
        for item in os.listdir(directory):
            if item in wanted:
                svgs.append(os.path.join(directory, item))
    return svgs
def closest_pair_strip(cluster_list, horiz_center, half_width):
    """
    Helper function to compute the closest pair of clusters in a vertical strip
    Input: cluster_list is a list of clusters produced by fast_closest_pair
    horiz_center is the horizontal position of the strip's vertical center line
    half_width is the half the width of the strip (i.e; the maximum horizontal distance
    that a cluster can lie from the center line)
    Output: tuple of the form (dist, idx1, idx2) where the centers of the clusters
    cluster_list[idx1] and cluster_list[idx2] lie in the strip and have minimum distance dist.
    """
    # Pair each in-strip cluster with its original index via enumerate.
    # Bug fix: the previous cluster_list.index(cluster) was O(n) per element
    # and returned the FIRST occurrence, i.e. the wrong index whenever two
    # clusters compared equal.
    strip = [(cluster, idx) for idx, cluster in enumerate(cluster_list)
             if abs(cluster.horiz_center() - horiz_center) < half_width]
    strip.sort(key=lambda pair: pair[0].vert_center())
    length = len(strip)
    dist, idx1, idx2 = float('inf'), -1, -1
    for idx_u in range(length - 1):
        # Only the next three neighbors in vertical order can be closer.
        for idx_v in range(idx_u + 1, min(idx_u + 4, length)):
            uv_dist = strip[idx_u][0].distance(strip[idx_v][0])
            if uv_dist < dist:
                dist = uv_dist
                # Report the pair with the smaller original index first.
                idx1, idx2 = sorted((strip[idx_u][1], strip[idx_v][1]))
    return (dist, idx1, idx2)
def tb_args(exc):
    """Easily format arguments for `traceback` functions."""
    exc_type = type(exc)
    return exc_type, exc, exc.__traceback__
def flatten_list(cols_list, recursive=True):
    """Take a list of lists and return a flattened list.
    Args:
        cols_list: an iterable of any quantity of str/tuple/list/set.
    Example:
        >>> flatten_list(["a", ("b", set(["c"])), [["d"]]])
        ["a", "b", "c", "d"]
    """
    flat = []
    for element in cols_list:
        if not isinstance(element, (set, list, tuple)):
            flat.append(element)
        elif recursive:
            # Recurse into nested containers of arbitrary depth.
            flat.extend(flatten_list(element))
        else:
            # Non-recursive mode: unpack one level only.
            flat.extend(element)
    return flat
def get_digit(number):
    """Return True if the string consists only of digit characters,
    i.e. it can be converted to a number."""
    return number.isdigit()
def table_entry_size(name, value):
    """
    Calculates the size of a single entry
    This size is mostly irrelevant to us and defined
    specifically to accommodate memory management for
    lower level implementations. The 32 extra bytes are
    considered the "maximum" overhead that would be
    required to represent each entry in the table.
    See RFC7541 Section 4.1
    """
    overhead = 32  # fixed per-entry overhead mandated by RFC 7541
    return len(name) + len(value) + overhead
def z_array(s):
    """
    Z-algorithm used in BM-Search
    :param s: the string from which to extract; must have len(s) > 1 (asserted)
    :return: a list z where z[i] is the length of the longest substring of s
             starting at position i that is also a prefix of s; z[0] is
             defined as len(s)
    """
    assert len(s) > 1
    n = len(s)
    z = [0] * n
    z[0] = n
    # [l, r] is the rightmost window found so far that matches a prefix of s.
    l, r = 0, 0
    for i in range(1, n):
        if i > r:
            # i > r, i is on the right side of r
            # reset position of l and r because s[l..r] at least starts at s[i]
            l, r = i, i
            while r < n and s[r - l] == s[r]:
                # compare S[0...] until r hits the end
                r += 1
            # restore r back to the location (we're off by one)
            r -= 1
            # the maximum prefix-substring will have the length of r - l +1
            # because r stops at first non-matching letter after l
            z[i] = r - l + 1
        else:
            # i <= R, i not on the right side
            # k is i's mirror position inside the prefix window.
            k = i - l
            if z[k] < r - i + 1:
                # we have at most this many matches
                z[i] = z[k]
            else:
                l = i
                # we could (possibly) still match some after s[i]
                # therefore, we continue with s[r], which corresponds to s[r-l],
                # the first character matched in s
                while r < n and s[r - l] == s[r]:
                    r += 1
                # restore r back to the location (same, we're off by one)
                r -= 1
                z[i] = r - l + 1
    return z
import os
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes | ce818f99928686d5cb6a5caa8d5534af3a934d95 | 41,172 |
import os
def testcase():
    """Try to locate the test case that comes with cadishi. Works for a
    check-out (or tarball) of the source files as well as for an installation.
    Returns the full path to the testcase including a trailing slash."""
    here = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.abspath(os.path.join(here, "tests", "data"))
    return data_dir + "/"
def bitparity(x):
    """return the bit parity of the word 'x'. """
    assert x >= 0, "bitparity(x) requires integer x >= 0"
    # Fold arbitrarily wide values down to 32 bits, xor preserves parity.
    while x > 0xffffffff:
        x = (x >> 32) ^ (x & 0xffffffff)
    # Successively halve the word; parity ends up in the low nibble.
    for shift in (16, 8, 4):
        x ^= x >> shift
    # 0x6996 is a 16-entry lookup table of nibble parities.
    return (0x6996 >> (x & 15)) & 1
def x_times(a):
    """
    Multiply the given polinomial a, x times
    (GF(2^8) doubling with the AES reduction polynomial 0x1B).
    """
    doubled = a << 1
    if a & 0x80:
        # High bit set: reduce modulo the field polynomial.
        return (doubled ^ 0x1B) & 0xFF
    return doubled
def global_video_success(row, weights=None):
    """Create a video success measurement based on basic video stats.

    Each metric is multiplied by its weight before summing.  The default
    weighting counts every metric positively except dislikes, which are
    subtracted.

    Args:
        row: mapping with the keys commentCount, dislikeCount,
            favoriteCount, likeCount, viewCount.
        weights: optional per-metric weights, in the order above.
    Returns:
        The weighted sum of the metrics.
    """
    metric_cols = ["commentCount", "dislikeCount", "favoriteCount",
                   "likeCount", "viewCount"]
    if weights is None:
        weights = [1 for _ in metric_cols]
        weights[1] = -1  # dislikes lower the score
    # Bug fix: weights were previously computed but never applied to the sum.
    return sum(weight * row[key] for weight, key in zip(weights, metric_cols))
def has_converged(mu, oldmu):
    """
    A boolean indicating whether or not a set of centroids has converged
    Parameters:
        mu - the latest array of centroids
        oldmu - the array of centroids from the previous iteration
    Returns:
        A boolean indicating whether or not the old and new centroids are the same,
        representing whether or not the clustering has converged
    """
    # Compare as sets of tuples so ordering of the centroids is irrelevant.
    new_centroids = {tuple(center) for center in mu}
    old_centroids = {tuple(center) for center in oldmu}
    return new_centroids == old_centroids
import copy
def merge_config(new_config, old_config):
    """Merge the user-defined config with default config"""
    # Deep copy so the caller's default config is never mutated.
    merged = copy.deepcopy(old_config)
    if new_config is None:
        return merged
    merged.update(new_config)
    return merged
def make_range(chain_range_dic):
    """Expand a chain dictionary into (min, max) index ranges."""
    return {chain: (min(indices), max(indices))
            for chain, indices in chain_range_dic.items()}
def track_links(soup):
    """ Replace links with tracking links. """
    for anchor in soup.findAll('a'):
        # Only anchors that actually carry an href can be tracked.
        if not anchor.has_attr('href'):
            continue
        # Only track online.msstate.edu links.
        if 'https://online.msstate.edu/' not in anchor['href']:
            continue
        # Append the capture-email tracker to the existing URL.
        anchor['href'] = anchor['href'] + "?cbe_email={{Email}}"
    return soup
def healthcheck():
    """Low overhead health check."""
    body, status = 'ok', 200
    return body, status
def readable_conversion_file(url):
    """Reads the provided url as a conversion file"""
    with open(url, 'r', encoding='utf-8') as handle:
        lines = handle.readlines()
    assert len(lines) == 9, "The conversion file must have exactly 9 lines detailing the 9 conversion categories"
    categories = []
    for line in lines:
        # Each line is "key: value, key: value, ..." for one category.
        pairs = (part.split(':') for part in line.split(','))
        categories.append({key.strip(): value.strip() for key, value in pairs})
    return categories
def format_generic(value):
    """
    Generic values with space in them need to be quoted
    """
    text = str(value)
    return f'"{text}"' if " " in text else text
import math
def calculateHeading(origin, destination):
    """ Calculate the heading direction between two coordinates. It returns the
    heading in degrees, where 0 deg is North. (This is not very accurate but
    good enough for us.)"""
    dx = destination[0] - origin[0]
    dy = destination[1] - origin[1]
    # atan2 measures from the +x axis; shift by 90 so 0 deg points North.
    return math.degrees(math.atan2(dy, dx)) + 90
def for_factorial(num):
    """Iterative solution: multiply 2..num together (1 for num < 2)."""
    result = 1
    factor = 2
    while factor <= num:
        result *= factor
        factor += 1
    return result
def extract_unique_series(sessions):
    """
    Presently, the user will have to choose a subject that has the most
    representative number of scans to constitute a 'complete' series.
    This function determines completeness based on the number of unique
    series that a subject possesses.
    It would be better if the most complete series/scan list was
    gotten by looking through every subject and extracting unique
    series from that list, but for this moment we're relying on the user
    to make a good choice.
    :param sessions: A queryset of selected sessions passed in from a
        search
    :return: a list of (SeriesDescription, pk, SeriesNumber) tuples, one per
        unique series description, sorted by SeriesNumber (although it might
        be better to return the session entirely, saves ourselves 1 query)
    """
    # NOTE(review): subject_and_series is never used below -- presumably
    # leftover from an earlier implementation; confirm before removing.
    subject_and_series = {}
    series_description = []
    storage_dict = {}
    series_number = {}
    for session in sessions:
        # Looks like a Django-style related-manager query (one per session).
        series = session.series_set.all()
        list_of_series = []
        # this dictionary is being used to store k: series desc, v: series primary key
        # it's necessary to divorce the two and easily reunite them later.
        # this is done solely so that when referring to html elements id's in the templates
        # that there are only numbers, no invalid characters referenced (it's fine for
        # rendering, but jquery does not like invalid chars).
        for each in series:
            list_of_series.append(each.SeriesDescription)
            # When the same description occurs in several sessions, the last
            # one seen wins the pk/number mapping.
            storage_dict[each.SeriesDescription] = each.pk
            series_number[each.SeriesDescription] = each.SeriesNumber
        series_description = series_description + list_of_series
    # Deduplicate descriptions across all sessions (order not preserved).
    unique_series = list(set(series_description))
    unique_w_pk = [(series, storage_dict[series], series_number[series]) for series in unique_series]
    # Present the series in acquisition order (by SeriesNumber).
    unique_w_pk.sort(key=lambda tup: tup[2])
    return unique_w_pk
import math
def smoothedsigmoid(x, b=1):
    """Logistic sigmoid with adjustable steepness.

    b controls smoothness: lower values of b give a smoother
    (more gradual) transition.
    """
    return 1 / (1 + math.exp(-b * x))
def diff_lists(list1, list2, option=None):
    """
    if option equal 'and', return a list of items which are in both list1
    and list2. Otherwise, return a list of items in list1 but not in list2.
    """
    if option == 'and':
        return [item for item in list1 if item in list2]
    return [item for item in list1 if item not in list2]
from typing import Sequence
def boolListToString(binary : Sequence[bool]) -> str:
    """Convert a boolean list to a string
    Parameters
    ----------
    binary : Sequence[bool]
        Sequence of booleans representing a binary number in
        big endian form
    Returns
    -------
    str
        String representing a binary number in big endian form
    """
    return "".join("1" if bit else "0" for bit in binary)
def print_messages(original_func):
    """
    Print loading messages to display for user.
    :param original_func: A function
    :precondition: original_func must be a well-formed function
    :postcondition: Successfully invoke the wrapper function
    :return: wrapper_printer, a wrapper that prints a loading message and
             then returns whatever original_func returns
    """
    import functools

    @functools.wraps(original_func)  # preserve the wrapped function's metadata
    def wrapper_printer(*args, **kwargs):
        print("\nLoading...")
        # Bug fix: propagate the wrapped function's return value instead of
        # silently discarding it.
        return original_func(*args, **kwargs)
    return wrapper_printer
import os
def _getName(path, synapse_dir, local_root, depth):
    """
    Finds the name of files in local directory.
    :param path: absolute path of the local file
    :param synapse_dir: mapping of local directory paths to their parent
        locations/benefactors
    :param local_root: root of the local tree being walked
    :param depth: maximum folder depth to preserve; components beyond it are
        folded into the file name, joined with '_'. May be None (no limit).
    :return: name of file and it's associated parent location/benefactor
    """
    # Strip the (absolute) local root prefix from the path.
    path_no_root = path[len(os.path.abspath(local_root)):]
    # NOTE(review): separators are counted with os.path.sep here but split on
    # '/' below -- presumably POSIX-only; confirm before running on Windows.
    if depth is not None and path_no_root.count(os.path.sep) > depth - 1:
        if str.startswith(path_no_root, '/'):
            path_no_root = path_no_root[1:]
        # Keep the components beyond the allowed depth and fold them into
        # a single flattened file name.
        temp_name = path_no_root.split('/')[(depth - 1):]
        name = '_'.join(temp_name)
        temp_name = '/'.join(temp_name)
        # Parent is looked up from the directory where the flattening starts.
        parent = synapse_dir[os.path.dirname(path[:-len(temp_name)])]
    else:
        name = os.path.basename(path)
        parent = synapse_dir[os.path.dirname(path)]
    return name, parent
def frame_attrs_from_set(frame_set):
    """
    A `dict` of all the attributes of all frame classes in this
    `TransformGraph`.
    Broken out of the class so this can be called on a temporary frame set to
    validate new additions to the transform graph before actually adding them.
    """
    # Later classes in the iteration overwrite earlier ones on key clashes,
    # matching dict.update() semantics.
    return {name: attr
            for frame_cls in frame_set
            for name, attr in frame_cls.frame_attributes.items()}
def get_n_params(model):
    """
    DESCRIPTION:
    Function to count number of parameters.
    """
    return sum(param.nelement() for param in model.parameters())
def process_passport(passport):
    """Turn a passport list into a dictionary."""
    # Join all lines into one space-separated string of "key:value" fields.
    fields = ' '.join(line.strip('\n') for line in passport).split(' ')
    return dict(field.split(':') for field in fields)
def is_valid_dsl(query):
    """Simple text check: the query must contain `return` and the returned
    fields must include concepts.

    Raises Exception with a usage hint when either check fails;
    returns True otherwise.
    """
    # Idiom fix: `"return" not in query` instead of `not "return" in query`.
    if "return" not in query:
        raise Exception("\n----\nYour DSL query does not include a `return` statement. Should end with: '.. return publications[id+concepts_scores]'")
    # Only inspect the part after the first `return` keyword.
    returned_part = query.split("return")[1]
    if "concepts" in returned_part:
        return True
    raise Exception("\n----\nYour DSL query does not return concepts. Should end with: '.. return publications[id+concepts_scores]'")
def upilab6_3_3 () :
    """6.3.3. Exercise UpyLaB 6.8 - green/blue/red track
    Write a function store_email(liste_mails) that receives a list of e-mail
    addresses and returns a dictionary whose keys are the e-mail domains and
    whose values are the corresponding lists of user names, sorted in
    ascending (UTF-8) order.
    Example:
    The call:
    store_email(["ludo@prof.ur", "andre.colon@stud.ulb",
                 "thierry@profs.ulb", "sébastien@prof.ur",
                 "eric.ramzi@stud.ur", "bernard@profs.ulb",
                 "jean@profs.ulb" ])
    returns the dictionary:
    { 'prof.ur' : ['ludo', 'sébastien'],
      'stud.ulb' : ['andre.colon'],
      'profs.ulb' : ['bernard', 'jean', 'thierry'],
      'stud.ur' : ['eric.ramzi'] }"""
    def store_email(liste_mails) :
        # Map each domain to the sorted list of user names seen so far.
        listeDom = []
        dictDomNom = {}
        for email in liste_mails :
            # Split "user@domain" into its two halves.
            [nom, domaine] = email.split('@')
            if domaine in listeDom :
                dictDomNom[domaine].append(nom)
                # Re-sort after every insertion to keep the list ordered.
                dictDomNom[domaine].sort()
            else :
                dictDomNom[domaine] = [nom]
                listeDom.append(domaine)
        return dictDomNom
    # Self-test inputs and their expected outputs.
    test = [["ludo@prof.ur", "andre.colon@stud.ulb","thierry@profs.ulb", "sébastien@prof.ur","eric.ramzi@stud.ur",
             "bernard@profs.ulb", "jean@profs.ulb" ],
            ['ludo@prof.ur', 'andre.colon@stud.ulb', 'thierry@profs.ulb', 'sébastien@prof.ur', 'eric.ramzi@stud.ur',
             'bernard@profs.ulb', 'jean@profs.ulb'],
            ['ludo@prof.ur', 'ludo@stud.ulb', 'ludo@profs.ulb', 'sébastien@prof.ur', 'sébastien@stud.ur',
             'sébastien@profs.ulb']
            ]
    reponse = [{ 'prof.ur' : ['ludo', 'sébastien'], 'stud.ulb' : ['andre.colon'],
                 'profs.ulb' : ['bernard', 'jean', 'thierry'], 'stud.ur' : ['eric.ramzi'] },
               {'profs.ulb': ['bernard', 'jean', 'thierry'], 'stud.ulb': ['andre.colon'],
                'prof.ur': ['ludo', 'sébastien'], 'stud.ur': ['eric.ramzi']},
               {'profs.ulb': ['ludo', 'sébastien'], 'stud.ulb': ['ludo'], 'prof.ur': ['ludo', 'sébastien'],
                'stud.ur': ['sébastien']}
               ]
    # Run store_email on every input and report pass/fail (messages in French).
    for n, t in enumerate(test) :
        dic = store_email(t)
        print("\n\n"+120*'_'+"à partir de ", t, "\n la fonction renvoie : \n", dic, "\n et il est attendu :\n", reponse[n] )
        print("Test réussi ? :", dic == reponse[n])
import re
def course_url(course_id):
    """
    Given a course id string, returns the URL for the link to that
    course's website.
    """
    if course_id.startswith("math"):
        return "https://www.wellesley.edu/math/curriculum/current_offerings"
    if course_id.startswith("cs"):
        return "https://cs.wellesley.edu/~" + course_id
    # Extract the first run of lowercase letters anywhere in the id as the
    # department code.
    dept_match = re.search("[a-z]+", course_id)
    if dept_match:
        return "https://www.wellesley.edu/" + dept_match.group(0) + "/curriculum"
    print(f"Unable to extract department from: '{course_id}'")
    return "https://www.wellesley.edu/cs/curriculum"
import random
def make_cat_string(categories=(None,)):
    """Function that generates a value for a StringType field
    that can only take a limited number of values.
    Parameters
    ----------
    categories : list or tuple of strings
        Sequence of strings to select values from
    Returns
    -------
    str
    """
    chosen = random.choice(categories)
    return str(chosen)
def one_to_all_bfs(start, num_vertexes, edges, INF=9223372036854775807):
    """
    when all cost is 1, BFS is faster (ABC170E)
    Level-order traversal: every node in the current frontier is at the same
    distance, so neighbors discovered now are exactly one step farther.
    """
    distances = [INF] * num_vertexes
    distances[start] = 0
    frontier = [start]
    depth = 0
    while frontier:
        depth += 1
        next_frontier = []
        for node in frontier:
            for neighbor in edges[node]:
                if depth < distances[neighbor]:
                    distances[neighbor] = depth
                    next_frontier.append(neighbor)
        frontier = next_frontier
    return distances
def remove_none_items(adict):
    """Return a similar dict without keys associated to None values"""
    cleaned = {}
    for key, value in adict.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
def read_atom_properties_file(filedir):
    """
    Reads the text file "Dans Element Properties.txt"
    Returns a list of dicts containing atomic properites from multiple sources
    data = read_atom_properties_file(filedir)
    data[22]['Element']
    :param filedir: location of "Dans Element Properties.txt"
    :return: [dict, ...]
    """
    with open(filedir, 'rt') as handle:
        lines = handle.readlines()
    head = None
    data = []
    for line in lines:
        # Skip comment lines entirely.
        if '#' in line:
            continue
        tokens = line.split()
        # The first non-comment line is the column header.
        if head is None:
            head = tokens
            continue
        element = {}
        for n, raw in enumerate(tokens):
            # Prefer int, then float, falling back to the raw string.
            for caster in (int, float):
                try:
                    raw = caster(raw)
                    break
                except ValueError:
                    continue
            element[head[n]] = raw
        data.append(element)
    return data
import struct
def unpack_info_packet(packet):
    """Unpack an informational packet."""
    layout = "Ld"  # native-order unsigned long followed by a double
    return struct.unpack(layout, packet)
def compute_input_history(history):
    """Slicing history in its second dimension."""
    # Keep only columns 2..4 of every row.
    # NOTE(review): the original comment said "no slicing for now" although
    # the code does slice -- presumably a stale comment from an earlier draft.
    return history[:, 2:5]
def release_string(d_release):
    """ Produces a string describing a release
    Args:
        d_release (dict): dictonary containing the release data
    Returns:
        (string): representing the release
    Raises:
        (KeyError):
            if the data does not contain the field
            "basic_information".
    >>> release_string({'id': 1}) # doctest: +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
    ...
    KeyError: "Your release 1 doesn't contain the field 'basic_information'"
    Example:
    >>> with open('discogs_finder/tests/test.json', 'r') as f:
    ...     r = json.load(f)
    >>> release_string(r) # doctest: +NORMALIZE_WHITESPACE
    u'Keith Jarrett: Shades (3318191)'
    """
    release_id = d_release['id']
    basics = d_release.get('basic_information', None)
    if not basics:
        raise KeyError("Your release %d doesn't contain"
                       " the field 'basic_information'" % release_id)
    artists = basics.get('artists', None)
    # Bug fix: guard with truthiness -- len(artists) raised a TypeError when
    # the 'artists' key was absent (artists is None).
    if artists:
        # The join separator comes from the first artist; a comma keeps no
        # leading space, anything else is padded on both sides.
        j = artists[0]['join']
        if j == ',':
            j = '%s ' % j
        else:
            j = ' %s ' % j
        arts = j.join(a['name'] for a in artists)
    else:
        arts = None
    title = basics.get('title', None)
    return u'{arts}: {title} ({release_id})'.format(arts=arts,
                                                    title=title,
                                                    release_id=release_id)
def makestamp(daynumber, timestamp):
    """Receives a Julian daynumber (integer 1 to 16777215) and an (HOUR, MINUTES) tuple timestamp.
    Returns a 5 digit string of binary characters that represent that date/time.
    Can receive None for either or both of these arguments.
    The function 'daycount' in dateutils will turn a date into a daynumber.
    """
    if not daynumber:
        # Missing day -> three zero bytes.
        datestamp = chr(0) * 3
    else:
        # Encode the day as three big-endian bytes.
        high, rest = divmod(daynumber, 65536)
        mid, low = divmod(rest, 256)
        datestamp = chr(high) + chr(mid) + chr(low)
    if not timestamp:
        # Missing time is marked with two 0xFF bytes.
        return datestamp + chr(255) * 2
    return datestamp + chr(timestamp[0]) + chr(timestamp[1])
import hashlib
def findhash(path: str) -> str:
    """Calculates the MD5 Hash for the path specified"""
    digest = hashlib.md5()
    # Stream in chunks so arbitrarily large files never load fully into
    # memory; 64 KiB chunks are far more efficient than the previous 1 KiB.
    # The with-block replaces the earlier redundant manual close()/del.
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def atleast_list(thing):
    """Make sure the item is at least a list of len(1) if not a list
    otherwise, return the original list
    Args
    ----
    thing (any type) : thing to assert is a list
    Returns
    -------
    thing (list)
    """
    return thing if isinstance(thing, list) else [thing]
def state_labels():
    """ Define the state labels for the states in the MDP """
    return {0: 'New', 1: 'Used', 2: 'Bad', 3: 'Broken'}
import decimal
def has_number_type(value):
    """
    Is a value a number or a non-number?
    >>> has_number_type(3.5)
    True
    >>> has_number_type(3)
    True
    >>> has_number_type(decimal.Decimal("3.5"))
    True
    >>> has_number_type("3.5")
    False
    >>> has_number_type(True)
    False
    """
    if isinstance(value, bool):
        # bool subclasses int, but booleans do not count as numbers here.
        return False
    return isinstance(value, (int, float, decimal.Decimal))
def flat_list(x=None):
    """
    Description:
        It returns a list that contains all the elements form the input list of lists.
        It should work for any number of levels.
    Example:
        >>> x = flat_list([1, 'k', [], 3, [4, 5, 6], [[7, 8]], [[[9]]]])
        >>> x
        >>> [1, 'k', 3, 4, 5, 6, 7, 8, 9]
    Args:
        - x (list): List of lists of objects.
    Raises:
        - TypeError: If the input is not a list object.
    """
    # Empty / missing input flattens to an empty list.
    if x is None:
        return []
    if not isinstance(x, list):
        raise TypeError(" Input should be a list.")
    flattened = []
    for element in x:
        if isinstance(element, list):
            # Recursion handles arbitrary nesting depth; an empty sublist
            # simply contributes nothing.
            flattened.extend(flat_list(element))
        else:
            flattened.append(element)
    return flattened
def is_date_special(date):
    """
    Is this particular date special
    (day times month equals the two-digit year)
    """
    product = date.day * date.month
    return product == date.year % 100
def street_check_fold_json(
    rus_check_json, benny_bet_json, rus_fold_json, oven_show_json
):
    """Expected JSON for street_check_fold model-fixture"""
    actions = [rus_check_json, benny_bet_json, rus_fold_json, oven_show_json]
    return {"actions": actions}
import shlex
def CommandToString(command):
    """Returns quoted command that can be run in bash shell."""
    # `pipes.quote` was deprecated and removed in Python 3.13; shlex.quote
    # is the documented replacement with the same quoting behavior.
    return ' '.join(shlex.quote(arg) for arg in command)
import re
def _parse_parameters(script):
"""Parse parameters from script header"""
params = {'profiles': [],
'templates': [],
'platform': ['multi_platform_all'],
'remediation': ['all']}
with open(script, 'r') as script_file:
script_content = script_file.read()
for parameter in params:
found = re.search('^# {0} = ([ ,_\.\-\w]*)$'.format(parameter),
script_content,
re.MULTILINE)
if found is None:
continue
splitted = found.group(1).split(',')
params[parameter] = [value.strip() for value in splitted]
return params | 30c028dd1bbd8c4737a613c15bf311798ef8e816 | 41,228 |
def tokenize_multi_turn_dialog(dataset, tokenizer, special_tokens):
    """
    Format > [[{'usr': <user utterance>, 'sys': <system utterance>}, ...],...]
    """
    def encode(prefix, text):
        # Tokenize with a speaker prefix, then map tokens to vocabulary ids.
        return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(prefix + text))

    tokenized_dialogs = []
    for i, dialog in enumerate(dataset):
        print('\r %.4f...' % ((i+1)/len(dataset)), end='')
        tokenized_dialogs.append(
            [{'usr': encode('user : ', turn['usr']),
              'sys': encode('system : ', turn['sys'])}
             for turn in dialog])
    print('\nDone!')
    return tokenized_dialogs
def channel1json():
    """Return a dict for a channel
    Name: test_channel
    """
    return dict(
        mature=False,
        status="test status",
        broadcaster_language="en",
        display_name="test_channel",
        game="Gaming Talk Shows",
        delay=0,
        language="en",
        _id=12345,
        name="test_channel",
        logo="test_channel_logo_url",
        banner="test_channel_banner_url",
        video_banner="test_channel_video_banner_url",
        url="http://www.twitch.tv/test_channel",
        views=49144894,
        followers=215780,
    )
def get_communicator(episode, agents, alternate=False):
    """
    This function selects the communicator.
    :param episode: The current episode.
    :param agents: The agents in the game.
    :param alternate: Alternate the leader or always the same.
    :return: The id of the communicating agent and the communicating agent itself.
    """
    communicator = episode % len(agents) if alternate else 0
    return communicator, agents[communicator]
def _get_language(uid, query):
"""
Returns ui_locales of a language
:param uid: of language
:param query: of all languages
:return: string
"""
return query.get(uid).ui_locales | 914d0a1e59ea34a5732b8baee13bf5899a10cc3f | 41,234 |
from typing import OrderedDict
import collections
def tokenize_annotations(annotations):
    """Function to tokenize & convert a list of genes GO term annotations to their equivalent list of GO term annotation ids"""
    all_terms = []
    for annotation in annotations:
        all_terms.extend(annotation.split())
    # Most-frequent-first ordering decides each term's one-hot index
    # (ties keep first-seen order, as Counter preserves insertion order).
    ordered_counts = OrderedDict(sorted(collections.Counter(all_terms).items(),
                                        key=lambda item: item[1], reverse=True))
    # Index 0 is reserved; real terms start at 1.
    term_index = {term: position + 1 for position, term in enumerate(ordered_counts)}
    annotation_ids = [[term_index[term] for term in annotation.split()]
                      for annotation in annotations]
    return annotation_ids, term_index
import torch
def ridge_regularize(network, lam):
    """Apply ridge penalty at linear layer and hidden-hidden weights."""
    linear_penalty = torch.sum(network.linear.weight ** 2)
    recurrent_penalty = torch.sum(network.rnn.weight_hh_l0 ** 2)
    return lam * (linear_penalty + recurrent_penalty)
def build_grid(filename):
    """Scrapes a formatted text file and converts it into a word search grid.
    Args:
        filename: A text file containing rows of alphabetical characters,
            optionally separated by spaces or commas. Each row must contain the
            same number of letters.
    Returns:
        A 2d list, representing the rows and columns of the word search grid.
    """
    grid = []
    # Context manager guarantees the file is closed even if parsing fails
    # (the previous explicit open/close leaked the handle on exceptions).
    with open(filename, 'r') as input_file:
        for line in input_file.read().splitlines():
            # Ignore separators
            line = line.replace(' ', '').replace(',', '')
            grid.append(list(line))
    return grid
import string
def make_a_valid_directory_name(proposed_directory_name):
    """In the case a field label can't be used as a file name the invalid
    characters can be dropped."""
    # Letters, digits, and a small set of safe punctuation survive.
    allowed = set("-_.() " + string.ascii_letters + string.digits)
    kept = "".join(ch for ch in proposed_directory_name if ch in allowed)
    # Spaces become hyphens after filtering.
    return kept.replace(" ", "-")
def read_experiment_data(f):
    """Read data from stdin"""
    # Split each line into tokens, dropping lines with no tokens at all.
    return [tokens
            for tokens in (line.split() for line in f.readlines())
            if tokens]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.