content (stringlengths 39-14.9k) | sha1 (stringlengths 40) | id (int64 0-710k)
|---|---|---|
import hashlib
def md5_of_file(fname: str) -> str:
"""
get md5 hash of file content
https://docs.python.org/3/library/hashlib.html
https://www.geeksforgeeks.org/md5-hash-python/
Args:
fname: name of file to hash
Returns:
hash as string
Example:
>>> md5_of_file('name_of_file')
d41d8cd98f00b204e9800998ecf8427e
"""
with open(fname, "rb") as fil:
file_content = fil.read()
return hashlib.md5(file_content).hexdigest()
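# Example usage (added for illustration, not part of the original snippet):
# an empty temporary file reproduces the digest quoted in the docstring.
import os
import tempfile
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
assert md5_of_file(tmp.name) == "d41d8cd98f00b204e9800998ecf8427e"
os.remove(tmp.name)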
|
8e3e07f0837a13825f668b6c833b19b090105f2b
| 56,453
|
def join_filename(basename, extension):
"""
Create a filename joining the file basename and the extension.
"""
basename = basename.rstrip('.').strip()
extension = extension.replace('.', '').strip()
filename = '{}.{}'.format(basename, extension)
return filename
|
9d8feddae8dbb42563fe0ae20a4994fdbba33903
| 56,454
|
def identity(x):
"""Returns its argument unchanged"""
return x
|
8fa963f85a270d7af368e9d58d255fe1bc24a30b
| 56,463
|
import re
def find_nth(haystack, needle, n):
"""
Finds nth occurrence of a string in a piece of text.
If n is negative, returns nth instance of string from
the end of the text.
Parameters
----------
haystack (str): piece of text to search through
needle (str): string to search for in text
n (int): which occurrence to identify (1st, 2nd, 3rd, ...)
Returns:
-------
int:
position in haystack where nth occurrence of
needle starts.
"""
if n < 0:
return [match.start(0) for match in re.finditer(needle, haystack)][n]
else:
return [match.start(0) for match in re.finditer(needle, haystack)][n-1]
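# Example (added for illustration): "ab" occurs at offsets 0, 3 and 6 in "ab-ab-ab".
assert find_nth("ab-ab-ab", "ab", 2) == 3
assert find_nth("ab-ab-ab", "ab", -1) == 6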
|
9a6b1ef0e68dbe6dc4682c98b161978c1cb9b16a
| 56,465
|
import re
def parse_multiline_sql(in_str):
"""
Separates sql string composed of multiple statements into a list of strings
:param in_str: The sql str
:return: A list of sql statements
"""
regex = re.compile(r"""((?:[^;"']|"[^"]*"|'[^']*')+)""")
results = []
bad = ["", "/"]
for p in regex.split(in_str)[1::2]:
if p.strip() in bad:
pass
else:
if p[0] == "\n":
results.append(p[1:])
elif p[:2] == "\r\n":
results.append(p[2:])
elif p[-1:] == "\n":
results.append(p[:-1])
elif p[-2:] == "\r\n":
results.append(p[:-2])
else:
results.append(p)
return results
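# Example (added for illustration): semicolons inside quoted literals do not split statements.
assert parse_multiline_sql("SELECT 1;\nSELECT 'a;b';") == ["SELECT 1", "SELECT 'a;b'"]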
|
a4edde1870b5551b54e845d9b1558626d1b33139
| 56,466
|
def parse_present(present):
"""Parse present from axbxc to list of int."""
return [int(num) for num in present.split('x')]
|
40110103d112ce771bbc39715142f7a34e6e5dd2
| 56,467
|
def insert_string(string: str, index: int, insert: str):
"""Inserts a string at a given index within another strings."""
return string[:index] + insert + string[index:]
|
18c33f55374940c8235971fe39197ba8a87db636
| 56,474
|
def get_pF(a_F, ecc_F):
"""
Computes the orbital parameter (semi-latus rectum) of the fundamental
ellipse. This value is kept constant throughout the problem as long as the
boundary conditions are not changed.
Parameters
----------
a_F: float
Semi-major axis of the fundamental ellipse.
ecc_F: float
Eccentricity of the fundamental ellipse.
Returns
-------
p_F: float
Orbital parameter / semi-latus rectum of the fundamental ellipse.
Notes
-----
No labeled equation (appears between [3] and [4]) from Avanzini's report
"""
p_F = a_F * (1 - ecc_F ** 2)
return p_F
|
b7d0b2955a792ea41e925b8ca42207eb5d31b5b6
| 56,475
|
def _filter_match_all(elements, keywords):
"""
Returns the elements for which all keywords are contained.
:param elements: a list of strings to filter
:param keywords: a tuple containing keywords that should all be included
:return: the matching elements
"""
matching = []
for elem in elements:
if all(keyword in elem for keyword in keywords):
matching.append(elem)
return matching
|
361a0966d15f2ef6cdeb8f41626ffbcd4b5bdf74
| 56,478
|
def topRange(l, s):
"""
Given a list of values, determines a range in which the values in the top
s% must lie.
Args:
l: A list of values
s: A percentage
Return:
A tuple (lowest value that could be in top s%, max value in l)
"""
mx = max(l)
mn = min(l)
if s == 100:
return (mn, mx)
dx = (mx - mn) / 100.0 # Width of 1% in the range covered by l's vals
min_thresh = mx - (s * dx)
return (min_thresh, mx)
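# Example (added for illustration): the top 10% of [0, 50, 100] must lie between 90.0 and the max, 100.
assert topRange([0, 50, 100], 10) == (90.0, 100)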
|
4fe86f67015cdb3b7825afdba3181738f337a949
| 56,479
|
def f_open_large_read(path, *args, **kwargs):
"""
A utility function to open a file handle for reading
with a default 64MB buffer size. The buffer size may
be overridden in the `kwargs`, but there is no point
in doing so as this is meant to be a quick utility.
:param path:
A string or path-like object pointing to the file to open.
"""
if "mode" not in kwargs:
kwargs["mode"] = "r"
if "buffering" not in kwargs:
kwargs["buffering"] = 2 ** 26
return open(path, **kwargs)
|
a54287101f6f5a6a1a2016cd55cac13397987b2a
| 56,488
|
import random
import string
def randomize_filename(ext=None, n_char=25):
"""
Randomly generate a filename
:param str ext: Extension, optional
:param n_char: Number of characters (excluding extension)
:return str: Randomized filename
"""
if not isinstance(n_char, int):
raise TypeError("Character count is not an integer: {}".format(n_char))
if n_char < 0:
raise ValueError("Negative char count: {}".format(n_char))
fn = "".join(random.choice(string.ascii_letters) for _ in range(n_char))
if not ext:
return fn
if not ext.startswith("."):
ext = "." + ext
return fn + ext
|
23f3bb9fabbf146f6e69ea0bbacb10306ace019e
| 56,491
|
def get_meta_idx(frames_metadata,
time_idx,
channel_idx,
slice_idx,
pos_idx):
"""
Get row index in metadata dataframe given variable indices
:param dataframe frames_metadata: Dataframe with column names given below
:param int time_idx: Timepoint index
:param int channel_idx: Channel index
:param int slice_idx: Slice (z) index
:param int pos_idx: Position (FOV) index
:return int: Row index in the dataframe matching the indices above
"""
frame_idx = frames_metadata.index[
(frames_metadata['channel_idx'] == channel_idx) &
(frames_metadata['time_idx'] == time_idx) &
(frames_metadata["slice_idx"] == slice_idx) &
(frames_metadata["pos_idx"] == pos_idx)].tolist()
return frame_idx[0]
|
1dae731193872dd4f82a1448fd0883d42524ada2
| 56,495
|
def ask_user(question):
"""
Simple yes/no screen for user, where the "question" string is asked. Takes y(yes)/n(no)
as valid inputs. If no valid input is given, the question is asked again.
Args:
question (str): What question should be asked for the y/n menu
Returns:
(bool): true/false, answer to question
"""
check = str(input(f"{question} (Y/N): ")).lower().strip()
try:
if check[0] in ["y", "yes"]:
return True
elif check[0] in ["n", "no"]:
return False
else:
print('Invalid Input')
return ask_user(question)
except Exception as error:
print("Please enter valid inputs")
print(error)
return ask_user(question)
|
6c30dc5c3aed8a0e436bb1fd6e8ed6372c1ddb34
| 56,498
|
def is_category_on_sale(category, sale):
"""Check if category is descendant of one of categories on sale."""
discounted_categories = set(sale.categories.all())
return any([
category.is_descendant_of(c, include_self=True)
for c in discounted_categories])
|
ca13aa4d616f7f83ff6cf6c7bc1b7b81098c832a
| 56,500
|
import codecs
def _int_to_bytes(i):
"""
Converts the given int to the big-endian bytes
"""
h = hex(i)
if len(h) > 1 and h[0:2] == "0x":
h = h[2:]
# need to strip L in python 2.x
h = h.strip("L")
if len(h) % 2:
h = "0" + h
return codecs.decode(h, "hex")
|
5fdfdeac50d13e26385277c9cef12b4ed393a08b
| 56,506
|
def _doc_with_odd_publish_date(doc):
"""Check if the doc is published exactly at 05:00 UTC (midnight EST)"""
publish_date = doc["doc_publish_date"]
timezone = publish_date.timetz().tzname()
hour = publish_date.hour
minute = publish_date.minute
second = publish_date.second
microsecond = publish_date.microsecond
nanosecond = publish_date.nanosecond
if all(
[
timezone == "UTC",
hour == 5,
minute == 0,
second == 0,
microsecond == 0,
nanosecond == 0,
]
):
return True
else:
return False
|
b85ffe21cdea9685906f9a633a59c15b9f781d71
| 56,508
|
def is_date(stamp):
"""
This function will return True or False, depending if the supplied stamp
can be interpreted as a date string of format DDMMYYYY
"""
if isinstance(stamp, str):
if len(stamp) == 8:
if stamp.isdigit():
DD = int(stamp[:2])
if DD > 31:
return False
MM = int(stamp[2:4])
if MM > 12:
return False
else:
return False
else:
return False
else:
return False
return True
|
16f4aa52cf55da97447e3f9298261bfc6463ced0
| 56,510
|
def dict_to_tuple_list(my_dict):
""" Given a dictionary where each k-v pair is of the form (x, [y]), convert the dictionary
into a list of tuples.
Example:
>>> dict_to_tuple_list({'x': [1, 2, 3], 'y':[4, 5, 6]})
[('x', 1), ('x', 2), ('x', 3), ('y', 4), ('y', 5), ('y', 6)]
"""
newList = list()
for i in my_dict.keys():
for j in my_dict.get(i):
newList.append((i, j))
return newList
|
747c75393fac770ff4b8ead3e2178996f91454b3
| 56,511
|
def populate_user_patterns(conf, args):
"""Populate a dictionary of configuration pattern parameters, "conf", from
values supplied on the command line in the structure, "args"."""
if args.cc_define1_pattern:
conf['cc_define1_pattern'] = args.cc_define1_pattern
if args.cc_define2_pattern:
conf['cc_define2_pattern'] = args.cc_define2_pattern
if args.cc_incdir_pattern:
conf['cc_incdir_pattern'] = args.cc_incdir_pattern
if args.cc_input_pattern:
conf['cc_input_pattern'] = args.cc_input_pattern
if args.cc_output_pattern:
conf['cc_output_pattern'] = args.cc_output_pattern
if args.ld_input_pattern:
conf['ld_input_pattern'] = args.ld_input_pattern
if args.ld_output_pattern:
conf['ld_output_pattern'] = args.ld_output_pattern
return conf
|
70723b6c8c43d8d1fdcdc0b7f95f7770531e364c
| 56,516
|
def row_divisibles(row):
"""Find two items in a list of integers that are divisible"""
row = list(row)
row_max = max(row)
for i in sorted(row):
for multiplier in range(2, int(row_max/i) + 1):
if i * multiplier in row:
return (i, i * multiplier)
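# Example (added for illustration): 2 evenly divides 8, so that pair is returned.
assert row_divisibles([5, 9, 2, 8]) == (2, 8)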
|
195362afb559cc85abff338cfa46a65bb82f3f52
| 56,519
|
import torch
import math
def torch_exp(x):
"""
Like ``x.exp()`` for a :class:`~torch.Tensor`, but also accepts
numbers.
"""
if torch.is_tensor(x):
return torch.exp(x)
else:
return math.exp(x)
|
dd0fa79147e4983609caa6a2ad01474879666261
| 56,522
|
def summary_from_package(package):
""" Create a summary dict from a package """
return {
"name": package.name,
"summary": package.summary or "",
"last_modified": package.last_modified,
}
|
b7b067ae30c2c6cdecaca5eb4a3617d716c7e52e
| 56,523
|
import torch
def create_square_grid(size, dim):
""" Create a regualar square grid of given dimension and size.
The grid will contain size^dim points. We alway grid over the unit cube in
dimension dim.
Parameters
----------
size: int
Number of point along one axis.
dim: int
Dimension of the space to grid.
Returns
-------
grid: torch.Tensor of shape (size, ..., size, dim)
Coordinates of each point in the grid, one axis per dimension.
"""
# Create one axis.
x = torch.linspace(0, 1, steps=size)
# Mesh with itself dim times. Stacking along dim -1 means creating a new dim
# at the end.
grid = torch.stack(torch.meshgrid(dim * [x]), dim=-1)
return grid
|
179ba546e937756f5ed1b288dc1fad72903a2011
| 56,524
|
def is_empty_row(listrow):
"""
Check if a CSV row is empty
:param listrow: CSV row
:return: True if the row is empty
"""
return len([c for c in listrow if c != '']) == 0
|
b798e37b3ad6f9875135c03d1524625773b641b4
| 56,526
|
def compare_roa(roa, origin, length):
"""Compare origin AS and prefix length against ROA."""
if length > roa["maxLength"]:
return False
roa_origin = roa["asn"]
if isinstance(roa_origin, int):
roa_origin = f"AS{roa_origin}"
if origin != roa_origin:
return False
return True
|
03a67cbccf64c4755ba24a2b16ef330bc0264968
| 56,527
|
import string
def strkey(val, chaffify=1, keyspace=string.ascii_letters + string.digits):
""" Converts integers to a sequence of strings, and reverse.
This is not intended to obfuscate numbers in any kind of
cryptographically secure way, in fact it's the opposite. It's
for predictable, reversible, obfuscation. It can also be used to
transform a random bit integer to a string of the same bit
length.
@val: #int or #str
@chaffify: #int multiple to avoid 0=a, 1=b, 2=c, ... obfuscates the
ordering
@keyspace: #str allowed output chars
-> #str if @val is #int, #int if @val is #str
..
from vital.security import strkey
strkey(0, chaffify=1)
# -> b
strkey(0, chaffify=4)
# -> e
strkey(90000000000050500502200302035023)
# -> 'f3yMpJQUazIZHp1UO7k'
strkey('f3yMpJQUazIZHp1UO7k')
# -> 90000000000050500502200302035023
strkey(2000000, chaffify=200000000000)
# -> 'DIaqtyo2sC'
..
"""
chaffify = chaffify or 1
keylen = len(keyspace)
try:
# INT TO STRING
if val < 0:
raise ValueError("Input value must be greater than -1.")
# chaffify the value
val = val * chaffify
if val == 0:
return keyspace[0]
# output the new string value
out = []
out_add = out.append
while val > 0:
val, digit = divmod(val, keylen)
out_add(keyspace[digit])
return "".join(out)[::-1]
except TypeError:
# STRING TO INT
out = 0
val = str(val)
find = str.find
for c in val:
out = out * keylen + find(keyspace, c)
# dechaffify the value
out = out // chaffify
return int(out)
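# Example (added for illustration): with the default chaffify of 1 the mapping round-trips.
token = strkey(123456)
assert isinstance(token, str)
assert strkey(token) == 123456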
|
c730d0a1b7be56ca9462dfec5d09f6659e08518f
| 56,529
|
import math
def laser_shot(character: dict, enemy: dict) -> int:
"""
Use Laser Shot skill.
This is a helper function for use_skill
:param enemy: a dictionary
:param character: a dictionary
:precondition: character must be a dictionary
:precondition: character must be a valid character created by character_creation function
:precondition: enemy must be a dictionary
:precondition: enemy must be a valid character generated by generate_enemy function
:postcondition: calculates damage equal to 3 + (Intellect divided by 10, rounded down)
:postcondition: prints the skill's phrase and damage the ability did
:return: damage as integer
"""
damage = 3 + math.floor(character["Characteristics"]["Intellect"] / 10)
print("\nYour servo-skull shots a laser beam from its eyes dealing {0} damage to {1}.\n".format(damage,
enemy["Name"]))
return damage
|
9ec8cb87927371349e0b73293235e21538d304b7
| 56,533
|
def get_account_from_fund_code(client, fund_code):
"""Get account number based on a fund code."""
if fund_code is None:
account = "No fund code found"
else:
response = client.get_fund_by_code(fund_code)
account = response.get("fund", [{}])[0].get("external_id")
return account
|
d91ac635c0fb2dcd3d4a5f1cba238f165bf74401
| 56,536
|
def diffNumAbs(arg1: int, arg2: int) -> int:
"""
The function takes two arguments arg1 and arg2, both int.
The function returns the absolute value of the difference of the two numbers passed as arguments.
Example :
>>> diffNumAbs(4,6)
2
>>>
"""
res = arg1 - arg2
return abs(res)
|
c0ac15cfe8b57213f22e39c6fc4e5465c63f6942
| 56,539
|
def exposure_machine_learner(ml_model, xdata, ydata, pdata):
"""Internal function to fit custom_models for the exposure nuisance model and generate the predictions.
Parameters
----------
ml_model :
Unfitted model to be fit.
xdata : array
Covariate data to fit the model with
ydata : array
Outcome data to fit the model with
pdata : array
Covariate data to generate the predictions with.
Returns
-------
array
Predicted values for the outcome (probability if binary, and expected value otherwise)
"""
# Fitting model
try:
fm = ml_model.fit(X=xdata, y=ydata)
except TypeError:
raise TypeError("Currently custom_model must have the 'fit' function with arguments 'X', 'y'. This "
"covers both sklearn and supylearner. If there is a predictive model you would "
"like to use, please open an issue at https://github.com/pzivich/zepid and I "
"can work on adding support")
# Generating predictions
if hasattr(fm, 'predict_proba'):
g = fm.predict_proba(pdata)
if g.ndim == 1: # allows support for pygam.LogisticGAM
return g
else:
return g[:, 1]
elif hasattr(fm, 'predict'):
g = fm.predict(pdata)
return g
else:
raise ValueError("Currently custom_model must have 'predict' or 'predict_proba' attribute")
|
40a04ff921677f73d07423310deefcdb02d885bd
| 56,541
|
def dedupe_with_order(dupes):
"""Given a list, return it without duplicates and order preserved."""
seen = set()
deduped = []
for c in dupes:
if c not in seen:
seen.add(c)
deduped.append(c)
return deduped
|
93326dddba608a9ae645064f246d0557047bb1be
| 56,544
|
from typing import List
from typing import Tuple
def extract_meeting_summaries(
meetings: List[str], split_meetings: List[Tuple[str, str, str]]
) -> Tuple[str, str, str]:
"""
Get meeting time and location summary strings.
Used in extract_meetings.
Parameters
----------
meetings:
meeting strings from extract_meetings
split_meetings:
split meetings from extract_split_meetings. List of meeting [days, time, location]
Returns
-------
times_summary, locations_summary
"""
# produce long_summary
times_long_summary = "\n".join(meetings)
# make times summary as first listed
times_summary = split_meetings[0][0] + " " + split_meetings[0][1]
# collapse additional times
if len(split_meetings) > 1:
times_summary = times_summary + f" + {len(split_meetings)-1}"
# make locations summary as first listed
locations_summary = split_meetings[0][2]
# locations_summary = location_urls[0] if len(location_urls) else ""
# collapse additional locations
if len(split_meetings) > 1:
locations_summary = locations_summary + f" + {len(split_meetings)-1}"
# some final touches
times_summary = times_summary.replace("MTWThF", "M-F")
locations_summary = locations_summary.replace("MTWThF", "M-F")
times_long_summary = times_long_summary.replace("MTWThF", "M-F")
if locations_summary == "" or locations_summary[:3] == " + ":
locations_summary = "TBA"
# handle redundant dash-delimited format (introduced in fall 2020)
if locations_summary.count(" - ") == 1:
locations_1, locations_2 = locations_summary.split(" - ")
# if location is redundant
if locations_2.startswith(locations_1):
locations_summary = locations_2
return times_summary, locations_summary, times_long_summary
|
11395b6d0bd05e445ba8abe1d2178e7fd2e77abc
| 56,545
|
import requests
def get_awards(agency, keyword, year):
"""
Get awards by agency, keyword, and year variables
Returns json() response object
"""
try:
base_url = f'https://www.sbir.gov/api/awards.json?keyword={keyword}&agency={agency}&year={year}'
return requests.get(base_url).json()
except Exception:
return 'error'
|
7ec7ecf5218c98c01803554c2590ae8fc2e63417
| 56,546
|
import codecs
import re
def _unescape(text):
"""Unescape unicode character codes within a string.
"""
pattern = r'\\{1,2}u[0-9a-fA-F]{4}'
decode = lambda x: codecs.getdecoder('unicode_escape')(x.group())[0]
return re.sub(pattern, decode, text)
|
5457bed6c1086f5989d56944a785bc17494aa739
| 56,549
|
def to_tuple(dataseq):
"""Converts a ctypes array to a tuple."""
return tuple(dataseq)
|
b116bfddae645548bce35a1d2329dd749b95aee9
| 56,553
|
def multilayer_getattr(obj, name: str):
"""Like getattr, except `name` can have dots in it."""
attr_names = name.split(".")
attr = obj
for attr_name in attr_names:
attr = getattr(attr, attr_name)
return attr
|
e1f7f17e3b0fdf8c42292fd2ee9c9b302e66561c
| 56,554
|
def strip_auth_from_url(parsed_url):
"""Return a URL from a urlparse object without a username or password."""
# Get a copy of the network location without the username or password.
straight_netloc = parsed_url.netloc.split('@')[-1]
# Replace the full network location with the stripped copy.
return parsed_url.geturl().replace(parsed_url.netloc, straight_netloc, 1)
|
da13a7da118e3e4561d7ef382dba2d3e79dbaa92
| 56,556
|
from collections import OrderedDict
def _top_level_tags(form):
"""
Returns an OrderedDict of the top-level tags found in the xml, in the
order they are found.
"""
to_return = OrderedDict()
element = form.get_xml_element()
if element is None:
return OrderedDict(sorted(form.form_data.items()))
for child in element:
# fix {namespace}tag format forced by ElementTree in certain cases (eg, <reg> instead of <n0:reg>)
key = child.tag.split('}')[1] if child.tag.startswith("{") else child.tag
if key == "Meta":
key = "meta"
to_return[key] = form.get_data('form/' + key)
return to_return
|
26e2b3ac9e8aa0f28aa95fdafd8a8b9b8a0812ed
| 56,561
|
def transform_misses(record):
"""Format the missed datasets record we got from the database to adhere to the response schema."""
response = {}
response["datasetId"] = dict(record).get("stableId")
response["internalId"] = dict(record).get("datasetId")
response["exists"] = False
# response["datasetId"] = ''
response["variantCount"] = 0
response["callCount"] = 0
response["sampleCount"] = 0
response["frequency"] = 0
response["numVariants"] = 0
response["info"] = {"access_type": dict(record).get("accessType")}
return response
|
a3e02cedf76147154e4929c6313ba67d29f45eb7
| 56,568
|
import torch
def select_yx(featmap, y, x):
"""
Select values at (y, x) coordinates from a feature map.
Args (tensor shapes):
featmap: (B, C, H, W)
y: (B, C)
x: (B, C)
"""
assert featmap.shape[:2] == x.shape == y.shape, 'X, Y coordinates should match.'
x = torch.clamp(x, 0, featmap.shape[-1] - 1)
y = torch.clamp(y, 0, featmap.shape[-2] - 1)
b, c, h, w = featmap.shape
y = y.view(b, c, 1, 1).repeat(1, 1, 1, w)
featmap = torch.gather(featmap, -2, y.long())
x = x.view(b, c, 1, 1)
featmap = torch.gather(featmap, -1, x.long())
return featmap.squeeze(-1).squeeze(-1)
|
a09bc4bfe07c0e5c7c47c123a37d1f9e7f3c4347
| 56,570
|
def select_from(items, indexes):
"""
:param items: a list of items.
:param indexes: an iterable of indexes within `items`.
:returns: a list of the items corresponding to the indexes.
"""
return [items[i] for i in indexes]
|
91edeb1cfdeaa2e57abbf0bb3f599c6855953b37
| 56,571
|
def format_trec_results(qid: str, doc: str, rank: int, score: float, run_id='RunId'):
"""
Produce a TREC formatted str of results.
:param qid: Query Id
:param doc: Document
:param rank: Rank position
:param score: Ranking score
:param run_id: Name for this run
:return: String in TREC format
"""
return '{}\t0\t{}\t{}\t{:.4f}\t{}'.format(qid, doc, rank, float(score), run_id)
|
dc5095ab2a5d98c1d5096ebbd85e08e792bff477
| 56,572
|
import json
def get_blocks_from_s3_ref(blocks_s3_ref: str, s3_client) -> list:
"""Return a list of blocks from an S3 reference file."""
blocks = json.loads(s3_client.get_object_from_s3(blocks_s3_ref))
for b in blocks:
if 'parentBlockIndex' in b:
del b['parentBlockIndex']
if 'blockIndex' in b:
del b['blockIndex']
return blocks
|
1b685ba120bbd10a4e1c7c58ad9bbc61ea39e63d
| 56,574
|
def transform_keys(dct, func):
""" apply a function to each key
"""
return dict(zip(map(func, dct.keys()), dct.values()))
|
e9cf8a29ffdd4482b94bde5f21fdb4aa7e098d9f
| 56,576
|
def process_channel_names(channel_names):
"""Process to obtain the electrode name from the channel name.
Parameters
----------
channel_names : list
Channel names in the EEG.
Returns
-------
channel_names : list
Processed channel names, containing only the name of the electrode.
"""
channel_names = [(elem.split())[-1] for elem in channel_names]
channel_names = [(elem.replace("-", " ").split())[0] for elem in channel_names]
return channel_names
|
1f4c1f28be8955944136900abe8868cd1958930e
| 56,577
|
def clean_sentence(sentence: str):
"""
Clean OpenIE sentences by replacing the bracket shortcuts back to valid brackets
:param sentence: a sentence
:return: cleaned sentence
"""
s = sentence.replace('-LRB- ', '(')
s = s.replace('-LSB- ', '(')
s = s.replace(' -RRB-', ')')
s = s.replace(' -RSB-', ')')
return s
|
e6800c757d6a485d1b386ff1ecc055a1e191614d
| 56,578
|
def append_results(results, base, c, conf):
"""Append results before calculating metrics."""
results.append({'truth name': base['name'],
'model name': c['name'],
'path': c['path'],
'location': conf['location'],
'var': c['var']}
)
return results
|
c9911e970217c021486e69dd2f031316b4e45ea6
| 56,579
|
def _getLocationWords(location, words_index):
"""
Retrieves the words found at the passage location
:param location: The passage location e.g. book/chapter/verse without z-padding
:param words_index:
:return: a list of words
"""
if location in words_index:
return words_index[location]
else:
return []
|
56159ff2153e5731ceefcc725d1a202271249c2b
| 56,581
|
def inv(q, p):
"""
calculate q^-1 mod p
"""
for i in range(p):
if q * i % p == 1:
return i
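# Example (added for illustration): 3 * 5 = 15 = 2 * 7 + 1, so 5 is the inverse of 3 mod 7.
assert inv(3, 7) == 5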
|
83ae3cb00813eb8351bfd413ce976e607497a322
| 56,587
|
import hashlib
def digest_data(data: bytes):
"""Compute the digest of a given input byte string, which are the first
8 bytes of its sha256 hash."""
m = hashlib.sha256()
m.update(data)
return m.digest()[:8]
|
4ab5efdfcbbaa96cde92cfd2286ef13f5d361695
| 56,589
|
def massage_key(key):
"""Massage the keybase return for only what we care about"""
return {
'fingerprint': key['key_fingerprint'].lower(),
'bundle': key['bundle']
}
|
229cf81b90e9d4602ad2e25f7cc6fcea6afd12e8
| 56,590
|
def broadcast(item, length, allowed_types, name="item"):
"""Broadcast item to given length.
Parameters
----------
item : object
Object to broadcast
length : int
Length to broadcast to
allowed_types : type or tuple of types
Types treated as scalar input (passed directly to isinstance)
name : str, optional
Name of item
Returns
-------
object:
The original item broadcasted to sequence form of length
Raises
------
TypeError
"""
if isinstance(item, allowed_types):
return length * (item,)
elif len(item) == 1:
return length * item
elif len(item) != length:
raise TypeError("'{0}' must be a single value, a list with " +
"one element or a list with {1} elements.".format(
name, length))
return item
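# Example (added for illustration): a scalar is repeated to the requested length,
# while a sequence of the right length is returned unchanged.
assert broadcast(2.0, 3, (int, float)) == (2.0, 2.0, 2.0)
assert broadcast([1, 2, 3], 3, (int, float)) == [1, 2, 3]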
|
d27f72b4b5ffce58b36e9416398f6c2594b04c2a
| 56,592
|
def read_byte(s):
"""
Read in one byte from serial, and return it as an int.
@param s: Serial object to be read.
@returns: Byte received from serial.
"""
b = s.read(1)
return ord(b)
|
304cd1d6da0f2970310c78e3a3715864a31bb3c2
| 56,597
|
import torch
def describe_data(D):
"""Prints size, min, max, mean and std of a matrix (numpy.ndarray or torch.Tensor)
"""
s = '{:8s} [{:.4f} , {:.4f}], m+-s = {:.4f} +- {:.4f}'
si = 'x'.join(map(str, D.shape))
if isinstance(D, torch.Tensor):
vals = D.min().item(), D.max().item(), D.mean().item(), D.std().item()
else:
vals = D.min(), D.max(), D.mean(), D.std()
return s.format(si, *vals)
|
73b2487a3281599c69d2289622972ac8cce7912b
| 56,599
|
import math
def ruze_loss_factor(surface_rms,wavelength):
"""
Loss due to reflecting surface irregularities in a paraboloid
Given a surface r.m.s. deviation from perfect, this returns the
efficiency of a reflector relative to one with a perfect surface at
the specified wavelength. surface_rms and wavelength must be in the
same units.
The aperture (or beam) efficiency is this times the ideal aperture
(or beam) efficiency considering blockage and beam taper.
@param surface_rms : r.m.s. of the surface deviation from a parabola
@type surface_rms : float
@param wavelength : in the same units as surface_rms.
@type wavelength : float
@return: 0 < float < 1
"""
return math.exp(-(4*math.pi*surface_rms/wavelength)**2)
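# Example (added for illustration): a surface r.m.s. of 1/16 of a wavelength gives
# exp(-(pi/4)**2), roughly 54% of the ideal efficiency.
assert abs(ruze_loss_factor(1.0, 16.0) - 0.54) < 0.01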
|
830fd6b3bf8f20cfad8dcaca2cfaec5c210fa717
| 56,600
|
import click
def validate_tags(value):
"""
Validate and parse optional EC2 tags.
"""
err_msg = ("Tags need to be specified as 'Key,Value' pairs "
"separated by a single comma. Key cannot be empty "
"or be made up entirely of whitespace.")
tags = value
result = []
for tag in tags:
if tag.count(',') != 1:
raise click.BadParameter(err_msg)
key, value = [word.strip() for word in tag.split(',', maxsplit=1)]
if not key:
raise click.BadParameter(err_msg)
result.append({'Key': key, 'Value': value})
return result
|
013957f01bf13fdccdddc864c9eda3e0b9f530a7
| 56,602
|
import pickle
import glob
def get_num_classes(data_dir):
"""
Get the number of classes.
:param data_dir: str - data directory
:return: int - number of classes
"""
mode = 'training' # arbitrary
loc = "{}/{}".format(data_dir, mode)
with open('{}/labels.pickle'.format(data_dir), 'rb') as f:
data = pickle.load(f)
modes = list(data.keys())
assert glob.glob(data_dir), "Check directory."
assert glob.glob("{}/*.jpg".format(loc)), "Check file extension (should be 'jpg')."
i = 0 # Arbitrarily chosen
return len(data[modes[i]][i][-1])
|
b335c64a54b26d1732274495f53feda07f394598
| 56,605
|
from typing import Dict
from typing import Any
async def attach_sample_count(db, document: dict) -> Dict[str, Any]:
"""
Attach the number of samples associated with the given label to the passed document.
"""
return {
**document,
"count": await db.samples.count_documents({"labels": document["id"]}),
}
|
79fba1a147ad9add707a54e208d5ce26699ba614
| 56,607
|
def mycipher(mystring:str)->str:
"""
This function performs an alphabetical shift cipher
# Input:
mystring: str, String over which cipher has to be done
# Returns:
str: Encrypted String
# Functionality:
Each character is shifted forward by 5; if it runs past the end of the alphabet, it wraps around to 'a'.
Only lowercase letters are used, so if the ordinal of the input character + 5 is greater than ord('z'),
we subtract 26 from the shift, resulting in -21 (5 - 26); otherwise
we add 5 directly.
"""
result = "".join([chr(ord(x)-21) if (ord(x)+5) > ord('z') else chr(ord(x) + 5) for x in mystring])
return result
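# Example (added for illustration): letters shift forward by 5 and wrap past 'z'.
assert mycipher("abc") == "fgh"
assert mycipher("xyz") == "cde"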
|
1125907eaba619da319fe18c05883ac136654e11
| 56,609
|
def get_crop(mask_image, scale_ratio=1.2):
"""
Get bounding box of square encompassing all positive
values of the mask, with a margin according to scale_ratio
Args:
scale_ratio: final crop is obtained by taking the square
centered around the center of the tight bounding box
around the masked values, with a square size of
scale_ratio * size of largest bounding box dimension
"""
mask_image = mask_image.numpy()
xs, ys = (mask_image[:, :, ::3].sum(2) > 0).nonzero()
x_min = xs.min()
x_max = xs.max()
y_min = ys.min()
y_max = ys.max()
radius = max((x_max - x_min), (y_max - y_min)) // 2 * scale_ratio
x_c = (x_max + x_min) / 2
y_c = (y_max + y_min) / 2
x_min = max(int((x_c - radius).item()), 0)
y_min = max(int((y_c - radius).item()), 0)
x_max = int((x_c + radius).item())
y_max = int((y_c + radius).item())
return x_min, y_min, x_max, y_max
|
00c882abfb5ff0a461dc1d3b930471fc4c5ad2fe
| 56,612
|
def flip_array_bit(ba, bitnum):
"""flips a bit in an array, 0 = MSB. Works with numpy arrays or byte arrays"""
ba[bitnum >> 3] ^= (1 << (7 - (bitnum % 8)))
return ba
|
31358bb6d4b1ffe186ba1427ea704fe784e12b07
| 56,613
|
import requests
from bs4 import BeautifulSoup
def get_soup(url):
"""
Makes a request to the given url and returns a BeautifulSoup instance of the html
"""
res = requests.get(url)
if len(res.content) < 10:
return None
return BeautifulSoup(res.content, "html.parser")
|
c19ca98f0ae707ea4caf4765548fe8517ca4aa1b
| 56,619
|
from math import ceil
def get_week_number(dt):
"""
Returns the week number of the month for the specified date.
https://stackoverflow.com/questions/3806473/python-week-number-of-the-month/16804556
"""
first_day = dt.replace(day=1)
dom = dt.day
adjusted_dom = dom + first_day.weekday()
return int(ceil(adjusted_dom/7.0))
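# Example (added for illustration): 2021-03-01 fell on a Monday, so the 15th is in week 3.
from datetime import date
assert get_week_number(date(2021, 3, 15)) == 3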
|
114e3bed7d3923c886bb3fd640f16992cbe7325e
| 56,620
|
def _convert_soap_method_args(*args):
"""Convert arguments to be consumed by a SoapClient method
Soap client required a list of named arguments:
>>> _convert_soap_method_args('a', 1)
[('arg0', 'a'), ('arg1', 1)]
"""
soap_args = []
for arg_n, arg in enumerate(args):
soap_args.append(('arg' + str(arg_n), arg))
return soap_args
|
a57c993c5bff622d52a5b8bb06259644e725ec6b
| 56,621
|
def dict_to_str(d):
"""
Given a dictionary d, return a string with
each entry in the form 'key: value' and entries
separated by newlines.
"""
vals = []
for k in d.keys():
vals.append('{}: {}'.format(k, d[k]))
v = '\n'.join(vals)
return v
|
a2c3c87715ccdacafb76dba57050ea2b942ce0d2
| 56,625
|
async def get_random_bnum(session):
"""Use i'mfeelinglucky to get a random bnum"""
async with session.get("https://iiif.wellcomecollection.org/service/suggest-b-number?q=imfeelinglucky") as response:
json = await response.json()
return json[0].get("id", "")
|
faac8ce405eff04ca255c7a9aaeb9496f12ca85c
| 56,631
|
def combine_counts(counts1, counts2):
"""
Combine two counts dictionaries. Note: counts1 is updated in place and returned.
"""
ret = counts1
for key, val in counts2.items():
if key in ret:
ret[key] += val
else:
ret[key] = val
return ret
|
dd73763485ed5462dbb7219f3606c57633d00aed
| 56,632
|
def read_payload(stream, size):
"""Reads a payload of a given size from a stream."""
return stream.read(size)
|
1b994cbaedacdc7b6400ef59c105b6cbb2389d6b
| 56,634
|
def org_active_period(df):
"""Total number of days 'active, i.e. first commit to last commit'"""
duration = abs((df.index[-1]-df.index[1]).days)
return duration
|
4d89e2918c9e69ccb658960771a741b5cc601ef6
| 56,635
|
import base64
def auth_url_encode(byte_data):
"""
Safe encoding handles + and /, and also replace = with nothing
:param byte_data:
:return:
"""
return base64.urlsafe_b64encode(byte_data).decode('utf-8').replace('=', '')
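# Example (added for illustration): the '=' padding that plain urlsafe base64 adds is stripped.
assert base64.urlsafe_b64encode(b"a") == b"YQ=="
assert auth_url_encode(b"a") == "YQ"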
|
809947646f68fdf3a3f17729d4d8b09cda1bd03c
| 56,637
|
def multiply_matrix_by_real(a, real):
"""
Return result of real*a
:param a: a 2 dimensional array
:param real: a real
:return: a 2 dimensional array
"""
return [[j*real for j in i] for i in a]
|
64fb95af49607d79fa0c5eed6e9674708ecf2890
| 56,640
|
def trim_mask_and_true_seqs(mask_seq, true_seq):
"""Given an equal-length mask and sequence, removes gaps from the ends of both."""
mask_seq_no_left = mask_seq.lstrip('-')
mask_seq_no_right = mask_seq.rstrip('-')
n_removed_left = len(mask_seq) - len(mask_seq_no_left)
n_removed_right = len(mask_seq) - len(mask_seq_no_right)
n_removed_right = None if n_removed_right == 0 else -n_removed_right
true_seq = true_seq[n_removed_left:n_removed_right]
mask_seq = mask_seq.strip("-")
return mask_seq, true_seq
|
3d7dcc31ca6e7f89590d6b988a61792763b12375
| 56,644
|
def drag(M):
"""Drag coefiicient of a cylinder in transonic flight.
Reference: S. F. Hoerner, "Fluid-Dynamic Drag" Ch 16.3
Arguments:
M (scalar): Mach number [units: dimensionless].
"""
# Hoerner says K_fore = 0.9. The formula below is a hack to
# make the curve match Hoerner ch 16 figure 14.
K_fore = 0.9 if M > 1 else 0.8
# Stagnation pressure increment / dynamic pressure
qq = 1 + M**2/4 + M**4/10 # Eqn 15.4
if M >= 1:
# Include pressure loss due to normal shock
qq = 1.84 - 0.76/M**2 + 0.166/M**4 + 0.035/M**6 # Eqn 16.4
C_D = K_fore * qq
return C_D
|
0078b240e8957ddc9b3f528c1856c264f0429948
| 56,645
|
def replace_text(text, variables):
"""
Replace some special words in text to variable from dictionary
@param text: raw text
@param variables: dictionary of variables for replace
"""
for name, value in variables.items():
text = text.replace('#%s#' % name, str(value))
return text
|
185a02654f0fef5d979d9fd0578f6a557a1298af
| 56,646
|
def check_last_dates(db):
"""
Check regions, that have last_date set to zero.
:param db: instance of DatabaseQueries class <object>.
:return: list of region ids with last_date = 0 <list> or None if empty list.
"""
last_dates_params = {'table': 'regions',
'conditions': [],
'condition_data': (),
'filters': ['id', 'last_date']}
last_dates = db.select(**last_dates_params)
# select zero value last_dates
zero_last_dates = [x for x in last_dates if x[1] == 0]
# if there are zero last dates return a list of ids with zero last date
if zero_last_dates:
return [x[0] for x in zero_last_dates]
else:
return None
|
d665dec365a1ce1340cb8d562fe3da38844aef7b
| 56,649
|
def carrier_child_fileappend(child, files, baseurl, blitz=False):
"""
Append bar file links to a list from a child element.
:param child: Child element in use.
:type child: xml.etree.ElementTree.Element
:param files: Filelist.
:type files: list(str)
:param baseurl: Base URL, URL minus the filename.
:type baseurl: str
:param blitz: Whether or not to create a blitz package. False by default.
:type blitz: bool
"""
if not blitz:
files.append(baseurl + child.get("path"))
else:
if child.get("type") not in ["system:radio", "system:desktop", "system:os"]:
files.append(baseurl + child.get("path"))
return files
|
15fdd9e347419021457d39520ae52da8f2c9cd56
| 56,650
|
def _IsVolumeMountKey(key):
"""Returns True if the key refers to a volume mount."""
return key.startswith('/')
|
a5ee0763b1aee8db99a19b408e2a92e6df08dfda
| 56,651
|
def _point_within_bounds(bounds, p):
"""Check if point is within bounds, end points inclusive"""
A, B = bounds
# we have to add epsilon since test against horizontal or vertical
# lines may fail if the point is off by numerical precision
eps = 1e-10
(Ax, Ay), (Bx, By), (px, py) = A, B, p
return (
(min((Ax,Bx))-eps<=px<=max((Ax,Bx))+eps) and
(min((Ay,By))-eps<=py<=max((Ay,By))+eps)
)
|
c63e4f727ff797674e588f46f9267c6ba526105d
| 56,655
|
def is_before_version(ref_version, check_version):
"""Check if a version str is before another version str
Parameters
----------
ref_version : str
Reference version to compare with ("1.2.3" for instance)
check_version : str
Version to check if before reference ("1.3.4" for instance)
Returns
-------
is_before : bool
True if check_version is before ref_version
"""
if ref_version == check_version:
return False
ref_list = [int(val) for val in ref_version.split(".")]
check_list = [int(val) for val in check_version.split(".")]
for ii in range(len(check_list)):
if len(ref_list) < ii + 1:
return False
if ref_list[ii] > check_list[ii]:
return True
elif ref_list[ii] < check_list[ii]:
return False
# Case 2.1.14.2 vs 2.1.14
if len(ref_list) > len(check_list):
return True
return False
|
d0648dcd4ea17cd7dfcbabed12b96ea40d134c3f
| 56,656
|
def compare(v1, v2):
"""
Order smallest to largest.
"""
if v1 < v2:
return -1
elif v1 > v2:
return 1
return 0
|
24727b7e6b273b4bd25644191393cb8b2263286d
| 56,663
|
def pages(count, key="page"):
"""
Renders a pages block [<<] [1] [2] [3] [>>]
:param count: Maximum of pages
:param key: A key from the context to determine the current page.
"""
return {
"class": "pages",
"key": key,
"count": count
}
|
b70fd40fc5679e3f31fe291e9576dedd2a0fc8c7
| 56,664
|
from typing import List
def nifs3_m(x: List[float], y: List[float]) -> List[float]:
"""Generate the NIFS3 moment values.
Parameters
----------
x : List[float]
Values on the X axis.
y : List[float]
Values on the Y axis.
Returns
-------
List[float]
NIFS3 moments.
"""
tmp = lambda x1, x2, y1, y2: (y2-y1) / (x2-x1)
tmp2 = lambda x1, x2, x3, y1, y2, y3: (tmp(x2, x3, y2, y3)-tmp(x1, x2, y1, y2)) / (x3-x1)
n = len(x)-1
h = [0.0] + [x[i]-x[i-1] for i in range(1, n+1)]
lam = [0.0] + [h[i] / (h[i]+h[i+1]) for i in range(1, n)]
d = [0.0] + [6.0*tmp2(x[i], x[i+1], x[i+2], y[i], y[i+1], y[i+2]) for i in range(0, n-1)]
q = [0.0]
p = [0.0]
u = [0.0]
for i in range(1, n):
p.append(lam[i]*q[i-1] + 2)
q.append((lam[i]-1) / p[i])
u.append((d[i] - lam[i]*u[i-1]) / p[i])
M = [0.0 for i in range(n+1)]
M[n-1] = u[n-1]
for k in range(2, n-1):
i = n-k
M[i] = u[i] + q[i]*M[i+1]
return M
|
ce442a2b114171b24a7549f248f7075184a53446
| 56,667
|
from typing import Dict
from typing import Any
from typing import Sequence
import shlex
def _construct_spark_submit_command(spark_opts: Dict[str, Any], app_and_app_arguments: Sequence[str]) -> str:
"""Construct a spark-submit command from smspark-submit options, app, and app arguments.
Args:
spark_opts (Dict[str, Any]): A mapping from spark-submit option names to their values.
app_and_app_arguments (Sequence[str]): A sequence of strings consisting of the application jar or file,
and application arguments.
Returns:
str: The string to be used as the spark-submit command.
"""
spark_options_list = []
for key, val in spark_opts.items():
# "-v" or "--verbose" is an option flag, unlike the long options, which require a value.
if key == "verbose" and val is True:
spark_options_list.append("--verbose")
continue
# Undo collision between python built-in "class" and spark "--class" option.
if key == "class_":
key = "class"
# Python converts hyphenated options ("py-files") to underscored attributes ("py_files"). Undo underscores.
if "_" in key:
key = key.replace("_", "-")
if val:
spark_options_list.append(f"--{key}")
spark_options_list.append(val)
cmd = ["spark-submit", "--master", "yarn", "--deploy-mode", "client"]
cmd.extend(spark_options_list)
cmd.extend(app_and_app_arguments)
cmd_string = " ".join(shlex.quote(c) for c in cmd)
return cmd_string
|
a4a3b1f234c804ae57a4af15f3604823bcbdda1d
| 56,668
|
def build_score_args(args):
"""Compose command line for the `score` program."""
command_line = []
if args.labels_file:
command_line += [
'--SourceLabels',
'--SourceLabelCountsLHS',
'--SourceLabelSet',
]
if args.parts_of_speech:
command_line.append('--PartsOfSpeech')
if args.inverse:
command_line.append('--Inverse')
if args.args is not None:
command_line.append(args.args)
return ' '.join(command_line)
|
574f5d9e356f1be00e5dded2fb307eb297290e8f
| 56,669
|
def integer(input):
"""Convert the given input to an integer value.
:param input: the value to convert to an integer
:type input: any
:returns: converted integer value
:rtype: int
"""
try:
return int(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to an integer value.".format(input))
|
9a5a921a69271f9f8159ab6569270d35f9e257bd
| 56,671
|
def _glyph_kerning_attr(glyph, side):
"""Return leftKerningGroup or rightKerningGroup depending on the UFO
group's side (1 or 2).
"""
if int(side) == 1:
return "rightKerningGroup"
else:
return "leftKerningGroup"
|
46c0893fe31e44ac28ab4dad832d1e6363fb1ab9
| 56,673
|
def doc_summary(lines):
"""Extract summary of docs."""
summary = []
for line in lines:
stripped = line.strip().lower()
if (stripped.startswith('to use this normalizer') or
stripped.startswith('use ``method')):
continue
if (line.startswith('Parameters') or line.startswith('Example')
or line.startswith('.. note::')):
break
summary.append(line)
return summary
|
7640e91f8fa286567c987359add39404800db840
| 56,675
|
def lower(value):
"""Lowercase the string passed as argument"""
return value.lower() if value else value
|
d3ca969eb536c23503afa93d1872d1d600e93b80
| 56,676
|
def maneuverToDir(str: str) -> int:
"""
Maps a Dubins curve action to an integer in {-1, 0, 1}
Parameters
----------
str: str
dubins curve action
Returns
-------
int
L -> 1, R -> -1, S -> 0
"""
if str == 'L':
return 1
if str == 'R':
return -1
return 0
|
a70d65f89c20e281eae6b69443a9bdc9fbe04eb1
| 56,677
|
def intparse(text):
"""Parse a command line argument as an integer.
Accepts 0x and other prefixes to allow other bases to be used."""
return int(text, 0)
|
6fc362a1c43124a7f6bcf1f940e238d6eeac9a04
| 56,678
|
def get_constants_from_Y_G(y, g):
"""
Calculates the remaining elastic quantities given the young's modulus (y),
and the shear modulus (g)
Returns:
(dict): mapping from string symbol to float value
"""
# Bulk modulus
b = y * g / (3 * (3 * g - y))
# Poisson Ratio
v = y / (2 * g) - 1
# Lame's First Parameter
l = g * (y - 2 * g) / (3 * g - y)
# P-Wave Modulus
p = g * (4 * g - y) / (3 * g - y)
return {"K": b, "v": v, "l": l, "M": p}
|
5a21fd7fbc68427845e9bcddf65fdb741ec51a53
| 56,679
|
import ast
from typing import List
def _get_module_function_names(module_node: ast.Module) -> List[str]:
"""For a module node, get a list of the function names defined in the
module scope."""
result = []
for node in ast.iter_child_nodes(module_node):
if isinstance(node, ast.FunctionDef):
result.append(node.name)
return result
|
115796a6e860c9aaa80e1db60b43456a05ae558f
| 56,680
|
def alltypes_callback(conn, object_name, methodname, **params):
# pylint: disable=attribute-defined-outside-init, unused-argument
# pylint: disable=invalid-name
"""
InvokeMethod callback defined in accord with pywbem
method_callback_interface which defines the input parameters and returns
all parameters received.
"""
return_params = [params[p] for p in params]
return_value = 0
return (return_value, return_params)
|
afa90bc0f492fee6339deb454d87d27edbe92c34
| 56,683
|
from typing import Iterator
def create_index(
dictset: Iterator[dict],
index_column: str) -> dict:
"""
Create an index of a file to speed up look-ups, it is expected that the
value in the index_column is unique but this is not enforced.
Parameters:
dictset: iterable of dictionaries
The dictset to process
index_column: string
the column in the dictset to index on
Returns:
dictionary
"""
index = {}
for record in dictset:
index_value = record[index_column]
index[index_value] = record
return index
|
89aaae999dc0ab770a2a431a74e9eb986fe16241
| 56,685
|
def is_palindrome(word):
"""Check if input is a palindrome."""
if len(word) <= 1:
return True
return word[0] == word[-1] and is_palindrome(word[1:-1])
|
a4801c6cfeadff79f9b0bd0dc26f8d68cf7e28f3
| 56,686
|
def parse_response(response):
"""Parse Office365 Endpoints API results to a list of IP addresses."""
ip_list = list()
for entry in response:
if 'ips' in entry: # ignore everything that isn't an IP
ip_list += entry['ips']
clean_list = list(dict.fromkeys(ip_list)) # automatically remove duplicates
return clean_list
|
0b268909d50cc7a1319b439eac2e3adb44d7610b
| 56,688
|
def convert_time(time):
"""Convert a time in seconds into the biggest unit"""
units = [
(24 * 60 * 60, 'days'),
(60 * 60, 'hours'),
(60, 'minutes'),
(1, 'seconds'),
]
if time == 0:
return ('0', 'seconds')
for unit in units:
if time >= unit[0]:
return ('{0}'.format(time // unit[0]), unit[1])
|
7f3e75073f5f323546ed5a6f48ac9a3351ce62e8
| 56,689
|
import struct
def uint32(n):
""" UINT32(n) is the 4 byte value of n in big-endian (network) order. """
return struct.pack('!I', n)
|
c2e73d25e7ff32ab85980e0f0d287bf154a58213
| 56,694
|
def join_results(search_type: str, prior_results: dict, new_results: list) -> dict:
"""
Join the new results with the prior results disjunctively or conjunctively.
Args:
search_type (str): "disjunctive" (or the results) or "conjunctive" (and the results)
prior_results (dict): results from prior searches. (key = db+ed+rec)
new_results (list): results from latest search.
Returns:
(dict): joined results
"""
# Disjunctive search means merge all the search results into a super-set
# containing all results found in all searches.
if search_type == "disjunctive":
for result in new_results:
key = "{}-{}-{}".format(result.db, result.ed, result.rec)
prior_results[key] = result
return prior_results
# Conjunctive search means the end result must be just those results
# that were in each sub search.
# First, convert the array to a dictionary
new_results_dict = {}
for result in new_results:
key = "{}-{}-{}".format(result.db, result.ed, result.rec)
new_results_dict[key] = result
# Now find the keys in common
if prior_results:
merged_result = {key: prior_results[key] for key in prior_results.keys() & new_results_dict.keys()}
else:
merged_result = new_results_dict
return merged_result
|
66cf038a2000ece8c70cf1b92794142cae21aeb5
| 56,696
|