content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def safe_col_name(args_pair):
    """Return a SQL-safe version of a column name.

    Strips surrounding whitespace and replaces spaces, periods, and commas
    with underscores; falls back to the column's index when the cleaned
    name is empty, so every column keeps a unique, non-blank name.

    Typically called with
    ``df.columns = [*map(safe_col_name, enumerate(df.columns.to_list()))]``

    Args:
        args_pair: tuple of arguments from map function in `(idx, col)`

    Returns:
        string: safely formatted string for SQLite
    """
    idx, raw = args_pair
    cleaned = raw.strip()
    for bad_char in (' ', '.', ','):
        cleaned = cleaned.replace(bad_char, '_')
    return cleaned if cleaned else str(idx)
|
51256b15ee2fcc55cbc77dbdc2aa03408a6f1e26
| 28,585
|
def podcast_info(podcast_obj):
    """Convert a gpodder podcast object to a plain dictionary of selected fields."""
    return {
        field: podcast_obj[field]
        for field in ('title', 'description', 'website', 'subscribers', 'logo_url')
    }
|
a143d4b64e650ea38f9d6b6c6eb11f43a3c1cc3a
| 602,370
|
def chunk_string(s, l=10):
    """
    Split a string into chunks of a set length.

    Args:
        s: The string to chunk.
        l: The length of the chunks (the final chunk may be shorter).

    Returns:
        A list containing the string chunks.
    """
    offsets = range(0, len(s), l)
    return [s[offset:offset + l] for offset in offsets]
|
289bd981f91a3404810af1006770c8d7966c3ecf
| 467,105
|
def pad(region, margins):
    """
    Pad region according to a margin

    :param region: The region to pad
    :type region: list of four floats
    :param margins: Margin to add
    :type margins: list of four floats
    :returns: padded region
    :rtype: list of four float
    """
    result = list(region)
    # First two components grow outward (subtract), last two add.
    signs = (-1, -1, 1, 1)
    for i, sign in enumerate(signs):
        result[i] = result[i] + sign * margins[i]
    return result
|
a56085f5e42122307cd4236d051447c66aaa68f9
| 65,093
|
from typing import Dict
def strip_spaces_in_options(opt_dict: Dict[str, str]):
    """Return a copy of *opt_dict* with surrounding whitespace removed from keys and values."""
    cleaned = {}
    for key, value in opt_dict.items():
        cleaned[key.strip()] = value.strip()
    return cleaned
|
caa08fbc98573bab50c500871562bf2f1b260d3e
| 513,294
|
from typing import List
def combine(nums: List[int], ops: List[str]) -> str:
    """Return the expression where the <nums> and <ops> are combined
    (each operator interleaved between consecutive numbers)."""
    pieces = [str(nums[i]) + ops[i] for i in range(len(ops))]
    pieces.append(str(nums[len(nums) - 1]))
    return "".join(pieces)
|
144557dafbc620809f3276a28cc1cba138f0c045
| 490,850
|
import re
def change_link_markdown(description):
    """Change markdown styling of links to match Slack markdown styling.

    Description text links formatted as ``[link name](URL)`` are rewritten
    as ``<URL|link name>``.

    Args:
        description: text possibly containing markdown-style links.

    Returns:
        The text with every markdown link converted to Slack form.
    """
    # Raw strings: the previous plain strings contained invalid escape
    # sequences (e.g. '\[') which emit SyntaxWarning on modern Python.
    return re.sub(
        r'\[(?P<label>[^\[\]]+)\]\((?P<url>[^()]+)\)',
        r'<\g<url>|\g<label>>',
        description,
    )
|
ac6844ad9da4d02f771697c146f220ef81e08bbd
| 429,857
|
from functools import reduce
def combine_int_overlaps(ranges):
    """
    Merge the range pairs if there is overlap between the given ranges.

    Note that neighboring numbers lead to a merged range (e.g. an end of 10
    and a start of 11 are collapsed).

    Example:
        [(1, 10), (11, 20)] -> [(1, 20)]

    Refer to the original code at https://stackoverflow.com/a/59378428

    Args:
        ranges(list):
            List containing ranges.
            Example: [(102, 103), (104, 109), (107, 120)]

    Returns:
        merged_list (list):
            List containing the combined ranges.
            Example: [(102, 120)]
    """
    ordered = sorted(ranges, key=lambda pair: pair[0])
    merged_list = ordered[0:1]
    for current in ordered[1:]:
        last = merged_list[-1]
        # Overlapping or directly adjacent: fold into the previous span.
        if last[1] >= current[0] - 1:
            merged_list[-1] = (min(*last, *current), max(*last, *current))
        else:
            merged_list.append(current)
    return merged_list
|
6ef8bea2fa7d87efa216b13aaee47ed558ebea06
| 487,658
|
import math
def get_param_score(param, max_value, weight=1):
    """Return parameter score given its current value, max value and
    parameter weight.

    The score is the log-scaled ratio of ``param`` to the larger of
    ``param`` and ``max_value``, multiplied by ``weight`` — so it is capped
    at ``weight`` when ``param`` exceeds ``max_value``.
    """
    numerator = math.log(1 + param)
    denominator = math.log(1 + max(param, max_value))
    return (numerator / denominator) * weight
|
e614f2f27bc354c1fa1e9ba7a8bb0025bb4aac9f
| 568,336
|
def get_publication(context, pub):
    """Get a single publication wrapped in the expected dict shape.

    ``context`` is accepted for signature compatibility but unused here.
    """
    return dict(publication=pub)
|
9d271a98d97e99de46ead289f179321b7fe2a478
| 454,734
|
def monkey_patch_override_instance_method(instance):
    """
    Override an instance method with a new version of the same name. The
    original method implementation is made available within the override method
    as `_original_<METHOD_NAME>`.

    Usage (as a decorator factory)::

        @monkey_patch_override_instance_method(obj)
        def method_name(self, ...):
            ...

    NOTE(review): `perform_override` has no return statement, so the
    decorated name in the caller's scope becomes None — possibly intended
    to prevent direct calls; confirm with callers.
    """
    def perform_override(override_fn):
        # The override is matched to the target method purely by its name.
        fn_name = override_fn.__name__
        original_fn_name = '_original_' + fn_name
        # Override instance method, if it hasn't already been done
        # (re-applying would clobber the saved original with an override).
        if not hasattr(instance, original_fn_name):
            original_fn = getattr(instance, fn_name)
            setattr(instance, original_fn_name, original_fn)
            # __get__ binds the plain function to `instance` so it receives
            # `self` like a normal bound method.
            bound_override_fn = override_fn.__get__(instance)
            setattr(instance, fn_name, bound_override_fn)
    return perform_override
|
013bbcff43dc0b66ba78c3519ef1370be5650467
| 114,213
|
import math
def Vabrms_calc(va, vb):
    """Inverter terminal voltage - line to line RMS.

    Computes |va - vb| / sqrt(2); assumes va/vb are peak-valued quantities
    (confirm with caller).
    """
    difference = va - vb
    return abs(difference) / math.sqrt(2)
|
aeb9b30990513f88d4a671c7de035d0a5cd64296
| 10,048
|
import importlib
def load_from_package(name, attr):
    """Load `attr` from the package `name`, returning None on any failure
    (missing package, package resolving to None, or missing attribute)."""
    try:
        module = importlib.import_module(name)
    except ImportError:
        return None
    if module is None:
        return None
    return getattr(module, attr, None)
|
95942db4a0ff5cd8f8e3dc6f0f7985a3345b869f
| 578,246
|
import re
import glob
def find_files(input_path, framerange=None):
    """
    Discovers files on the filesystem.

    :param input_path: Path to the file sequence (printf-style frame
        placeholder, e.g. ``shot.%04d.exr``)
    :param framerange: optional framerange such as ``"1-3,7"``
    :return: array of single file paths
    """
    if '%' not in input_path:
        # Not a sequence pattern — treat as one literal file.
        return [input_path]
    files = []
    if framerange:
        # Expand explicit frames/ranges without touching the filesystem.
        for part_range in framerange.split(','):
            if '-' in part_range:
                first, last = part_range.split('-')
                for frame in range(int(first), int(last) + 1):
                    files.append(input_path % frame)
            else:
                files.append(input_path % int(part_range))
        return files
    # No range given: glob the filesystem instead. BUG FIX: the previous
    # pattern used `[exr|EXR]`, a character class matching ONE character,
    # not the alternation `exr|EXR` it intended; use a real alternation so
    # the whole extension is captured and preserved in the replacement.
    glob_pattern = re.sub(r'(%0[4-8]d)(\.(?:exr|EXR))', r'*\2', input_path)
    return sorted(glob.glob(glob_pattern))
|
8f2818b6c8b72f344c70adee0e79ac52c2313902
| 14,927
|
import time
def log_str(proc_str, start):
"""Create a preamble string for stdout."""
former = "[{:>4}".format(proc_str)
latter = "] {:.2f}".format(round(time.time() - start, 3))
return former + latter
|
5f4f82e758cf87ff643a20928e5c0f7368ef537f
| 10,218
|
def model(p, x):
    """Evaluate the cubic model p[0] + p[1]*x + p[2]*x^2 + p[3]*x^3 at x."""
    constant = p[0]
    linear = p[1] * x
    quadratic = p[2] * x ** 2.
    cubic = p[3] * x ** 3.
    return constant + linear + quadratic + cubic
|
fe923f6f6aea907d3dc07756813ed848fbcc2ac6
| 708,668
|
def generate_policy(principal_id, effect, method_arn):
    """
    Utility function to generate an API Gateway policy document.

    Args:
        principal_id (str): request's user
        effect (str): Allow or Deny
        method_arn (str): resource's ARN

    Returns: dict, or None when effect/method_arn is missing
    """
    if not (effect and method_arn):
        return None
    statement = {
        "Action": "execute-api:Invoke",
        "Effect": effect,
        "Resource": method_arn,
    }
    return {
        "principalId": principal_id,
        "policyDocument": {
            "Version": "2012-10-17",
            "Statement": [statement],
        },
    }
|
5558121401a265705434cdbd5a6418cfa7436907
| 202,521
|
def _FindOrAddSolution(solutions, name):
    """Find a solution of the specified name from the given list of solutions.

    If no solution with the specified name is found, a new solution with that
    name is appended to the list, so this function always returns a solution.

    Args:
        solutions: The list of solutions to search from (mutated on miss).
        name: The solution name to search for.

    Returns:
        The solution with the specified name.
    """
    found = next((s for s in solutions if s['name'] == name), None)
    if found is not None:
        return found
    created = {'name': name}
    solutions.append(created)
    return created
|
50d7d93a0a43062ceba2abd8677e6ca77596911e
| 16,880
|
def val_err_str(val: float, err: float) -> str:
    """
    Get a float representation of a value/error pair and create a string representation
    12.345 +/- 1.23 --> 12.3(12)
    12.345 +/- 0.012 -> 12.345(12)
    12345 +/- 654 ---> 12340(650)

    :param val: float representing the value
    :param err: float representing the error in the value.
        NOTE(review): err <= 0 would loop forever in the scaling below,
        and err == 10 exactly falls through both loops — confirm callers
        always pass a positive error.
    :return: a string representation of the value/error pair
    """
    err_sig_figs = 2  # future upgrade path is to allow user to set this
    dps = 2 - err_sig_figs
    if err < 10:
        # Scale the error up until it has two digits before the point,
        # tracking how many decimal places that shift represents.
        while err < 10.:
            err *= 10
            dps += 1
        err = round(err, 0)
    else:  # err > 10
        # Scale the error down to two digits, then undo the scaling so the
        # bracketed error is displayed at its full magnitude.
        while err > 100.:
            err /= 10
            dps -= 1
        err = round(err, 0) * 10 ** (-dps)
    # Round the value to the same decimal position as the error.
    val = round(val, dps)
    return f"{val:.{max(0, dps)}f}({err:.0f})"
|
5b759ff8e6996704edb7f6b68f6cb7e307593c9e
| 705,487
|
def stripped_string_concat(str1, str2):
    """Concatenates passed strings (space-separated) and strips surrounding
    whitespace from the result.

    :param str1: First string
    :type str1: str
    :param str2: Second string
    :type str2: str
    :return: A string with truncated spaces
    :rtype: str
    """
    combined = '{} {}'.format(str1, str2)
    return combined.strip()
|
17e45018e03f68ac5b635c149cc413e41a2cb343
| 40,633
|
import click
def validate_profile(context, param, value):
"""
Validates existance of profile.
Returns the profile name if it exists; otherwise throws BadParameter
"""
if value in context.obj.configuration.profiles():
return value
else:
raise click.BadParameter("\"%s\" was not found" % value)
|
ac1fd3caa99a510173aa96f4c781abfacb6eed97
| 8,607
|
import logging
def setup_logging(handler, exclude=['opbeat',
'gunicorn',
'south',
'opbeat.errors']):
"""
Configures logging to pipe to Opbeat.
- ``exclude`` is a list of loggers that shouldn't go to Opbeat.
For a typical Python install:
>>> from opbeat.handlers.logging import OpbeatHandler
>>> client = Opbeat(...)
>>> setup_logging(OpbeatHandler(client))
Within Django:
>>> from opbeat.contrib.django.logging import OpbeatHandler
>>> setup_logging(OpbeatHandler())
Returns a boolean based on if logging was configured or not.
"""
logger = logging.getLogger()
if handler.__class__ in map(type, logger.handlers):
return False
logger.addHandler(handler)
# Add StreamHandler to sentry's default so you can catch missed exceptions
for logger_name in exclude:
logger = logging.getLogger(logger_name)
logger.propagate = False
logger.addHandler(logging.StreamHandler())
return True
|
3e2977c4fa8f79925e8f80359baabd4ca976d51d
| 497,525
|
def get_percent(part, total):
    """
    Return the percentage that *part* is of *total* (i.e. part/total * 100).

    Returns 0 when either value is zero/falsy, which also guards against
    division by zero.
    """
    if part and total:
        return (part / total) * 100
    return 0
|
9510906dd0cc86f22571702dd10b42322b804142
| 509,264
|
def get_snapshot(ec2, snapshot_name: str):
    """Return the EC2 snapshot tagged ``Name=snapshot_name``, or ``{}``
    when no snapshot matches.

    Raises ValueError when the name is ambiguous (multiple matches).
    """
    response = ec2.describe_snapshots(Filters=[
        {'Name': 'tag:Name', 'Values': [snapshot_name]},
    ])
    snapshots = response['Snapshots']
    if len(snapshots) > 1:
        raise ValueError('Several snapshots with Name=%s found.' % snapshot_name)
    return snapshots[0] if snapshots else {}
|
cd6076e04fd2c83e50b3a0ac7bc117793c92731d
| 35,697
|
def apply_query(queryset, params):
    """
    Apply a dict-based set of filters & parameters to a queryset.

    queryset is a Django queryset, eg MyModel.objects.all() or
    MyModel.objects.filter(user=request.user)

    params is a dictionary that contains the following:

        filtering: A dict of Django ORM filters, eg:
            {'user__id__in': [1, 3, 103], 'title__contains': 'foo'}
        other_filter: Another filter of some type, most likely a
            set of Q() objects.
        sorting: The name of the column to sort by
        sortreverse: If truthy, sort descending.

    Returns the narrowed/ordered queryset.
    """
    # Apply each ORM filter individually. (The previous local named
    # `filter` shadowed the builtin; iterate items() instead of
    # keys()-then-index.)
    for key, value in params['filtering'].items():
        queryset = queryset.filter(**{key: value})
    if params.get('other_filter'):
        # eg a Q() set
        queryset = queryset.filter(params['other_filter'])
    sorting = params.get('sorting')
    if sorting:
        if params.get('sortreverse'):
            sorting = "-%s" % sorting
        queryset = queryset.order_by(sorting)
    return queryset
|
d3989cc851ea3e40532769f6a7556b5ec2cda8a6
| 166,643
|
import socket
def listen(addr, family=socket.AF_INET, backlog=50):
"""Convenience function for opening server sockets. This
socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
Sets SO_REUSEADDR on the socket to save on annoyance.
:param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param backlog: The maximum number of queued connections. Should be at least 1; the maximum value is system-dependent.
:return: The listening green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(backlog)
return sock
|
38e422c06a97aaabec625a7f64cd6715f34afca9
| 398,161
|
def _CheckNoSandboxedRendererSwitch(input_api, output_api):
    """Checks that tests do not add the AwSwitches.WEBVIEW_SANDBOXED_RENDERER
    command line flag. Tests should instead use @OnlyRunIn(MULTI_PROCESS).

    Args:
        input_api: presubmit InputApi; supplies `re`, affected files and
            file filters.
        output_api: presubmit OutputApi used to build the warning result.

    Returns:
        A list of presubmit results (empty when no violations are found).
    """
    # This will not catch multi-line annotations (which are valid if adding
    # multiple switches), but is better than nothing (and avoids false positives).
    sandboxed_renderer_pattern = input_api.re.compile(
        r'^\s*@CommandLineFlags\.Add\(.*'
        r'\bAwSwitches\.WEBVIEW_SANDBOXED_RENDERER\b.*\)$')
    errors = []
    def _FilterFile(affected_file):
        # Only Java test files are of interest; everything else is skipped.
        return input_api.FilterSourceFile(
            affected_file,
            black_list=input_api.DEFAULT_BLACK_LIST,
            white_list=[r'.*\.java$'])
    # Scan only lines changed in this CL, recording "path:line" per hit.
    for f in input_api.AffectedSourceFiles(_FilterFile):
        for line_num, line in f.ChangedContents():
            match = sandboxed_renderer_pattern.search(line)
            if match:
                errors.append("%s:%d" % (f.LocalPath(), line_num))
    results = []
    if errors:
        results.append(output_api.PresubmitPromptWarning("""
android_webview/javatests/ should not use AwSwitches.WEBVIEW_SANDBOXED_RENDERER
to run in multi-process only. Instead, use @OnlyRunIn(MULTI_PROCESS).
""", errors))
    return results
|
3ecc4528efd3b9b680d2a88a3dadbe095599c467
| 572,095
|
def _construct_version(major, minor, patch, level, pre_identifier, dev_identifier, post_identifier):
    """Construct a PEP 0440 compatible version number to be set to __version__.

    Args:
        major, minor, patch: numeric release components (patch omitted if falsy).
        level: one of "alpha", "beta", "candidate", "final".
        pre_identifier: pre-release number, used when level is not "final".
        dev_identifier: development release number (appended as ".devN").
        post_identifier: post-release number (appended as ".postN").

    Returns:
        The assembled version string, e.g. "1.2.3rc1.post2.dev3".
    """
    assert level in ["alpha", "beta", "candidate", "final"]
    version = "{0}.{1}".format(major, minor)
    if patch:
        version += ".{0}".format(patch)
    if level != "final":
        level_short = {"alpha": "a", "beta": "b", "candidate": "rc"}[level]
        version += "{0}{1}".format(level_short, pre_identifier)
    # The .postN / .devN suffixes apply identically to every level, so
    # append them once here instead of duplicating per branch (the
    # original repeated this block in both arms of the if/else).
    if post_identifier:
        version += ".{0}{1}".format("post", post_identifier)
    if dev_identifier:
        version += ".{0}{1}".format("dev", dev_identifier)
    return version
|
9cf26af2250a2fed80b6e63390abc883dcba232b
| 675,515
|
def test_model(model, test_data):
    """
    Run predictions.

    Args:
        model: Trained model (e.g. LightGBM booster with `best_iteration`).
        test_data: Test dataset wrapper exposing `.dataset.data`.

    Returns:
        List of NumPy arrays containing predictions.
    """
    features = test_data.dataset.data
    return model.predict(features, num_iteration=model.best_iteration)
|
537683be363800389bdd1fd0e741e83f2f8a0ff2
| 658,806
|
def remove_http_https_prefix(endpoint):
    """Remove an http:// or https:// prefix if present in *endpoint*.

    Only a leading scheme is stripped; occurrences elsewhere in the string
    are preserved (the previous `.replace` removed them anywhere, contrary
    to the documented "prefix" behavior).
    """
    for prefix in ("https://", "http://"):
        if endpoint.startswith(prefix):
            return endpoint[len(prefix):]
    return endpoint
|
3ec56da4eae83fa7637fee2fe08b47d61513b30e
| 667,876
|
def ra_dec_format(val):
    """ Ra/Dec string formatting

    Converts the input string format of a right ascension/declination
    coordinate to one recognizable by astroquery.

    Args:
        val (str): an ra/dec expression formatted as "005313.81 +130955.0".

    Returns:
        string: the ra/dec coordinates re-formatted as "00h53m13.81s +13d09m55.0s"
    """
    # Right ascension: HHMMSS.ss -> HHhMMmSS.sss
    hour = val[0:2]
    min_ = val[2:4]
    sec = val[4:9]
    ra = hour + 'h' + min_ + 'm' + sec + 's'
    # Declination: the [9:13] slice starts on the separator character, so
    # strip it — the original kept the space, producing a doubled space in
    # the output ("...s  +13d...") contrary to the documented example.
    deg = val[9:13].strip()
    min_d = val[13:15]
    sec_d = val[15:]
    dec = deg + 'd' + min_d + 'm' + sec_d + 's'
    return ra + " " + dec
|
d96fc531f6c2a34f8d998343b8a9f3f757785773
| 445,245
|
def df_to_geojson(df, geometry, coordinates, properties):
    """
    Convert a pandas DataFrame to a GeoJson dictionary.

    Parameters
    ----------
    df : pandas DataFrame
        DataFrame containing the geojson's informations.
    geometry : String
        The type of the geometry (Point, Polygon, etc.).
    coordinates : String
        The DataFrame column's name of the coordinates.
    properties : list of String elements.
        The DataFrame column's names of the properties attributes.

    Returns
    -------
    geojson : Dict
        GeoJson dictionary with the geometry, coordinates, and properties elements.
    """
    geojson = {'type': 'FeatureCollection', 'features': []}
    for _, row in df.iterrows():
        feature = {
            'type': 'Feature',
            'properties': {},
            # BUG FIX: `coordinates` is a column *name* (see docstring); the
            # original embedded the name itself instead of the row's value.
            'geometry': {'type': geometry, 'coordinates': row[coordinates]},
        }
        for prop in properties:
            # Property keys keep only the last dotted component, lower-cased.
            normalize_prop = prop.lower().split('.')[-1]
            feature['properties'][normalize_prop] = row[prop]
        geojson['features'].append(feature)
    return geojson
|
75fa808dca18b89e2e259425b9b9e67bb8ac2415
| 16,221
|
def normalize_path(path: str) -> str:
    """
    Normalize *path*: ensure it starts with a slash and strip a single
    trailing slash. The root path "/" is returned unchanged.
    """
    if path == "/":
        return path
    prefixed = path if path.startswith("/") else "/" + path
    if prefixed.endswith("/"):
        prefixed = prefixed[:-1]
    return prefixed
|
ef02d5dbb31832214ec4a97784c57bbdd0e993d1
| 411,497
|
def _IsInt(x):
    """Returns True if the input can be parsed as an int.

    Also returns False for inputs such as None or lists, which make int()
    raise TypeError rather than ValueError (previously that exception
    propagated to the caller).
    """
    try:
        int(x)
    except (ValueError, TypeError):
        return False
    return True
|
dab7b6a0765dee718a562dbde62f011082e8ea84
| 119,170
|
import yaml
def load_yaml(data):
"""
Load data from yaml string
:param data: Stringified yaml object
:type data: str | unicode
:return: Yaml data
:rtype: None | int | float | str | unicode | list | dict
"""
return yaml.load(data, yaml.FullLoader)
|
1f37e4b070732582fd74c0da2023e62f74b82d32
| 457,822
|
def reconstruct_path(came_from, current):
    """
    Helper method for A* — walk the `came_from` links back to the start.

    @param came_from -> mapping of node -> predecessor discovered during search
    @param current -> goal node (where the snake head is)
    @returns list of coordinates from start to goal
    """
    total_path = [current]
    while current in came_from:
        current = came_from[current]
        total_path.append(current)
    total_path.reverse()
    return total_path
|
96bfd986c19795ee437bb7dc58bf103f4abbc246
| 619,583
|
def sql_like_decorate(pattern):
    """Wrap *pattern* in '%' wildcards for SQL LIKE, unless it already
    contains a '%' anywhere."""
    if '%' in pattern:
        return pattern
    return '%' + pattern + '%'
|
ed36ba3d307a20298eb765e78df120af45e5dd43
| 584,118
|
def fs15f16(x):
    """Convert float to ICC s15Fixed16Number (as a Python ``int``):
    the value scaled by 2^16 and rounded to the nearest integer."""
    scaled = x * 65536
    return int(round(scaled))
|
a11ce834d6cdbf2adee012361509ca4fee9f2b3d
| 172,071
|
import math
def get_bounding_box(location, radius):
"""
Based on given location coordinates and radius in kilometers
returns coordinates of the bounding box.
"""
equator_len = 111
current_latitude_km_length = math.cos(location[0] * math.pi / 180) * equator_len
return {
"lat_min": location[0] - radius / equator_len,
"lat_max": location[0] + radius / equator_len,
"lon_min": location[1] - radius / current_latitude_km_length,
"lon_max": location[1] + radius / current_latitude_km_length,
}
|
d3f6e18d5a29f62ab216c79671664cec66ed29cf
| 88,132
|
from typing import Callable
from datetime import datetime
def parse_time(string: str, parser: Callable = datetime.strptime) -> datetime:
    """
    Parse an ISO-like UTC timestamp, with or without fractional seconds.

    :param string: date and time as a string
    :param parser: function converting (string, format) to datetime
    :return: datetime.datetime
    :raises ValueError: if neither known format matches
    """
    for fmt in ("%Y-%m-%dT%H:%M:%S.%fZ", '%Y-%m-%dT%H:%M:%SZ'):
        try:
            return parser(string, fmt)
        except ValueError:
            continue
    raise ValueError('Invalid time format in string %s' % string)
|
c8a937842cf8878a3442a53ae8fd5dc780404daf
| 49,225
|
def contains_lower(text: str) -> bool:
    """Return True if *text* contains at least one lower-case character."""
    return any(ch.islower() for ch in text)
|
fd90eb9da95be52bcc03617545f7e418729752f6
| 189,759
|
def encased(message, marker):
    """Return string 'message' encased in the specified marker at both ends.

    >>> encased("hello", "**")
    '**hello**'

    :message: the string to be encased
    :marker: the string to encase with
    :returns: 'message' encased in markers
    """
    return ''.join((marker, message, marker))
|
59fc24e93fd787d2f3d1f1b58ba396bec51608f8
| 388,542
|
import collections
def partition(lst, f, save_keys = False):
"""partition(lst, f, save_keys = False) -> list
Partitions an iterable into sublists using a function to specify which
group they belong to.
It works by calling `f` on every element and saving the results into
an :class:`collections.OrderedDict`.
Arguments:
lst: The iterable to partition
f(function): The function to use as the partitioner.
save_keys(bool): Set this to True, if you want the OrderedDict
returned instead of just the values
Example:
>>> partition([1,2,3,4,5], lambda x: x&1)
[[1, 3, 5], [2, 4]]
"""
d = collections.OrderedDict()
for l in lst:
c = f(l)
s = d.setdefault(c, [])
s.append(l)
if save_keys:
return d
else:
return d.values()
|
c49850504e0b4dddfe35d04faacd9f615a175a7d
| 179,823
|
def get_entity_and_id_colname(table):
    """Return (entity, id_column) from an id table,
    assuming the entity id column name is f'{entity}_id',
    and that this is the first (or only) column ending in '_id'.
    """
    matching = table.filter(regex=r'\w+_id$').columns
    id_colname = matching[0]
    entity = id_colname[:-len('_id')]
    return entity, id_colname
|
8f005b34e752b3150d6f17fb4fb07ede710fc06b
| 231,193
|
import hashlib
def validate_md5(local_path, md5):
"""
Computes the md5 checksum of a file and compares it to the passed md5
checksum
"""
with open(local_path, "rb") as in_f:
md5_file = hashlib.md5(in_f.read()).hexdigest()
if md5_file != md5:
return False
else:
return True
|
07b4228de6034ad475af0abf604bf2077812582d
| 269,899
|
def compare_int(entry, num):
    """Return True if the integer value of *entry* matches *num*, False otherwise."""
    return int(entry) == num
|
e779829b0d9a8343d3c48e6a66542e8e6ee62494
| 24,851
|
def geopoint_average(points):
    """Takes a list of lat-lng tuples and returns their average as a tuple,
    or None when the list is empty."""
    count = len(points)
    if not count:
        return None
    lat_total = sum(point[0] for point in points)
    lng_total = sum(point[1] for point in points)
    return (lat_total / count, lng_total / count)
|
34df75c0e838ed0dce519f085ef1f0227f07980f
| 316,444
|
def find_footer_cutoff(pg):
    """Search for a horizontal line separating footer from data.

    Considers rects spanning more than half the page width and returns the
    lowest one if it sits in the bottom 10% of the page; otherwise None.

    Fix: the previous version called max() on the filtered list, which
    raised ValueError on pages with no wide rects.
    """
    wide_rects = [r for r in pg.rects if r["width"] / pg.width > 0.5]
    if not wide_rects:
        return None
    lowest = max(wide_rects, key=lambda r: r["bottom"])
    if lowest["bottom"] > 0.9 * float(pg.height):
        return lowest
    return None
|
c6adf81296cde64c9eae541ba45b74624a69ea92
| 261,856
|
def get_f77_compiler_executable(compiler):
    """For any given FCompiler instance, return the name of the F77 compiler
    (argv[0] of the configured command line)."""
    command = compiler.compiler_f77
    return command[0]
|
6c567fa22b5ee4dbcb5544807c8cb107a20a6c51
| 222,453
|
def bits_to_intervals(bits):
    """Convert bit numbers to bit intervals.

    Runs of consecutive bit numbers collapse into one interval.

    :returns: list of tuples: [(bitoffset_0, bitsize_0), ...]
    """
    if not bits:
        return []
    ordered = sorted(bits)
    intervals = []
    start = ordered[0]
    prev = ordered[0]
    for bit in ordered[1:]:
        if bit != prev + 1:
            # Close the current run and open a new one.
            intervals.append((start, prev - start + 1))
            start = bit
        prev = bit
    intervals.append((start, prev - start + 1))
    return intervals
|
d723091be39aba4b722ccadf464d239c9fa60bd4
| 458,325
|
import struct
def Rf4ceMakeFCS(data):
"""
Returns a CRC that is the FCS for the frame (CRC-CCITT Kermit 16bit on the data given)
Implemented using pseudocode from: June 1986, Kermit Protocol Manual
https://www.kermitproject.org/kproto.pdf
"""
crc = 0
for i in range(0, len(data)):
c = ord(data[i])
q = (crc ^ c) & 15 #Do low-order 4 bits
crc = (crc // 16) ^ (q * 4225)
q = (crc ^ (c // 16)) & 15 #And high 4 bits
crc = (crc // 16) ^ (q * 4225)
return struct.pack('<H', crc)
|
81add4f980ded4e26b78252257933a191be8721c
| 21,654
|
def disable_trace(function):
    """
    Decorator that marks *function* so execution logging is suppressed.
    Returns the function unchanged apart from the marker attribute.
    """
    setattr(function, '_trace_disable', True)
    return function
|
b7f94a023746b62893d34d884d9d3fae7b2809b5
| 502,440
|
import json
def agg_all(run_dir, solved, map_fct, agg_fct):
""" Calculate aggregates over all runs.
Args:
run_dir: directory containing benchmark results
solved: whether to consider only successful tries
map_fct: maps tries to values for aggregation
agg_fct: function used to aggregate values
Returns:
aggregated value over all tries
"""
values = []
#for model_id in ['cushman-codex', 'davinci-codex']:
for model_id in ['code-cushman-001', 'code-davinci-002']:
# for prompt_style in ['question', 'query', 'plan']:
for prompt_style in ['plan']:
for nr_samples in [0, 2, 4]:
run_id = f'{model_id}_{prompt_style}_S{nr_samples}_R2_T0.5'
result_path = f'{run_dir}/results_{run_id}.json'
try:
with open(result_path) as file:
data = json.load(file)
for tries in data.values():
for one_try in tries:
if not solved or one_try['similarity']==1.0:
value = map_fct(one_try)
if value is not None:
values.append(value)
except Exception as e:
print(e)
return agg_fct(values)
|
6cb1771c1aa322cee596f7bc3c3aba181c603a12
| 645,590
|
def _bencode_bytes(value, encoding='utf-8'):
    """ Encode a bytestring (strings as UTF-8), eg 'hello' -> 5:hello """
    raw = value.encode(encoding) if isinstance(value, str) else value
    length_prefix = str(len(raw)).encode(encoding)
    return length_prefix + b':' + raw
|
ee676971431074eaea3c313f12064ed324cae346
| 126,400
|
import ipaddress
import socket
def _matches_machine_hostname(host: str) -> bool:
"""Indicates whether ``host`` matches the hostname of this machine.
This function compares ``host`` to the hostname as well as to the IP
addresses of this machine. Note that it may return a false negative if this
machine has CNAME records beyond its FQDN or IP addresses assigned to
secondary NICs.
"""
if host == "localhost":
return True
try:
addr = ipaddress.ip_address(host)
except ValueError:
addr = None
if addr and addr.is_loopback:
return True
this_host = socket.gethostname()
if host == this_host:
return True
addr_list = socket.getaddrinfo(
this_host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME
)
for addr_info in addr_list:
# If we have an FQDN in the addr_info, compare it to `host`.
if addr_info[3] and addr_info[3] == host:
return True
# Otherwise if `host` represents an IP address, compare it to our IP
# address.
if addr and addr_info[4][0] == str(addr):
return True
return False
|
93a4111028aeb5d2341511d455c14c45e7161813
| 606,167
|
def is_port(port):
    """
    Return True if *port* is within the valid port range (0-65535).
    """
    return 0 <= port <= 65535
|
c45115d2ecbd92a2a52dfe6ca3e7af570c619cda
| 89,043
|
from unittest.mock import Mock
def mock_channel_file(
offered_by, channel_id, playlist_id, create_user_list=False, user_list_title=None
):
"""Mock video channel github file"""
content = f"""---
offered_by: {offered_by}
channel_id: {channel_id}
playlists:
- id: {playlist_id}
{"create_user_list: true" if create_user_list else "" }
{ "user_list_title: " + user_list_title if user_list_title else "" }
"""
return Mock(decoded_content=content)
|
3042a7fc6f0fb622db8bf035a408672ddd6408ab
| 698,695
|
def bin_title(bin):
    """The title of a Bin instance in text representations."""
    prefix = 'Sample @ '
    return prefix + bin.iso8601time
|
08b7cfd64e26381d85483924bf2e7601db98d0f4
| 620,525
|
def mock_function_pass(*args):
    """
    Mock a function that 'passes', i.e., returns a 0,
    logging the call to stdout.
    """
    print("\nmock> f({}) ==> 0".format(args))  # pragma: no cover
    return 0  # pragma: no cover
|
2104d49fa8b811d246bc0d991cd558051028ef6f
| 360,707
|
from typing import OrderedDict
def alphabetically_sorted_dict(d):
"""
Returns a dictionary with all keys recursively sorted alphabetically
"""
ordered = OrderedDict()
for k, v in sorted(d.items()):
if isinstance(v, dict):
ordered[k] = alphabetically_sorted_dict(v)
else:
ordered[k] = v
return ordered
|
071d761d4d660480b34da954d3d0410bfacc5dfd
| 644,758
|
def justify_center(content, width, symbol):
    """Center every line of *content* in a field of *width* characters,
    padded with *symbol*."""
    centered = [line.center(width, symbol) for line in content.split("\n")]
    return "\n".join(centered)
|
314061512596451158214a47c4bce5142531451a
| 446,479
|
def get_substituted_contents(contents, substitutions):
    """
    Perform a list of substitutions and return the result.

    contents: the starting string on which to begin substitutions
    substitutions: Substitution objects applied in order, each receiving
        the result of the previous substitution.
    """
    current = contents
    for substitution in substitutions:
        current = substitution.apply_and_get_result(current)
    return current
|
8deeb9428e699843f449ffa429bdf959569292ac
| 153,673
|
import math
def softmax(y_pred):
"""Normalizes a dictionary of predicted probabilities, in-place."""
exp = {c: math.exp(p) for c, p in y_pred.items()}
total = sum(exp.values())
return {c: exp[c] / total for c in y_pred}
|
44ada3f8ddc49cfd23e5002a9ffa49c1d45ad0dc
| 111,792
|
from typing import Any
from typing import Callable
def make_async(return_value: Any) -> Callable:
"""Wrap value into async function."""
# pydocstyle
async def fn(*args, **kwargs):
return return_value
return fn
|
59d0feb7aeb60194147a4b05ddaf76b356e2fb69
| 590,450
|
def strip_prefix(y, prefix):
    """Return the entries of *y* whose keys start with *prefix*, with the
    prefix removed from each key."""
    cut = len(prefix)
    result = {}
    for key, value in y.items():
        if key.startswith(prefix):
            result[key[cut:]] = value
    return result
|
7836b7a45e426cc31f9a74b561c06f770463c6c2
| 519,804
|
def has_type(cell, type):
    """
    Selects cells with the given type.

    :param cell: Cell object to select
    :param type: type value to compare against the cell's ``type`` attribute
    :return: a bool object (True if cell should be selected)
    """
    return type == cell.type
|
0f1609b990e6669003d8fe9329857d49764c2003
| 194,670
|
def list_files(locales, filename_func):
    """Return a space-separated, double-quoted list of the filenames
    generated by filename_func for the given locales.

    :param filename_func: function that generates a filename for a given locale
    """
    quoted = ['"%s"' % filename_func(locale) for locale in locales]
    return " ".join(quoted)
|
29af30f4d79ee6a76f5dc474fa7c9d9cd5a9b036
| 445,675
|
def _normalize_counts(counts, val=1):
    """Normalizes a dictionary of counts, such as those returned by _get_frequencies().

    Args:
        counts: a dictionary mapping value -> count.
        val: the number the normalized counts should add up to.

    Returns:
        dictionary of the same form as counts, with the counts scaled to
        sum to val. (An empty input yields an empty dict.)
    """
    total = float(sum(counts.values()))
    return {key: val * float(count) / total for key, count in counts.items()}
|
ed48e402c1c9fa22e264b3fc720acaa6dbff193d
| 74,374
|
def grad_argnums_wrapper(all_vjp_builder):
    """
    A generic autograd helper function. Takes a function that
    builds vjps (vector-Jacobian products) for all arguments, and wraps it
    to return only the vjps for the requested argument positions.
    """
    def build_selected_vjps(argnums, ans, combined_args, kwargs):
        # Build the full vjp function once, up front, from the answer and
        # the complete argument list.
        vjp_func = all_vjp_builder(ans, *combined_args, **kwargs)
        def chosen_vjps(g):
            # Return whichever vjps were asked for
            all_vjps = vjp_func(g)
            return [all_vjps[argnum] for argnum in argnums]
        return chosen_vjps
    return build_selected_vjps
|
282381aa2e4769edc2c797a5ae98af38b8d181cd
| 200,980
|
def __getitem__(self, index):
    """
    Retrieve a field or slice of fields from the record using an index.

    Args:
        index: int or slice object
            Index can be an integer or slice object for normal sequence
            item access.

    Returns:
        If index is an integer the value of the field corresponding to
        the index is returned. If index is a slice a list of field values
        corresponding to the slice indices is returned.
    """
    selected = self._attr_getters[index]
    if isinstance(index, int):
        # One getter; call it on this record.
        return selected(self)
    # Slice: a list of getters — evaluate each against this record.
    return [getter(self) for getter in selected]
|
9a77450c8d9103dd88ce253983a638d21677d834
| 125,699
|
def scale(matrix, scale_x, scale_y):
    """Scale a matrix in list format: repeat each item scale_x times within
    its row and each row scale_y times. A fresh row list is created for
    every repetition (no aliasing between repeated rows)."""
    scaled = []
    for row in matrix:
        for _ in range(scale_y):
            expanded = []
            for item in row:
                expanded.extend([item] * scale_x)
            scaled.append(expanded)
    return scaled
|
656f0df49e0581dd394876da4b2d1154cf40e8b9
| 377,605
|
import torch
def soft_threshold(x: torch.Tensor, threshold):
"""
Calculate the soft threshold of an input
Parameters
----------
x: torch.Tensor
input data
threshold: int, float, torch.Tensor
threshold to test against
Returns
-------
torch.Tensor with the soft threshold result
"""
hld = torch.abs(x) - threshold
y = torch.where(hld > 0, hld, torch.tensor(0.0, dtype=x.dtype))
y = y / (y + threshold) * x
return y
|
2405f38c7ea03846c87b268bc49e97ee9fa6c9ec
| 420,780
|
import math
def _deg_to_rad(deg):
""" Takes a degree value deg and returns the equivalent value in
radians. """
return deg * math.pi / 180
|
6a4e8bb9118935f21044c4e08f051de2b8ac294d
| 374,680
|
def strlen(s):
    """
    Returns the length of string s using recursion
    (one character peeled off per call).

    Examples:
    >>> strlen("input")
    5
    >>> strlen("")
    0
    >>> strlen('123456789')
    9
    >>> strlen('a'*527)
    527
    """
    return 0 if s == "" else 1 + strlen(s[1:])
|
57b6697fdd3ff9406aba73418a35570c0b2ecf56
| 471,551
|
def intersect_trees(tree1, tree2):
    """Shrink two trees to contain only overlapping taxa.
    Parameters
    ----------
    tree1 : skbio.TreeNode
        first tree to intersect
    tree2 : skbio.TreeNode
        second tree to intersect
    Returns
    -------
    tuple of two TreeNodes
        resulting trees containing only overlapping taxa
    """
    names1 = [tip.name for tip in tree1.tips()]
    names2 = [tip.name for tip in tree2.tips()]
    set1, set2 = set(names1), set(names2)
    # A size mismatch between list and set means a taxon appeared twice.
    if len(set1) != len(names1) or len(set2) != len(names2):
        raise ValueError('Either tree has duplicated taxa.')
    shared = set1.intersection(set2)
    if not shared:
        raise KeyError('Trees have no overlapping taxa.')
    return (tree1.shear(shared), tree2.shear(shared))
|
344ace1e867748f1db0b514b7b1339775cadbe4a
| 676,989
|
import torch
def rodrigues(theta, eps=1e-8):
    """Rodrigues, Axis Angle to Quaternion.
    Args:
        theta: tensor of shape `[..., 3]`.
    Returns:
        quaternion: of shape `[...,4]`
    """
    # eps keeps the norm nonzero so the axis normalization is safe.
    rot_angle = torch.norm(theta + eps, p=2, dim=-1, keepdim=True)
    axis = theta / rot_angle
    half_angle = rot_angle * 0.5
    w = torch.cos(half_angle)
    xyz = torch.sin(half_angle) * axis
    return torch.cat([w, xyz], dim=-1)
|
e68b2381573cbd309cddff47a5860fe572875753
| 387,220
|
def _split_heads(tensor, num_heads, attn_head_size):
    """
    Splits hidden_size dim into attn_head_size and num_heads
    """
    # [..., hidden] -> [..., num_heads, head_size], then move the head
    # axis in front of the sequence axis.
    leading = tensor.size()[:-1]
    reshaped = tensor.view(*leading, num_heads, attn_head_size)
    return reshaped.permute(0, 2, 1, 3)
|
d9483e496388523ac8c2d8bde433c2ac8a4a5ae5
| 257,712
|
def url_match(url1: str, url2: str) -> bool:
    """
    This function takes two urls and check if they are the same modulo '/' at the end
    :param url1: mandatory, the first url
    :type url1: str
    :param url2: mandatory, the second url
    :type url2: str
    :rtype: Boolean
    """
    if url1 == url2:
        return True
    # Only a trailing '/' may be ignored. The old code stripped ANY final
    # character, so e.g. 'http://ab' wrongly matched 'http://a'.
    if url1.endswith('/') and url1[:-1] == url2:
        return True
    return url2.endswith('/') and url1 == url2[:-1]
|
c7c15ed1665c2837ca5ec7af402b6f0b72e41c0d
| 126,884
|
def printAtom( serial, molecule, atom, alchemicalTransformation):
    """Generate atom line
    Parameters
    ----------
    serial : int
        Atom serial
    alchemicalTransformation : bool
        True if alchemical transformation
    Returns
    -------
    atomLine : str
        Atom line
    """
    # Fields shared by both the plain and the alchemical variants.
    base_fields = (serial, atom.type_gmx, 1, molecule.residueName,
                   atom.nameOriginal, 1, atom.charge, atom.mass)
    if alchemicalTransformation:
        # Alchemical lines carry the B-state type/charge/mass as well.
        fmt = ' %5d %10s %6d %6s %5s %6d %10.4f %10.4f%11s%11.4f%11.4f \n'
        return fmt % (base_fields + (atom.type_gmx_B, atom.charge_B, atom.mass_B))
    fmt = ' %5d %10s %6d %6s %5s %6d %10.4f %10.4f \n'
    return fmt % base_fields
|
52326352ccf2b592eabc517fc8d8528d32e97e37
| 60,661
|
def _GetTransferLockEnum(domains_messages):
    """Get TransferLockStateValueValuesEnum from api messages."""
    settings = domains_messages.ManagementSettings
    return settings.TransferLockStateValueValuesEnum
|
ca535ec78fcde90fcbe325e9ad117a18036e43a3
| 185,564
|
from typing import Dict
def substitute_tags(tag_map: Dict[str, str], text: str) -> str:
    """Substitute tags from the text for corresponding values in the map"""
    # Only double-quoted occurrences of a tag are replaced.
    for key in tag_map:
        text = text.replace('"%s"' % key, tag_map[key])
    return text
|
2f6f019416411a537f30e09db0c18f1bddbb9412
| 186,169
|
def read1(filename):
    """ read tex file into lines for processing

    Args:
        filename: path of the file to read.
    Returns:
        list of str: the file's lines, newline characters included.
    """
    # Context manager guarantees the handle is closed even if the read
    # raises (the old open/readlines/close leaked on error).
    with open(filename) as f:
        return f.readlines()
|
3987724a82b03b3b55872c1d0eec5a3bb35499db
| 105,343
|
def event(n):
    """Keep only the lowest byte of an integer.
    This function is useful because bitwise operations in python
    yield integers out of the range(128), which represents walk events."""
    # n % 128 is identical to n & 127 for Python ints (including negatives).
    return n % 128
|
292745a791ee6dd2b869387b7cdbf7ecb1de7368
| 314,905
|
def get_file_contents(file='input.txt'):
    """Read all lines from file."""
    with open(file) as handle:
        # Iterate the handle directly; strip whitespace from each line.
        return [line.strip() for line in handle]
|
3607fd0257f60bcf2b386b5d2026a80165134a23
| 294,147
|
import hashlib
def double_sha256(msg):
    """sha256(sha256(msg)) -> bytes"""
    inner = hashlib.sha256(msg).digest()
    return hashlib.sha256(inner).digest()
|
635ad64649c34fb8a02f256f2f9c85470f9da7bb
| 141,173
|
def get_locations(df):
    """
    Get the list of all locations present in the data source.
    Parameters
    ----------
    df : `~pandas.core.frame.DataFrame`
        Astropy data table containing the navostats data
    Returns
    -------
    list of str
        Sorted unique location values present in the data.
    """
    # The group keys of a 'location' groupby are exactly the distinct
    # location values; sorted() turns them into an ordered list.
    groups = df.groupby(['location']).groups
    return sorted(groups)
|
c099c8867337b44e82674fa743143a9399aeb063
| 544,026
|
def make_header(s, char = '-'):
    """ Make a rst header from the string s """
    underline = char * len(s)
    return '\n\n{}\n{}\n\n'.format(s, underline)
|
06efe625e3b85a9a66bdb4d8e7ffbdf0c4c8e59f
| 323,236
|
import math
def bound_box(idx, w, length, size, overlap_pixel):
    """
    Function that return the bounding box of a word given its index
    Args:
        ind: int, ind < number of words
    Returns:
        Bounding box(int[]): [h_low, h_high, w_low, w_high]
    """
    assert idx < length, "Index Out of Bound"
    # Horizontal stride between neighbouring boxes.
    step = size - overlap_pixel
    # Number of boxes that fit across the width.
    per_row = int((w - overlap_pixel) / step)
    top = int(math.floor(idx / per_row) * step)
    left = int(idx % per_row * step)
    return [top, top + size, left, left + size]
|
8a3349f69922401cc0c968a9c60a3f30d5315949
| 243,059
|
import typing
def _normalize_sample_plan(sample, plan) -> typing.Tuple[list, list]:
    """Normalize samples and plans to list of samples and plans
    Parameters
    ----------
    sample :
        Sample metadata. If a beamtime object is linked, an integer will be interpreted as the index appears in
        the ``bt.list()`` method, corresponding metadata will be passed. A customized dict can also be passed as
        the sample metadata.
    plan :
        Scan plan. If a beamtime object is linked, an integer will be interpreted as the index appears in the
        ``bt.list()`` method, corresponding scan plan will be A generator or that yields ``Msg`` objects (or an
        iterable that returns such a generator) can also be passed.
    Returns
    -------
    sample :
        The list of samples
    plan :
        The list of plans
    """
    sample_is_list = isinstance(sample, list)
    plan_is_list = isinstance(plan, list)
    # Broadcast the scalar side to the length of the list side; if both
    # are scalars, wrap each in a singleton list.
    if sample_is_list and not plan_is_list:
        plan = [plan] * len(sample)
    elif plan_is_list and not sample_is_list:
        sample = [sample] * len(plan)
    elif not sample_is_list and not plan_is_list:
        sample, plan = [sample], [plan]
    if len(sample) != len(plan):
        raise RuntimeError("Samples and Plans must be the same length")
    return sample, plan
|
b7dc8ad86156d1354d3bbcab6f1e166a478794fb
| 617,007
|
def inside(resource1, resource2):
    """Is ``resource1`` 'inside' ``resource2``? Return ``True`` if so, else
    ``False``.
    ``resource1`` is 'inside' ``resource2`` if ``resource2`` is a
    :term:`lineage` ancestor of ``resource1``. It is a lineage ancestor
    if its parent (or one of its parent's parents, etc.) is an
    ancestor.
    """
    # Walk the __parent__ chain upward, comparing by identity.
    current = resource1
    while current is not None:
        if current is resource2:
            return True
        current = current.__parent__
    return False
|
906a05912bba8b299e42fdb3a3b4547a1b160bb4
| 17,011
|
def multiLevelConstantSampleNumber(inputDict, newLevels):
    """
    Returns a list of sample numbers of same length as the number of levels of
    deault hierarchy. Keeps constant the number of samples from defaultHierarchy if an
    entry of newLevels exists in deafultHierarchy. If not, allocate a default
    newSampleNumber to the entry.
    """
    default_hierarchy = inputDict['defaultHierarchy']
    fallback_samples = inputDict['newSampleNumber']
    samples = []
    for level in newLevels:
        # Look the level up in the default hierarchy; first match wins.
        for entry in default_hierarchy:
            if entry[0] == level:
                samples.append(1 * entry[1])
                break
        else:
            # No match found: fall back to the default sample count.
            samples.append(fallback_samples)
    return samples
|
ca15eaca0721b7598bf56713e8eca86f3ad76faf
| 168,325
|
def load_catme_data_sections(path_to_file):
    """Returns a list of text sections from the CATME csv export."""
    with open(path_to_file, 'r') as handle:
        contents = handle.read()
    # Sections in the export are separated by blank lines.
    return contents.split('\n\n')
|
2344825982ef993307cb841370b8b22f41bc44ba
| 381,451
|
import csv
def get_data_names(file_name):
    """
    Returns just the data names in the first row of the CSV
    """
    with open(file_name, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        # Only the first row is needed; the comma-joined names sit in
        # its first space-delimited field.
        for first_row in reader:
            return first_row[0].split(',')
|
9f45122f0dc18652e6ea3253c021a08ecbede3ec
| 315,557
|
def get_pred_dict(pred_name):
    """
    Create a dict mapping predicate names to indices
    :param pred_name: list of pred names, in order
    :return: dict
    """
    mapping = {}
    for idx, name in enumerate(pred_name):
        mapping[name] = idx
    return mapping
|
738a0628372ba7bd6a7a0e9f8f0fbe56a2a19eaa
| 192,595
|
def ignore_exception(*exception_classes):
    """
    A function decorator to catch exceptions and pass.
    @ignore_exception(ValueError, ZeroDivisionError)
    def my_function():
        ...
    Note that this functionality should only be used when you don't care about
    the return value of the function, as it will return `None` if an exception
    is caught.
    """
    import functools

    def decorator(func):
        # functools.wraps preserves the wrapped function's __name__ and
        # __doc__ (the old wrapper clobbered them).
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exception_classes:
                # Listed exceptions are deliberately swallowed.
                pass
        return func_wrapper
    return decorator
|
2e007f124c84843c0abf0956a1b2b7326d7b4ec7
| 644,303
|
from typing import Set
def allocate_mid(mids: Set[str]) -> str:
    """
    Allocate a MID which has not been used yet.
    """
    # Probe '0', '1', '2', ... until an unused string is found, record
    # it in the set, and hand it back.
    candidate = 0
    while str(candidate) in mids:
        candidate += 1
    mid = str(candidate)
    mids.add(mid)
    return mid
|
994b871a19edde7d8551dc641884e973f427889d
| 683,478
|
def prefetch_category(pets):
    """Prefetch the related 'category' objects for the pets queryset."""
    prefetched = pets.prefetch_related('category')
    return prefetched
|
fddd6e9cac71154c3ebee7728b159646ef5a2b7e
| 573,954
|
def stringListToFloat(stringList):
    """Converts a list with strings into a list with floats."""
    return list(map(float, stringList))
|
637bc6319cdf5d8b2fdb635d7ef3b281c0b851aa
| 135,543
|
def is_bond_member(yaml, ifname):
    """Returns True if this interface is a member of a BondEthernet."""
    if "bondethernets" not in yaml:
        return False
    # Missing 'interfaces' keys default to an empty member list.
    return any(
        ifname in iface.get("interfaces", [])
        for iface in yaml["bondethernets"].values()
    )
|
521186221f2d0135ebcf1edad8c002945a56da26
| 827
|
def force_slashend(path):
    """
    Return ``path`` suffixed with ``/`` (path is unchanged if it is already
    suffixed with ``/``).
    """
    return path if path.endswith('/') else path + '/'
|
2e2be0dbb88fb380e581f49af532ea7b5724d918
| 10,498
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.