content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def is_fund_code(s: str) -> bool:
    """Return True if *s* is a valid six-digit fund code."""
    pattern = re.compile(r"[0-9]{6}")
    return pattern.fullmatch(s) is not None
def required_get(data: dict, field: str):
    """Return ``data[field]``, raising a descriptive error when missing.

    Raises:
        Exception: when *field* is absent from *data* (chained from KeyError).
    """
    try:
        value = data[field]
    except KeyError as missing:
        raise Exception(f"no required key {field} found") from missing
    return value
def _form_c_loop_end(_):
"""Form the loop ending for C."""
return '}' | 0f31ca4f59227535e4ccf0159c39c333b724b194 | 111,049 |
import smtplib
def sendEmail(
    sendAddr: str,
    password: str,
    recvAddr: str,
    body: str,
    server: str,
    port: int,
    sub: str = "No Subject",
) -> bool:
    """Send *body* as an email over STARTTLS and return True on success.

    Logs in as *sendAddr* with *password* against *server*:*port*.
    """
    # Subject header plus a blank line, then the body (RFC 5322 layout).
    message = "Subject: {}".format(sub) + "\n\n {}".format(body)
    with smtplib.SMTP(server, port) as smtp:
        smtp.ehlo()
        smtp.starttls()
        smtp.ehlo()
        smtp.login(sendAddr, password)
        smtp.sendmail(sendAddr, recvAddr, message)
    return True
def _get_headers(params):
"""
Return HTTP headers required.
"""
return {"Authorization": "OAuth {oauth}".format(oauth=params["api_key"])} | 3c405a7f6b625d2cf7354010baa33c21605ff9f1 | 111,051 |
def dict_builder(day_list, workout_list, descrip_list):
    """Build ``{day: [workout, distance/pace, description]}`` from parallel lists.

    :param day_list: keys of the resulting dict
    :param workout_list: workout name per day
    :param descrip_list: per-day pair of (distance/pace, description)
    :return: dict keyed by day
    """
    combined = {}
    for idx, day in enumerate(day_list):
        pace, description = descrip_list[idx][0], descrip_list[idx][1]
        combined[day] = [workout_list[idx], pace, description]
    return combined
import re
def camel_case(name):
    """
    Turn a hyphenated word into CamelCase.

    :param name: string to convert; may be empty
    :return: CamelCase version of *name* ('' for empty input)
    """
    converted = re.sub(r'-(\w)', lambda match: match.group(1).upper(), name.lower())
    # Guard the empty string -- the original indexed [0] unconditionally.
    if not converted:
        return converted
    return converted[0].upper() + converted[1:]
def get_chuck_norris_gif(status):
    """
    Return a gif link based on good or bad build status
    """
    # NOTE(review): the format template is empty, so this always returns ""
    # regardless of *status* -- the gif URL template appears to have been
    # lost; confirm the intended URL before relying on this.
    return "".format(status)
def _get_info_path(path):
"""Returns path (`str`) of INFO file associated with resource at path."""
return '%s.INFO' % path | 2fb8f4f87ea94c16dc8dc568f46dead66c77f8d3 | 111,073 |
def update_config_with_model_dims(data_loader, config):
    """Prepend the input-feature count to ``config["dims"]``.

    Peeks at a single training batch to measure the feature dimension and
    inserts it as the first entry of the model's layer-dimension list.
    """
    # Only the features of the first batch are needed.
    features, _ = next(iter(data_loader.train_loader))
    feature_count = features.cpu().numpy().shape[-1]
    config["dims"].insert(0, feature_count)
    return config
def obj_successfail(succeeded):
    """Render an antag objective outcome as a styled HTML span.

    Keyword arguments:
    succeeded -- Boolean. Did the antag win?
    """
    return ("<span class='objective success'>Success</span>"
            if succeeded
            else "<span class='objective failure'>Failure</span>")
def transform_pauli_moments_to_bit(mean_p, var_p):
    """
    Map the moments of a Pauli operator onto Bernoulli (bit) moments.

    A mean on [-1, +1] is rescaled onto [0, 1]; the variance shrinks by the
    square of that scale factor (1/4).

    :param mean_p: mean of some Pauli operator
    :param var_p: variance of a Pauli operator
    :return: bit mean and variance.
    """
    bit_mean = 0.5 * (mean_p + 1)
    bit_var = 0.25 * var_p
    return bit_mean, bit_var
def linearstep(edge0: float, edge1: float, x: float) -> float:
    """A linear transition function.

    Returns a value that linearly moves from 0 to 1 as x goes from edge0 to
    edge1; values outside the range clamp to 0 or 1.

    When edge0 == edge1 the ramp degenerates to a unit step at that edge
    instead of dividing by zero.
    """
    if edge0 == edge1:
        return 0.0 if x < edge0 else 1.0
    return max(0.0, min(1.0, (x - edge0) / (edge1 - edge0)))
def choose(n, r):
    """
    Return the binomial coefficient nCr.

    Returns 0 when r is negative or greater than n (previously r > n gave a
    wrong answer of 1). Uses the symmetry nCr == nC(n-r) to shorten the loop.
    """
    if r < 0 or r > n:
        return 0
    r = min(r, n - r)
    numerator = 1
    denominator = 1
    for i in range(n, n - r, -1):
        numerator *= i
        denominator *= (n - i + 1)
    return numerator // denominator
def report_definition(connection, report_id):
    """Fetch the in-memory definition (attributes and metrics) of a report.

    Lets callers inspect every available object without actually running a
    data query, which helps filter large datasets and retrieve values
    dynamically for performance and scalability.

    Args:
        connection: MicroStrategy REST API connection object
        report_id (str): Unique ID of the report

    Returns:
        Complete HTTP response object.
    """
    connection._validate_project_selected()
    definition_url = f'{connection.base_url}/api/v2/reports/{report_id}'
    return connection.session.get(url=definition_url)
import uuid
def additional_headers(request):
    """Build per-request tracing headers, optionally requesting async replies."""
    headers = {
        "X-Request-ID": str(uuid.uuid1()),
        "X-Correlation-ID": str(uuid.uuid1()),
    }
    # Only the exact boolean True opts in to the Prefer header.
    if request.param["prefer"] is True:
        headers["Prefer"] = "respond-async"
    return headers
import click
def read_excel_data(ctx):
    """Read Excel data from the data directory via the context's importer.

    Args:
        ctx (Context): Context object

    Returns:
        list: Excel data list

    Raises:
        click.ClickException: If no Excel data found
    """
    excel_data_list = ctx.obj.svc_importer.read_excel_data()
    if not excel_data_list:
        raise click.ClickException("No Excel data found")
    click.echo("Excel data list: {}".format(excel_data_list))
    return excel_data_list
def _original_label(original_data, threshold, drift_score, window_size, step_size):
"""
To obtain a original drift label of time series.
Args:
original_data(numpy.ndarray): The input data.
threshold(float): The drift threshold.
drift_score(numpy.ndarray): The drift score of the input data.
window_size(int): Size of a concept window.
Usually 3 periods of the input data if it is periodic.
step_size(int): The jump length of the sliding window.
Returns:
- list, the drift label of input data.
0 means no drift, and 1 means drift happens.
- list, the locations of drifts(x-axis).
"""
label = []
label_location = []
# Label: label=0, no drifts; label=1, drift happens.
for i in range(0, len(original_data) - 2*window_size, step_size):
label_location.append(i + window_size)
if drift_score[i + window_size] >= threshold:
label.append(1)
else:
label.append(0)
return label, label_location | 6dbc650151b3a471fcc1b2b35eeff870e0e292a6 | 111,094 |
from typing import Counter
def get_modes(empiric_distribution, at_least_total=10):
    """
    Get all values appearing at least ``at_least_total`` times in the data.

    The most common value is always included, even when it has fewer than
    ``at_least_total`` appearances.

    Parameters
    ----------
    empiric_distribution : list
        List of integers
    at_least_total : int
        Minimum number of appearances for a value to count as a mode.
    """
    # most_common() yields (value, count) sorted by count, descending.
    modes = []
    for value, appearances in Counter(empiric_distribution).most_common():
        # The original compared the *value* (and a count/len ratio) against
        # the appearance threshold; the docstring asks for the count.
        if appearances >= at_least_total or not modes:
            modes.append(value)
    return modes
def dpdx(a, x, y, order=4):
    """Differential with respect to x
    The polynomial is defined as p(x,y) = a[i,j] * x**(i-j) * y**j summed over i and j
    Then dp/dx = (i-j) * a[i,j] * x**(i-j-1) * y**j
    Parameters
    ----------
    a a linear array of polynomial coefficients in JWST order.
    x an integer or float variable(or an array of same) representing pixel x positions
    y a variable (or an array) representing pixel y positions. x and y must be of the same shape.
    order an integer, the polynomal order
    Returns
    -------
    dpdx float value of dp/dx for the given (x,y) point(s)
    """
    dpdx = 0.0
    # k walks the flattened coefficient array; k=0 is the constant term,
    # which vanishes under d/dx, so we start at 1.
    k = 1 # index for coefficients
    for i in range(1, order + 1):  # i is the total degree of a term
        for j in range(i + 1):  # j is the power of y within a degree-i term
            if i - j > 0:  # pure-y terms (i == j) have no x dependence
                dpdx = dpdx + (i - j) * a[k] * x**(i - j - 1) * y**j
            # k must advance even for skipped terms to stay aligned with `a`.
            k += 1
    return dpdx
def read_gap_table(table_path, target_chromosome_name, size_cutoff=0):
    """
    Read tsv file describing gap locations with format convention:
    #bin chrom chromStart chromEnd ...
    23 chr1 122503247 124785432 ...
    ...
    :param table_path: path to the tab-separated gap table
    :param target_chromosome_name: only rows for this chromosome are kept
    :param size_cutoff: gaps must be strictly larger than this to be kept
    :return: list of [start, end] coordinate pairs
    """
    coordinates = list()
    with open(table_path, "r") as file:
        for line_number, line in enumerate(file):
            # Skip the header row and any blank lines.
            if line_number == 0 or line.isspace():
                continue
            fields = line.strip().split("\t")
            # Removed the unused `type` local (it also shadowed the builtin).
            chromosome_name = fields[1]
            start = int(fields[2])
            end = int(fields[3])
            if chromosome_name != target_chromosome_name:
                continue
            if end - start > size_cutoff:
                coordinates.append([start, end])
    return coordinates
def elapsed_time(variable, time_stamp):
    """
    Simplify the time axis by subtracting the initial time stamp.

    Useful because raw time stamps are usually huge (order of 1e9).

    :param variable: reference variable giving the length of the time axis
    :param time_stamp: the raw time stamp array to simplify
    :return: simplified time as elapsed time
    """
    return [time_stamp[i] - time_stamp[0] for i in range(len(variable))]
def coverage_command_fixture(request):
    """Parametrized fixture to use multiple forms of "coverage" command."""
    # request.param carries the current parametrization value supplied by pytest.
    return request.param
def add_ner_prompts(df, prompt_config, sep):
    """
    Combine sentences and entities into NER prompts.

    Adds 'prompts' and 'empty_prompts' (prompt without answer) columns to a
    DataFrame that already has 'sents' and 'entities' columns.
    """
    with_answer = prompt_config['sent_intro'] + ' {}\n' + prompt_config['retrieval_message'] + ' {}'
    without_answer = prompt_config['sent_intro'] + ' {}\n' + prompt_config['retrieval_message']
    prompts = []
    empty_prompts = []
    for _, row in df.iterrows():
        sentence = row['sents']
        joined_entities = sep.join(row['entities'])
        prompts.append(with_answer.format(sentence, joined_entities))
        empty_prompts.append(without_answer.format(sentence))
    df['prompts'] = prompts
    df['empty_prompts'] = empty_prompts
    return df
from typing import Tuple
def transform_rc(row: int, col: int, affine: tuple) -> Tuple[float, float]:
    """
    Perform the affine transformation from a row/col coordinate to projected
    x/y space.

    Args:
        row: pixel/array row number
        col: pixel/array column number
        affine: gdal GeoTransform tuple

    Returns:
        x/y coordinate (floats -- the annotation previously claimed ints,
        but the transform coefficients are floats)
    """
    x = affine[0] + col * affine[1] + row * affine[2]
    y = affine[3] + col * affine[4] + row * affine[5]
    return x, y
def cards_left(instance):
    """ Return the number of cards left to deal before a shuffle is required.

    Args:
        instance: The GameInstance database model for this operation.

    Returns:
        How many cards can still be dealt before the deck is empty. If the
        deck has not been set or no cards have been dealt (default deck),
        returns -1.
    """
    if 'crd_deck' in instance.dynamic_properties():
        return len(instance.crd_deck) - instance.crd_deck_index
    return -1
def pass_single(_, nodes):
    """
    Unpack single value and pass up.
    """
    first_node = nodes[0]
    return first_node
import struct
def is_protocol_header(bytes_read: bytes):
    """Check whether a byte stream is an AMQP protocol header frame.

    :param bytes_read: data read from a socket (must be exactly 8 bytes)
    """
    fields = struct.unpack("ccccBBBB", bytes_read)
    prefix = b"".join(fields[:4])
    return prefix == b"AMQP"
import math
def DCG(relevance_scores, k=14):
    """ Compute the discounted cumulative gain of relevance scores ordered
    by rank within a result list, truncated to the top *k*.

    Examples
    --------
    >>> DCG([3, 2, 3, 0, 1, 2])
    8.097171433256849
    >>> DCG([3, 2, 3, 0, 1, 2], k=2)
    5.0
    """
    top_k = relevance_scores[:k]
    total = top_k[0]  # rel_1 carries no discount
    for position, score in enumerate(top_k[1:], start=2):
        total += score / math.log(position, 2)
    return total
import re
def single_to_double_newlines(text):
    """Collapse any run of newline/CR characters into exactly two newlines."""
    return re.sub(r'[\n\r]+', '\n\n', str(text))
import math
def puct_exploration_bonus(child_count, parent_count):
    """PUCT exploration bonus.

    A variant with weight changing over time is used in AlphaZero.

    Args:
        child_count (int): Number of visits in the child node so far.
        parent_count (int): Number of visits in the parent node so far.

    Returns:
        float: Exploration bonus to apply to the child.
    """
    parent_term = math.sqrt(parent_count)
    return parent_term / (child_count + 1)
import math
def angular_momentum(m, v, r, theta):
    """
    Compute the angular momentum L = m * v * r * sin(theta) of an object of
    mass *m*, linear velocity *v*, path radius *r*, where *theta* is the
    angle between velocity and radius.

    Parameters
    ----------
    m : float
    v : float
    r : float
    theta : float

    Returns
    -------
    float
    """
    linear_momentum = m * v
    return linear_momentum * r * math.sin(theta)
def get_qualified_name(project):
    """
    Look up the qualified suffix name for a known project.

    :param project: the project
    :return: the qualified name
    :raises KeyError: if the project is unknown
    """
    qualified_names = {
        'cassandra': 'org.apache.cassandra',
        'dagger': 'dagger',
        'guava': 'com.google.common',
        'ivy': 'org.apache.ivy',
        'lang': 'org.apache.commons.lang',
        'math': 'org.apache.commons.math',
        'time': 'org.joda.time',
    }
    return qualified_names[project]
from typing import Counter
from typing import Dict
def sort_counter(counter: Counter, alphabetical: bool) -> Dict[str, int]:
    """Sort a package counter either alphabetically or by descending count.

    Args:
        counter: Imported packages and their corresponding count
        alphabetical: Whether to sort counter alphabetically

    Returns:
        Sorted counter
    """
    if alphabetical:
        ordered = sorted(counter.items())
    else:
        # Descending count first, then name as the tiebreaker.
        ordered = sorted(counter.items(), key=lambda item: (-item[1], item[0]))
    return dict(ordered)
def _build_download_url(template, version):
"""
Builds the software release download url.
:param template: url template
:type template: str
:param version: software release
:type version: str
"""
return template.format(version=version) | 388352339ef01b73172beee376046ea2b096865f | 111,168 |
def change_company_names(df):
    """Rename the insurers 'LähiTapiola' and 'Tapiola' to 'Elo' in the 'Yhtiö' column."""
    mask = df['Yhtiö'].isin(['LähiTapiola', 'Tapiola'])
    df.loc[mask, 'Yhtiö'] = 'Elo'
    return df
import ast
def extract_dictionary_values(word, polarity_dict):
    """ Look up *word*'s polarity, parsing numeric strings into numbers.

    Args:
        word (str):
        polarity_dict (dictionary): Dictionary with values from SentiMerge

    Returns:
        The parsed polarity value, the raw value when it cannot be parsed as
        a Python literal, or 0 when the word is unknown.

    Example:
        >>> extract_dictionary_values('Aal', {'Aal': '-0.017532794118768923'})
        -0.017532794118768923
    """
    if word not in polarity_dict:
        return 0
    value = polarity_dict[word]
    try:
        # literal_eval raises SyntaxError (not just ValueError) on malformed
        # input such as multi-word strings; both fall back to the raw value.
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        return value
def primary_id(data, location):
    """
    Generate a primary key for the given data and location.
    """
    exons = location["exons"]
    exon_starts = [int(exon["start"]) for exon in exons]
    exon_stops = [int(exon["stop"]) for exon in exons]
    return "{gene}:{accession}:{start}-{stop}".format(
        gene=data["gene"],
        accession=exons[0]["INSDC_accession"],
        start=min(exon_starts),
        stop=max(exon_stops),
    )
def get_options_config_if_fewer_than_five_hundred(column_values):
    """
    Return an Options configuration dict when a column has fewer than 500
    unique values; otherwise return None.

    :param column_values: iterable of raw column values
    :return: ``{'type': 'options', 'options': [...]}`` or None
    """
    # A dict preserves first-seen order while de-duplicating.
    seen = {}
    for value in column_values:
        seen[value] = True
    uniques = list(seen)
    if len(uniques) < 500:
        return {
            'type': 'options',
            'options': uniques
        }
from datetime import datetime
def get_tm1_time_value_now(use_excel_serial_date: bool = False) -> float:
    """
    Replicate TM1's NOW function: current date/time as a serial number.

    :param use_excel_serial_date: use the Excel epoch (1899-12-30) instead of
        the TM1 epoch (1960-01-01)
    :return: serial number (whole days plus the fraction of the current day)
    """
    epoch = datetime(1899, 12, 30) if use_excel_serial_date else datetime(1960, 1, 1)
    elapsed = datetime.now() - epoch
    return elapsed.days + elapsed.seconds / 86400
def sort_trades_by_time(trades):
    """
    Sorts list of trades by timestamp (earliest first).
    """
    def by_timestamp(trade):
        return trade.timestamp
    return sorted(trades, key=by_timestamp)
def strip_port_from_host(host):
    """
    Strips the port number (anything after the first colon) from *host*.
    """
    bare_host, _, _ = host.partition(':')
    return bare_host
import torch
def is_activation_to_checkpoint(item):
    """
    Return True when *item* is a floating-point tensor, i.e. an activation
    that should be checkpointed.
    """
    if not torch.is_tensor(item):
        return False
    return item.is_floating_point()
def get_val(val_str):
    """Convert a string to an int if possible, else a float, else leave it a string."""
    for converter in (int, float):
        try:
            return converter(val_str)
        except ValueError:
            continue
    return val_str
def get_event_from_player(p):
    """Return event identifier (elements 1 through 3) from player identifier."""
    event_fields = (p[1], p[2], p[3])
    return event_fields
import ast
def getLastSavedWellName(path2json):
    """
    Return the name of the last well saved to the experiment's json file.

    Each line of the file holds one well's data as a dict literal; the well
    name (of the form "B - 02") is the single key of the last line's dict.

    Parameters
    ----------
    path2json: str
        Path to json file that is used to save the experiment's data

    Returns
    -------
    last_well_name: str
        The name of the last well that was saved
    """
    with open(path2json, "r") as file:
        last_line = list(file)[-1]
    well_data = ast.literal_eval(last_line)
    return next(iter(well_data.keys()))
def is_log_handler(logger, handler):
    """
    Determine whether a Handler with the same name is attached to *logger*.

    :param logging.Logger logger: a logger object
    :param logging.Handler handler: a handler object
    :returns: result
    :rtype: bool
    """
    if handler.name is None:
        logger.warning("Request to validate Handler with no name assigned to Logger may result in duplicate Handlers")
    return any(attached.name == handler.name for attached in logger.handlers)
def remaining_G7_8_cap(k12):
    """
    Remaining enrollment capacity available for grades 7-8, floored at zero.
    """
    spare = k12['G7_8_cap'] - k12['G7_8']
    return spare.clip(0)
def _reduce_redundancy(text):
"""
Takes in text that has been cleaned by the _base_clean and uses set to reduce the repeating words
giving only a single word that is needed.
"""
return list(set(text)) | 2afaa0f70045226df695cd8e7000e44ee6bee76d | 111,210 |
import struct
def make_short_bytes(value):
    """Pack *value* as a big-endian unsigned 16-bit integer."""
    return struct.pack('!H', value)
from typing import Iterator
from pathlib import Path
def _get_notebooks(root_dir: str) -> Iterator[Path]:
"""
Get generator with all notebooks in directory.
Parameters
----------
root_dir
Notebook or directory to run third-party tool on.
Returns
-------
notebooks
All Jupyter Notebooks found in directory.
"""
if not Path(root_dir).is_dir():
return (i for i in (Path(root_dir),))
notebooks = (
i for i in Path(root_dir).rglob("*.ipynb") if ".ipynb_checkpoints" not in str(i)
)
return notebooks | 135671a9e3e5d37210a581f7b6cc6b7f122c8ac4 | 111,212 |
def head_of_list(x):
    """Takes a list, returns the first item in that list.
    If x is empty, return None
    >>> head_of_list([1, 2, 3, 4])
    1
    >>> head_of_list([]) is None
    True
    """
    if not x:
        return None
    return x[0]
def colfil2scan(col, fil, x0, y0, scale):
    """
    Convert image row/column indices to x/y in geostationary projection.

    Based on section 5.2.8.2 of PUG3.

    Parameters
    ----------
    col : int, int arr
        column index
    fil : int, int arr
        row index
    x0 : float
        x coordinate of the first point, in radians
    y0 : float
        y coordinate of the first point, in radians (parallel to
        Earth's axis)
    scale : float
        pixel size in radians

    Returns
    -------
    x : float, float arr
        horizontal coordinate, in radians
    y : float, float arr
        vertical coordinate, in radians (parallel to Earth's axis)
    """
    horizontal = x0 + col * scale
    vertical = y0 - fil * scale
    return horizontal, vertical
from typing import List
import platform
def get_cmake_in_package_install_args() -> List[str]:
    """Return default installation settings for installing libecole in the package.

    Raises:
        NotImplementedError: on operating systems other than Linux and macOS.
    """
    rpath_origins = {"Linux": r"\${ORIGIN}", "Darwin": "@loader_path"}
    system = platform.system()
    try:
        origin = rpath_origins[system]
    except KeyError:
        raise NotImplementedError(f"OS {system} is not supported")
    return [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_INSTALL_LIBDIR=lib",
        "-DCMAKE_INSTALL_BINDIR=bin",
        "-DCMAKE_INSTALL_INCLUDEDIR=include",
        "-DECOLE_PY_EXT_INSTALL_LIBDIR='.'",
        "-DECOLE_PY_EXT_INSTALL_RPATH={origin}/lib".format(origin=origin),
    ]
from typing import Mapping
from typing import OrderedDict
import re
def replace_multiple(s: str, replacements: Mapping[str, str]) -> str:
    """Replace multiple strings at once. If multiple replacements overlap the precedence is given by the order in
    replacements (regex alternation picks the leftmost alternative listed first).
    For pyver >= 3.6 (otherwise use OrderedDict)
    >>> assert replace_multiple('asdf', {'asd': '1', 'asdf': '2'}) == '1f'
    >>> assert replace_multiple('asdf', {'asdf': '2', 'asd': '1'}) == '2'
    """
    rep = OrderedDict((re.escape(k), v) for k, v in replacements.items())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], s)
import struct
def unpack32(data):
    """Decode a little-endian unsigned 32-bit integer from *data*."""
    (value,) = struct.unpack('<I', data)
    return value
def extract_url_information(url):
    """Split *url* into protocol, domain and path components.

    A URL with no '/' after the domain yields path '' instead of raising
    ValueError (the previous behaviour).

    :param url: absolute URL, e.g. "http://host/path?a=1"
    :return: dict with keys 'url', 'path', 'domain', 'protocol'
    """
    # NOTE(review): replace('&', '&') is a no-op; it looks like a mangled
    # '&amp;' -> '&' un-escaping -- confirm the original intent.
    protocol = url[:url.index(':')].replace('&', '&')
    post_protocol = url.index('//')
    # The path starts at the first '/' after the domain; it may be absent.
    post_domain = url.find('/', post_protocol + 2)
    if post_domain == -1:
        post_domain = len(url)
    domain = url[post_protocol + 2:post_domain]
    path = url[post_domain:].replace('&', '&')
    return {'url': url.replace('&', '&'),
            'path': path,
            'domain': domain,
            'protocol': protocol}
def ihead(store, n=1):
    """Get the first item of an iterable, or a list of the first n items"""
    if n != 1:
        return [element for position, element in enumerate(store) if position < n]
    # Returning from inside the loop yields the first item; an empty
    # iterable falls through and implicitly returns None.
    for element in iter(store):
        return element
import struct
def dict_from_packet(bytes):
    """ Return a dict of the ICMP header fields parsed from an ICMP packet """
    field_names = ("type", "code", "checksum", "id", "sequence")
    header = bytes[20:28]  # ICMP header follows the 20-byte IP header
    values = struct.unpack("!BBHHH", header)
    return dict(zip(field_names, values))
def ub8(b):
    """Convert bytes to an 8-bit integer.

    Takes the first byte of *b*; any extra bytes are ignored.

    Example:
        ub8(b'\x12') == 0x12
    """
    return b[0]
def is_palindromic(n):
    """Return True iff the given integer is palindromic."""
    digits = str(n)
    return digits == digits[::-1]
def safe_import(origin, funk1, funk2):
    """Safely import a function whose name was changed from a module whose name was not.

    Tries *funk1* first and falls back to *funk2*. This is specially useful
    if the function name is not known at runtime.

    Args:
        origin (str): name of the module where the function to be imported is located
        funk1 (str): name of the first function name to try to import
        funk2 (str): name of the second function name to try to import

    Returns:
        function: function object imported from `origin`

    Example:
        # instead of writing this
        try:
            from itertools import filterfalse
        except ImportError:
            from itertools import ifilterfalse as filterfalse
        # write this
        filterfalse = safe_import('itertools', 'filterfalse', 'ifilterfalse')
    """
    hook = __import__(origin, globals(), locals(), [funk1], 0)
    try:
        return getattr(hook, funk1)
    except AttributeError:
        # The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing attribute should
        # trigger the fallback name.
        return getattr(hook, funk2)
def _self_interactions(num_qubits):
"""Return the indices corresponding to the self-interactions."""
interactions = []
for qubit in range(num_qubits):
for pindex in range(1, 4):
term = [0] * num_qubits
term[qubit] = pindex
interactions.append(tuple(term))
return interactions | e2f641698a2add4d5b8ddf5ac5a0ff6f7855de49 | 111,245 |
def readable_bytes(num_bytes):
    """Convert a byte count into the largest sensible unit.

    Args:
        num_bytes (int): Number of bytes.

    Returns:
        num_bytes (float): Number of bytes in the appropriate unit
            (B, KB, MB, or GB).
        unit (string): The appropriate unit.
    """
    units = ('B', 'KB', 'MB', 'GB')
    idx = 0
    last = len(units) - 1
    while int(num_bytes / 1024) and idx < last:
        num_bytes /= 1024
        idx += 1
    return num_bytes, units[idx]
def size(image):
    """
    Return the size `(X, Y)` of the given image array.
    """
    height, width = image.shape[0], image.shape[1]
    return (width, height)
def tab(text, n=1):
    """
    Indent generated code by `n` 4-space indents.
    """
    indent = '    ' * n
    return '\n'.join(indent + line for line in text.split('\n'))
def describe(data_matrix):
    """Get the shape of a sparse matrix and its average nnz."""
    rows, cols = data_matrix.shape[0], data_matrix.shape[1]
    avg_nnz = data_matrix.getnnz() / rows
    return 'Instances: %3d ; Features: %d with an avg of %d per instance' % (
        rows, cols, avg_nnz)
def increment(x):
    """Return x incremented by 1."""
    return x + 1
import re
def extractYearMonthDate(url):
    """Assumes url is a string, representing a url with a full date
    returns re match object, representing the full date from the url"""
    return re.search(r"\d{4}/\d{2}/\d{2}", url)
def build(name, builder):
    """Wrapper to turn (name, ctx) -> val method signatures into (ctx) -> val."""
    def bound(ctx):
        return builder(name, ctx)
    return bound
def get_matching_secrets_id(secrets_cp, submod, main_id):
    """
    Find the .secrets.conf section name matching a submodule and config ID.

    Section names look like "<submod>::<main_id>"; matching is
    case-insensitive and ignores surrounding whitespace.

    Args:
        secrets_cp (ConfigParser): A config parser for the .secrets.conf file
            already loaded.
        submod (str): The name of the submodule that should be the prefix in
            the section name for this in the .secrets.conf file.
        main_id (str): The name of section from the relevant submodule's
            config to ID this element.

    Returns:
        (str or None): The name of the matching section, or None if no match.
    """
    wanted = (submod.strip().lower(), main_id.strip().lower())
    for section_name in secrets_cp:
        try:
            prefix, suffix = section_name.split('::')
        except ValueError:
            # Not a "<submod>::<id>" style section; skip it.
            continue
        if (prefix.strip().lower(), suffix.strip().lower()) == wanted:
            return section_name
    return None
def if_(test, result, alternative):
    """Like C++ and Java's (test ? result : alternative), except
    both result and alternative are always evaluated. However, if
    either evaluates to a function, it is applied to the empty arg list,
    so you can delay execution by putting it in a lambda.
    Ex: if_(2 + 2 == 4, 'ok', lambda: expensive_computation()) ==> 'ok' """
    chosen = result if test else alternative
    return chosen() if callable(chosen) else chosen
import torch
def apply_homogeneous_affine_transform(transform: torch.Tensor, position: torch.Tensor):
    """
    Apply an homogeneous affine transform (4x4 for 3D or 3x3 for 2D) to a position.

    Args:
        transform: an homogeneous affine transformation
        position: XY(Z) position

    Returns:
        a transformed position XY(Z)
    """
    assert len(transform.shape) == 2
    assert len(position.shape) == 1
    dim = position.shape[0]
    assert transform.shape[0] == transform.shape[1]
    assert transform.shape[0] == dim + 1
    # Split the homogeneous matrix into its linear and translation parts.
    linear = transform[:dim, :dim]
    translation = transform[:dim, dim]
    column = position.unsqueeze(1).type(torch.float32)
    return linear.mm(column).squeeze(1) + translation
def split_procs(procs, valid_frac=.2):
    """Split the procedures into a training and validation set by start time.

    The earliest (1 - valid_frac) fraction becomes the training set; the most
    recent procedures become the validation set. Sorts *procs* in place.
    """
    procs.sort(key=lambda proc: proc.get_start_time())
    cut = int((1 - valid_frac) * len(procs))
    return (procs[:cut], procs[cut:])
def prime_factors(number):
    """Finds prime factors of an integer (by trial-division).

    :param number: The integer to factor
    :type number: int
    :rtype: list of ints

    **Examples**

    >>> prime_factors(314)
    [2, 157]
    >>> prime_factors(31)
    [31]
    """
    factors = []
    candidate = 2
    while candidate * candidate <= number:
        if number % candidate == 0:
            number //= candidate
            factors.append(candidate)
        else:
            candidate += 1
    if number > 1:
        # Whatever remains is itself prime.
        factors.append(number)
    return factors
from typing import Callable
import requests
from typing import Any
def send_get_request(url: str,
                     filter_function: Callable[[requests.Response], Any] =
                     lambda result: result.json()) -> Any:
    """
    Send a GET request to *url* and return the result of *filter_function*
    applied to the response.

    NOTE: the default filter assumes the endpoint returns a raw JSON object.

    url: The url of the address where the get request is to be made to.
    filter_function: A function that takes a requests.Response object and
    returns a result; optional, defaults to decoding the body as JSON.
    return: resulting data passed through the filter function
    """
    response = requests.get(url)
    return filter_function(response)
def organizations_sync_out_doc_template_values(url_root):
    """
    Show documentation about organizationsSyncOut

    Builds the context dict used to render the API-documentation page for
    the organizationsSyncOut endpoint: its optional query parameters,
    status codes, "try it now" link variables, and an example JSON response.

    :param url_root: root URL of the API server; passed through to the template.
    :return: dict of template values consumed by the documentation renderer.
    """
    # Optional query-string parameters the endpoint accepts.
    optional_query_parameter_list = [
        {
            'name': 'state_served_code',
            'value': 'string', # boolean, integer, long, string
            'description': 'Limit the results to just the state requested.',
        },
    ]
    # No endpoint-specific status codes documented yet.
    potential_status_codes_list = [
    ]
    # Variables pre-filled into the "try it now" link on the docs page.
    try_now_link_variables_dict = {
        'format': 'json',
    }
    # Example response body shown in the docs: a JSON array of organization
    # records (field names with placeholder types, not real values).
    api_response = '[{\n' \
                   '  "we_vote_id": string,\n' \
                   '  "vote_smart_id": integer,\n' \
                   '  "ballotpedia_page_title": string,\n' \
                   '  "ballotpedia_photo_url": string,\n' \
                   '  "organization_address": string,\n' \
                   '  "organization_city": string,\n' \
                   '  "organization_contact_form_url": string,\n' \
                   '  "organization_contact_name": string,\n' \
                   '  "organization_description": string,\n' \
                   '  "organization_email": string,\n' \
                   '  "organization_facebook": string,\n' \
                   '  "organization_name": string,\n' \
                   '  "organization_image": string,\n' \
                   '  "organization_phone1": string,\n' \
                   '  "organization_phone2": string,\n' \
                   '  "organization_fax": string,\n' \
                   '  "organization_state": string,\n' \
                   '  "organization_type": string,\n' \
                   '  "organization_twitter_handle": string,\n' \
                   '  "organization_website": string,\n' \
                   '  "organization_zip": string,\n' \
                   '  "state_served_code": string,\n' \
                   '  "twitter_description": string,\n' \
                   '  "twitter_followers_count": integer,\n' \
                   '  "twitter_location": string,\n' \
                   '  "twitter_name": string,\n' \
                   '  "twitter_profile_background_image_url_https": string,\n' \
                   '  "twitter_profile_banner_url_https": string,\n' \
                   '  "twitter_profile_image_url_https": string,\n' \
                   '  "twitter_user_id": integer,\n' \
                   '  "wikipedia_page_id": string,\n' \
                   '  "wikipedia_page_title": string,\n' \
                   '  "wikipedia_photo_url": string,\n' \
                   '  "wikipedia_thumbnail_height": string,\n' \
                   '  "wikipedia_thumbnail_url": string,\n' \
                   '  "wikipedia_thumbnail_width": string,\n' \
                   '}]'
    # Assemble everything the documentation template expects.
    template_values = {
        'api_name': 'organizationsSyncOut',
        'api_slug': 'organizationsSyncOut',
        'api_introduction':
            "",
        'try_now_link': 'apis_v1:organizationsSyncOutView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
def divide(string, length):
    """
    Taken (with permission) from https://github.com/TheElementalOfCreation/creatorUtils

    Divides a string into multiple substrings of equal length.  Any trailing
    characters that do not fill a complete division are dropped.

    :param string: string to be divided.
    :param length: length of each division.
    :returns: list containing the divided strings.

    Example:
        >>> divide('Hello World!', 2)
        ['He', 'll', 'o ', 'Wo', 'rl', 'd!']
    """
    # Floor division avoids the original's float round-trip int(len(...) / length)
    # (also fixes the malformed '>>>>' doctest prompt in the example above).
    return [string[length * i:length * (i + 1)]
            for i in range(len(string) // length)]
def penaltymodel_factory(priority):
"""Decorator to assign a `priority` attribute to the decorated function.
Args:
priority (int): The priority of the factory. Factories are queried
in order of decreasing priority.
Examples:
Decorate penalty model factories like:
>>> @pm.penaltymodel_factory(105)
... def factory_function(spec):
... pass
>>> factory_function.priority
105
"""
def _entry_point(f):
f.priority = priority
return f
return _entry_point | 701c9435ba78dfda1823dc5b90d0c23c444a355b | 111,295 |
def strip_id(mapping):
    """Return a shallow copy of *mapping* without its 'id' key.

    :param mapping: dictionary to copy (left unmodified).
    :returns: new dict with every item except the 'id' entry.
    """
    # Parameter renamed: it previously shadowed the builtin ``dict``.
    return {key: value for key, value in mapping.items() if key != 'id'}
def lapnumber_axis(ax, axis='xaxis'):
    """Restrict an axis to integer tick locations only.

    Args:
        ax: matplotlib axis
        axis (='xaxis', optional): can be 'xaxis' or 'yaxis'

    Returns:
        the modified axis instance
    """
    locator = getattr(ax, axis).get_major_locator()
    # min_n_ticks=0 lets the locator place no ticks when the data range
    # contains no integers.
    locator.set_params(integer=True, min_n_ticks=0)
    return ax
def get_xi_from_ARPS_simulation(simulation):
    """Extract xi from the full name of an ARPS file.

    Expects names of the form ``<topo_or_wind>_<N>_<dx>_xi<value>_<sigma>_<ext>``
    (exactly six underscore-separated fields) and returns the xi value as a string.
    """
    _topo_or_wind, _n, _dx, xi_token, _sigma, _ext = simulation.split('_')
    return xi_token.split('xi')[1]
def isfloat(x):
    """Return True if ``x`` can be converted to a float, else False.

    NaN and infinity count as floats; values that ``float()`` rejects do not.

    >>> isfloat(12)
    True
    >>> isfloat('a')
    False
    >>> isfloat(float('nan'))
    True
    >>> isfloat(float('inf'))
    True
    """
    try:
        float(x)
    except (TypeError, ValueError):
        # The original bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; float() only raises TypeError/ValueError on bad input.
        return False
    return True
def _are_dependencies_handled(file_descriptor,
dependencies):
"""Returns True iff dependencies of descriptor are in dependencies."""
for dependency in file_descriptor.dependencies:
if dependency not in dependencies:
return False
return True | c73e8e629d60b4d3ffc9f2bfae8bd4e59293bc2b | 111,307 |
import re
def parse_urls(text):
    """Parses the first URL from text

    Arguments:
        text {string} -- string needing parsing

    Returns:
        (string) -- url, or None if no URL is found
    """
    # Raw string: the original "\s" in a plain string is an invalid escape
    # sequence (DeprecationWarning, and a SyntaxError in future Pythons).
    search = re.search(r"(?P<url>https?://[^\s>]+)", text)
    if search:
        return search.group("url")
    return None
def slices(series, length):
    """Return every contiguous substring of ``series`` of the given length.

    Raises ValueError when the length is non-positive, the series is empty,
    or the length exceeds the series length.
    """
    if length < 0:
        raise ValueError("slice length cannot be negative")
    if length == 0:
        raise ValueError("slice length cannot be zero")
    if series == "":
        raise ValueError("series cannot be empty")
    if length > len(series):
        raise ValueError("slice length cannot be greater than series length")
    return [series[start:start + length]
            for start in range(len(series) - length + 1)]
from typing import OrderedDict
def copy_items(arg_dict, argnames):
    """Copy a selection of parameter tuples into a smaller ordered dictionary.

    Parameters
    ----------
    arg_dict : `dict`
        The dictionary mapping name to (type, default, helpstring) tuple
    argnames : `list`
        List of keys to copy to the output dictionary

    Returns
    -------
    outdict : `dict`
        Dictionary with only the arguments we have selected, in
        ``argnames`` order

    Raises
    ------
    KeyError
        If any requested key is missing from ``arg_dict``.
    """
    missing = [name for name in argnames if name not in arg_dict]
    if missing:
        raise KeyError("Argument %s is not defined in dictionary" % missing[0])
    return OrderedDict((name, arg_dict[name]) for name in argnames)
def text_to_dict(file_path):
    """
    Read in a text file as a dictionary where keys are text and values are indices (line numbers).
    Used to read the act set and slot set.

    # Arguments:
    - ** file_path **: The path to the act set or slot set file

    ** return **: string-index dictionary
    """
    result_set = {}
    with open(file_path, 'r') as f:
        # enumerate replaces the manual index counter; iterating the file
        # directly also avoids materializing every line via readlines().
        for index, line in enumerate(f):
            result_set[line.strip('\n').strip('\r')] = index
    return result_set
def _sort_by_name(item):
"""Sort key function."""
return item['name'] | 27b243fcc04b2db298736dca6b6a5f02b9a62694 | 111,314 |
def get_link_inf(f, path):
    """Given a path to a node, returns information about
    the link, or None, if the path is not part of a link.  Returned link_inf has keys:

    link_type - type of link: 'hard', 'soft', or 'ext' (external)
    loc - location (key) of link group associated with link. i.e. in links['lg'][link_type]
    is_source - For link_type "soft" returns True if is a link source (not the target
    of the link-group).  For link types 'hard' and 'ext' this key is not returned.

    Note: This routine called 'get_link_inf' (not 'get_link_info") because the returned
    dictionary is different than what is stored in the node class, "link_info" dict.

    NOTE(review): assumes f.links has nested dicts 'path2lg' (keyed 'hard'/
    'soft'/'ext', mapping path -> link-group loc) and 'lg' (keyed by type,
    with soft-link targets as keys) -- confirm against the links builder.
    """
    # Classify the path by which link-group index it appears in.  A soft
    # link's *target* lives in links['lg']['soft'] rather than 'path2lg',
    # hence the extra membership test on the 'soft' branch.
    link_type = (
        'hard' if path in f.links['path2lg']['hard']
        else 'soft' if path in f.links['path2lg']['soft']
        or path in f.links['lg']['soft'] # matches when path is the soft-link target
        else 'ext' if path in f.links['path2lg']['ext'] else None)
    if link_type is None:
        # node is not in a hard, soft or ext link-group, so is not part of a link
        return None
    if link_type == 'soft':
        # Soft link: it is the *target* when the path is itself the location
        # (key) of a soft link-group; otherwise it is a source.
        is_source = not path in f.links['lg']['soft']
        # Sources map through path2lg to the group loc; a target IS the loc.
        loc = f.links['path2lg']['soft'][path] if is_source else path
        link_inf = {'link_type': link_type, 'loc': loc, 'is_source': is_source}
    else:
        # must be hard or external.  loc for hard is a tuple, for ext is file\npath
        loc = f.links['path2lg'][link_type][path]
        link_inf = {'link_type': link_type, 'loc': loc}
    return link_inf
def location_to_point(location):
    """Converts a Location model to GeoJSON Point.

    :param location: Location model with ``latitude``, ``longitude`` and
        ``id``, plus optional descriptive attributes.
    :return: dict with "type", "coordinates", and a "properties" mapping
        containing the id and any truthy optional attribute.

    NOTE(review): coordinates are emitted as [latitude, longitude]; the
    GeoJSON spec (RFC 7946) orders them [longitude, latitude].  Preserved
    as-is since existing consumers may depend on this order -- confirm.
    """
    point = {
        "type": "Point",
        "coordinates": [location.latitude, location.longitude],
        "properties": {
            "id": location.id,
        }
    }
    # One data-driven loop replaces seven copy-pasted if-blocks; each optional
    # attribute is copied only when truthy, exactly as before.
    for attr in ("name", "description", "location_type", "ease_of_use",
                 "safety", "capacity_type", "picture"):
        value = getattr(location, attr)
        if value:
            point["properties"][attr] = value
    return point
def style_to_string(style):
    """
    Takes a style dict and writes to ordered style text:
    input: {'fill': 'rgb(100%,0%,0%', 'fill-opacity': 1.0}
    returns: 'fill:rgb(100%,0%,0%);fill-opacity:1.0'
    """
    # Sort by key so the output is deterministic regardless of dict order.
    pairs = sorted(style.items())
    return ";".join("%s:%s" % pair for pair in pairs)
def find_segments(j, e, c, OPT):
    """
    Given an index j, a residuals dictionary, a line cost, and a
    dictionary of optimal costs for each index,
    return a list of the optimal endpoints for least squares segments from 0-j

    Iterative backtracking: at each step pick the start index minimizing
    e[i][j] + c + OPT[i-1], record it, and continue from just before it.
    """
    endpoints = []
    while j != -1:
        costs = [e[i][j] + c + OPT[i - 1] for i in range(0, j + 1)]
        best_start = costs.index(min(costs))
        endpoints.append(best_start)
        j = best_start - 1
    endpoints.reverse()
    return endpoints
import math
def norm_entropy(probs):
    """Get the normalized (natural-log) entropy of a list of probabilities.

    Zero probabilities contribute nothing (lim p->0 of p*ln p is 0).

    Parameters
    ----------
    probs: list
        list of probabilities

    Returns
    -------
    entropy of the probabilities divided by len(probs)

    Raises
    ------
    ZeroDivisionError
        If ``probs`` is empty (unchanged from the original behavior).
    """
    # The original's ``else: entropy += 0`` branch was dead code, and
    # math.log(p, math.e) is just math.log(p) (division by log(e) == 1.0).
    entropy = sum(p * math.log(p) for p in probs if p > 0)
    return -entropy / len(probs)
from pathlib import Path
def stem(p: str) -> str:
    """ Remove all stems from a filename, e.g. foo.test.golden.fidl -> foo. """
    current = p
    candidate = Path(current).stem
    # Keep stripping one suffix at a time until nothing changes.
    while candidate != current:
        current = candidate
        candidate = Path(current).stem
    return current
def cigar_to_int(cigar):
    """Convert a simple one-operation CIGAR string to an overlap int.

    An 'N' operation (skip) yields a negative value; anything else
    (e.g. 'M') yields the positive count.

    >>> cigar_to_int('71N')
    -71
    >>> cigar_to_int('3M')
    3
    """
    count = int(cigar[:-1])
    return -count if cigar.endswith('N') else count
def get_adjacent_tiles(eris, level, coordinates):
    """Collect adjacent tiles from coordinates at the given level on eris.

    Each coordinate is a (level_change, (row, col)) pair; the center cell
    (2, 2) is always skipped, as is any coordinate whose level/row/col key
    does not exist in eris.
    """
    tiles = []
    for level_change, (row, col) in coordinates:
        if (row, col) == (2, 2):
            continue
        try:
            tiles.append(eris[level + level_change][row][col])
        except KeyError:
            # Neighboring level/position does not exist; ignore it.
            continue
    return tiles
def impuesto_iva12(monto=0):
    """Compute the 12% VAT (IVA) tax for the given amount."""
    return ((monto * 12) / 100)
def find_exact_match(row, groups, match_cols):
    """Return the distinct hotel clusters of the group that matches ``row``
    exactly on ``match_cols``, or an empty list when no such group exists.
    """
    key = tuple(row[col] for col in match_cols)
    try:
        matched = groups.get_group(key)
    except KeyError:
        # No group with this exact key combination.
        return []
    return list(set(matched.hotel_cluster))
import time
def profile_timer(f, *args, **kwargs):
    """
    A wrapper to run functions and tell us how long they took

    :param f: function to run
    :param args: ordered parameters
    :param kwargs: keyword parameters
    :return: value of f for those parameters
    """
    # perf_counter is monotonic and high-resolution; time.time() is
    # wall-clock and can jump when the system clock is adjusted.
    t0 = time.perf_counter()
    result = f(*args, **kwargs)
    t1 = time.perf_counter()
    print("{} ran in {:.3f} sec".format(f.__name__, t1-t0))
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.