content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
from typing import Dict
from typing import List
from typing import Optional
def to_snake_case(
    d: Dict[str, str], replace: Optional[List[str]] = None
) -> Dict[str, str]:
    """
    Format the keys of the input dictionary to be in snake case.
    Note
    ----
    This converts keys from "Snake Case" to "snake_case".
    Parameters
    ----------
    d: Dict[str, str]
        Contains the keys to convert
    replace: Optional[List[str]]
        A list of characters to replace with blanks; defaults to ["."].
        (Using None as the default avoids the shared-mutable-default bug.)
    Returns
    -------
    Dict[str, str]
        The converted dictionary
    """
    if replace is None:
        replace = ["."]

    def _format_key(key: str) -> str:
        # Drop the replacement characters, then lowercase the key.
        for c in replace:
            key = key.replace(c, "")
        return key.lower()

    # split() collapses any whitespace runs; join turns them into "_".
    return {
        "_".join(_format_key(key).split()): value for key, value in d.items()
    }
|
f9b954cb2faaabe9f0e07a5a2251b43b720564cd
| 379,207
|
def ganeti_host_to_netbox(ganeti_dict, additional_fields):
    """Map a single Ganeti host-list entry onto the fields Netbox expects,
    merging in any caller-supplied additional fields."""
    host = ganeti_dict["name"].split(".")[0]
    netbox_entry = {
        "name": host,
        "vcpus": ganeti_dict["beparams"]["vcpus"],
        "memory": ganeti_dict["beparams"]["memory"],
        # Ganeti reports disk sizes in megabytes; Netbox expects gigabytes.
        "disk": round(sum(ganeti_dict["disk.sizes"]) / 1024, 0),
    }
    # "admin_state" is the desired machine state, which maps onto Netbox status.
    netbox_entry["status"] = "active" if ganeti_dict["admin_state"] == "up" else "offline"
    netbox_entry.update(additional_fields)
    return netbox_entry
|
d06cd8b65133e78e54d5b95d3149d897fb5dd462
| 286,283
|
def avg_wordlen(values, delimiter=" "):
    """
    Return the average length of the tokens in `values`.

    Parameters
    ----------
    values : str
        The string to split into tokens.
    delimiter : str, optional
        Token separator; space by default. (The original implementation
        accepted this parameter but always split on a space — fixed.)

    Example
    -------
    >>> avg_wordlen("One two three four five six")
    3.6666666666666665
    """
    tokens = values.split(delimiter)
    return sum(len(token) for token in tokens) / len(tokens)
|
641f841073788ce9df46790d431cbad4bdc93fb7
| 602,182
|
def pytest_ignore_collect(path, config):
    """Skip collection of everything except .toml feature definition files."""
    return path.ext != ".toml"
|
80d193ff28a7f2f903ec5d4dd09d13973e066dcf
| 18,446
|
def dragSourceRegister(widget, action='copy', datatype='text/uri-list',
                       data=''):
    """
    Register `widget` as a tkdnd drag source.

    Parameters:
        widget: the Tkinter widget to register with tkdnd.
        action: drag action advertised to the drop target (e.g. 'copy').
        datatype: MIME type of the dragged payload.
        data: the payload, or a callable evaluated at drag time.

    If the 'data' param is callable, it will be called every time to
    look up the current data. If the callable returns None (or data is
    None to begin with), the drag is aborted by returning nothing from
    the <<DragInitCmd>> handler.
    """
    widget.tk.call('tkdnd::drag_source', 'register', widget._w)
    # with normal Tkinter bind(), the result of your handler isn't
    # actually returned so the drag doesn't get launched. This is a
    # corrected version of what bind() does when you pass a function,
    # but I don't block my tuple from getting returned (as a tcl list)
    def init():
        # Resolve the payload lazily so callers may supply a callable.
        dataValue = data() if callable(data) else data
        if dataValue is None:
            return  # no payload -> abort the drag
        return (action, datatype, dataValue)
    funcId = widget._register(
        init,
        widget._substitute,
        1  # needscleanup
    )
    widget.bind("<<DragInitCmd>>", funcId)
|
a5f73cf26d688a29381654adeee172871fe891b8
| 343,101
|
def num_digits_faster(n: int) -> int:
    """
    Count the digits of an integer; the sign is ignored via abs().
    >>> num_digits_faster(12345)
    5
    >>> num_digits_faster(123)
    3
    >>> num_digits_faster(0)
    1
    >>> num_digits_faster(-1)
    1
    >>> num_digits_faster(-123456)
    6
    """
    digits = str(abs(n))
    return len(digits)
|
3cdeda0661eba088957d0caeaac123d851154ac1
| 480,166
|
def get_labor_input_baseline(
    assets_this_period,
    assets_next_period,
    interest_rate,
    wage_rate,
    income_tax_rate,
    productivity,
    efficiency,
    gamma,
):
    """ Calculate optimal household labor input.
    Arguments:
        assets_this_period: np.float64
            Current asset holdings (pre interest payment)
        assets_next_period: np.float64
            Savings for asset holdings next period (pre interest payment)
        interest_rate: np.float64
            Current interest rate on capital holdings
        wage_rate: np.float64
            Current wage rate on effective labor input
        income_tax_rate: np.float64
            Tax rate on labor income
        productivity: np.float64
            Current household productivity level (shock)
        efficiency: np.float64
            Age-dependent labor efficiency multiplier
        gamma: np.float64
            Weight of consumption utility vs. leisure utility
    Returns:
        labor_input: np.float64
            Optimal hours worked, clamped to [0, 1]
    """
    # Degenerate cases: no effective labor, or no utility weight on leisure.
    if efficiency == 0.0:
        return 0.0
    if gamma == 1.0:
        return 1.0
    # Interior first-order condition, then clamp to the feasible interval.
    net_wage = (1.0 - income_tax_rate) * productivity * efficiency * wage_rate
    asset_income = (1.0 + interest_rate) * assets_this_period - assets_next_period
    unclamped = (gamma * net_wage - (1.0 - gamma) * asset_income) / net_wage
    return min(max(unclamped, 0.0), 1.0)
|
d7a8330cb30cee60235b2232f508ab52b57c855e
| 529,708
|
def StripRefsHeads(ref, strict=True):
    """Remove a leading 'refs/heads/' from a ref name.

    If strict is True, an Exception is thrown if the ref doesn't start with
    refs/heads/. If strict is False, the original ref is returned.

    Note: only the *prefix* is removed; the original implementation used
    str.replace(), which also stripped 'refs/heads/' occurring anywhere
    else in the name.
    """
    prefix = 'refs/heads/'
    if not ref.startswith(prefix):
        if strict:
            raise Exception('Ref name %s does not start with refs/heads/' % ref)
        return ref
    return ref[len(prefix):]
|
badce5e73681817fb5ecf648433a5f650db52d05
| 456,458
|
def convert_distance(val, old_scale="meter", new_scale="centimeter"):
    """
    Convert from a length scale to another one among meter, centimeter, inch,
    feet, and mile.
    Parameters
    ----------
    val: float or int
        Value of the length to be converted expressed in the original scale.
    old_scale: str
        Original scale from which the length value will be converted.
        Supported scales are Meter ['Meter', 'meter', 'm'],
        Centimeter ['Centimeter', 'centimeter', 'cm'], Inch ['Inch', 'inch', 'in'], Feet ['Feet', 'feet', 'ft'] or Mile ['Mile', 'mile', 'mil'].
    new_scale: str
        New scale to which the length value will be converted.
        Supported scales are Meter ['Meter', 'meter', 'm'],
        Centimeter ['Centimeter', 'centimeter', 'cm'], Inch ['Inch', 'inch', 'in'], Feet ['Feet', 'feet', 'ft'] or Mile ['Mile', 'mile', 'mil'].
    Raises
    -------
    AttributeError if either of the scales is not one of the supported
    ones.
    Returns
    -------
    res: float
        Value of the converted length expressed in the new scale.
    """
    # Convert from 'old_scale' to Meter
    if old_scale.lower() in ['centimeter', 'cm']:
        temp = val / 100.0
    elif old_scale.lower() in ['meter', 'm']:
        temp = val
    elif old_scale.lower() in ['inch', 'in']:
        temp = val / 39.37008
    elif old_scale.lower() in ['feet', 'ft']:
        temp = val / 3.28084
    elif old_scale.lower() in ['mile', 'mil']:
        temp = 1609.344 * val
    else:
        raise AttributeError(
            f'{old_scale} is unsupported. m, cm, ft, in and mile are supported')
    # and from Meter to 'new_scale'
    if new_scale.lower() in ['centimeter', 'cm']:
        result = 100*temp
    elif new_scale.lower() in ['meter', 'm']:
        result = temp
    elif new_scale.lower() in ['inch', 'in']:
        result= 39.37008*temp
    elif new_scale.lower() in ['feet', 'ft']:
        result=3.28084*temp
    elif new_scale.lower() in ['mile', 'mil']:
        result=temp/1609.344
    else:
        raise AttributeError(
            f'{new_scale} is unsupported. m, cm, ft, in and mile are supported')
    return result
|
46cac6149753a2231e040c2507b71bbc23a3f607
| 686,740
|
import base64
def str_to_base64(data, encoding='utf-8'):
    """Encode a string (UTF-8 by default) into Base64.

    :param str data: String to encode as Base64.
    :param str encoding: Optional. Encoding of `data`; defaults to 'utf-8'.
    :returns: A str containing the Base64-encoded payload.
    :rtype: str
    """
    raw_bytes = data.encode(encoding)
    return base64.b64encode(raw_bytes).decode('utf-8')
|
a8012e75edec99a1c3379dd0c07b2165073b2a06
| 615,867
|
def p02_p01(M1, gamma):
    """Stagnation pressure ratio p02/p01 across a normal shock.

    :param <float> M1: Mach # before the shock
    :param <float> gamma: Specific heat ratio
    :return <float> Stagnation pressure ratio p02/p01
    """
    gm1 = gamma - 1.0
    gp1 = gamma + 1.0
    m2 = M1 ** 2
    pressure_term = gp1 / (2.0 * gamma * m2 - gm1)
    density_term = gp1 * m2 / (2.0 + gm1 * m2)
    return pressure_term ** (1.0 / gm1) * density_term ** (gamma / gm1)
|
c64500cc652560c3572e790d074f66e62900137b
| 158,957
|
import binascii
def bin2macaddress(data):
    """Convert a byte-string to a colon-separated MAC address string."""
    hexdump = binascii.b2a_hex(data)
    # Take the hex dump two characters (one byte) at a time.
    octets = [hexdump[pos:pos + 2] for pos in range(0, len(hexdump), 2)]
    return b":".join(octets).decode()
|
2c3f6989810adb6257cd169b817ab3d06f58feff
| 692,675
|
import struct
def decode_bip32_path(path: bytes) -> str:
    """Decode a BIP-32/44 path from its packed big-endian uint32 form.

    Hardened ("private") components carry the 0x80000000 bit and are
    rendered with a trailing apostrophe; non-hardened components are
    rendered as plain integers. (The original tested `< 256`, which
    mislabelled non-hardened indexes >= 256 as hardened.)
    """
    hardened_bit = 0x80000000
    parts = []
    for i in range(len(path) // 4):
        (index,) = struct.unpack_from(">I", path, i * 4)
        if index & hardened_bit:
            # Hardened derivation: strip the marker bit, add the apostrophe.
            parts.append(f"{index - hardened_bit}'")
        else:
            parts.append(f"{index}")
    return "/".join(parts)
|
b31ca235e14d962ebec0dc0dd50685464a2dc292
| 281,323
|
def _convertToElementList(elements_list):
"""
Take a list of element node indexes deliminated by -1 and convert
it into a list element node indexes list.
"""
elements = []
current_element = []
for node_index in elements_list:
if node_index == -1:
elements.append(current_element)
current_element = []
else:
# We also add one to the indexes to suit Zinc node indexing
current_element.append(node_index + 1)
return elements
|
750a7a7780dc901b7e00cd8a36fdfd3638005322
| 18,637
|
def add_header_lines(lines, header):
    """Return a new list: the header lines, a blank separator, then the lines."""
    return [*header, "\n", *lines]
|
09ece62d844c2b24e0f123474280df503d398648
| 531,103
|
def IsEmpty(Value):
    """Return True when the value is None or stringifies to only whitespace.

    Arguments:
        Value (str, int or float): Text or a value
    Returns:
        bool : True if the text form is empty; otherwise, False.
    """
    if Value is None:
        return True
    return not ("%s" % Value).strip()
|
c9626240146eebedb99dd2ff225b7c86a2c7991f
| 495,491
|
import re
def regex_first_claim(fullclaim, maxlen):
    """Attempts to extract the first patent claim from the full set of claims.

    Because patent claims have predictable structure, we try a few rules in
    order; if none applies, we return all characters up to `maxlen`.

    Args:
        fullclaim: A string containing the full text of a patent's claims.
        maxlen: An upper limit on the size of the result. This limit is only
            used if all previous extraction methods fail.

    Returns:
        A string containing the best estimate of the text of the first claim.
    """
    # First try the simplest - split on '. 2.' / '. 2 .' (the period closing
    # claim 1 followed by the number of claim 2). The dots are escaped; the
    # original pattern left them as wildcards, matching any character.
    split_on_2 = re.split(r'\.\s+2\s*\.', fullclaim)
    if len(split_on_2) > 1:
        return split_on_2[0]
    # Next split on the first reference to 'claim 1'.
    if 'claim 1' in fullclaim.lower():
        return fullclaim.split('claim 1')[0]
    # If none of the above worked, split on The (case sensitive). This word
    # should only appear in dependent claims by convention.
    if ' The ' in fullclaim:
        return fullclaim.split(' The ')[0]
    # Finally, just keep the first N chars based on maxlen input.
    return fullclaim[:maxlen]
|
40b19d30dcc67b1e0da27cae62069d3c3daa1230
| 64,566
|
def get_subset_evidence(all_evidence_dict, subset_vars):
    """
    Select evidence for certain variables only and return the variables for
    which there is evidence together with their values.

    :param all_evidence_dict: (dict) The evidence dictionary.
    :param subset_vars: (string list) variables to look up in all_evidence_dict.
    :return: evidence variable names and values (two lists, aligned)
    """
    selected = {var: all_evidence_dict[var]
                for var in subset_vars if var in all_evidence_dict}
    return list(selected), list(selected.values())
|
2f3c1bbdd1e5a2a8961d63e6c539e965b412e266
| 500,362
|
import re
import string
def format_description(description):
    # type: (str) -> str
    """Normalize whitespace, strip surrounding punctuation, and capitalize
    the first letter.

    The original passed the re.sub arguments in the wrong order
    (pattern, repl, string), which replaced the single space with the
    stripped text instead of collapsing whitespace.
    """
    if len(description) == 0:
        return ""
    # Collapse whitespace runs to single spaces, then trim the edges.
    description = re.sub(r"\s+", " ", description).strip()
    description = description.strip(string.punctuation)
    if not description:
        return ""
    return description[0].upper() + description[1:]
|
0917ce0ac6c63a2878857505671996b1cc553101
| 260,383
|
import string
def _ensure_identifier(name):
"""Convert ``name`` to a valid Python identifier
Returns a valid Python identifier in the form ``[A-Za-z_][0-9A-Za-z_]*``.
Any invalid characters are stripped away. Then all numeric leading
characters are also stripped away.
:raises NameError: if a valid identifier cannot be formed.
"""
# Note: the identifiers generated by this function must be safe
# for use with eval()
identifier = "".join(char for char in name
if char in string.ascii_letters + string.digits + "_")
try:
while identifier[0] not in string.ascii_letters + "_":
identifier = identifier[1:]
except IndexError:
raise NameError(
"Cannot form valid Python identifier from {!r}".format(name))
return identifier
|
0515a25b736f85bcf012f88a39876eb21420daff
| 390,420
|
def _choose_meta_graph_def_internal(saved_model, tags):
"""Find a MetaGraphDef within the SavedModel with exactly matching tags.
Args:
saved_model: A `SavedModel` protocol buffer.
tags: Set of string tags to identify the required MetaGraphDef. These should
correspond to the tags used when saving the variables using the
SavedModel `save()` API.
Returns:
The chosen `MetaGraphDef` protocol buffer. This can be used to further
extract signature-defs, collection-defs, etc. If tags cannot be found,
returns None.
"""
result = None
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(tags):
result = meta_graph_def
break
return result
|
c588a0917c75d209d7fd61ea4da275c49e0895e6
| 338,872
|
def remove(self, x):
    """
    Delete the first item equal to x from the wrapped output list.
    A ValueError is raised when no such item exists (list.remove semantics).
    """
    outcome = self.output_list.remove(x)
    return outcome
|
ab9c31896f569d75be8367d12993020572d16efc
| 214,470
|
def pyeapi_result(output):
    """Return the 'result' value from the first entry of the pyeapi output."""
    first_entry = output[0]
    return first_entry['result']
|
d4af079c3776ec7bfb6fcdcfd396836b2edc58fb
| 26,288
|
import hashlib
def md5(s, raw_output=False):
    """Return the MD5 hash of a string, as hex digest or raw bytes."""
    digest = hashlib.md5(s.encode())
    return digest.digest() if raw_output else digest.hexdigest()
|
238c2a6c6b06a046de86e514698c7ef5622f770b
| 706,162
|
import secrets
def random_with_float_step(start: int, stop: int, step: float) -> float:
    """Generates a random multiple of `step` between `start` and `stop`.

    Parameters
    ----------
    start : int
        The inclusive lower bound
    stop : int
        The inclusive upper bound
    step : float
        The step of the range

    Returns
    -------
    float
        The generated float

    Note: the original used randbelow(int((stop - start) / step)), which
    could never produce `stop` despite documenting an inclusive upper
    bound; the +1 below makes the bound truly inclusive.
    """
    steps = int((stop - start) / step) + 1  # +1 so `stop` itself is reachable
    return secrets.randbelow(steps) * step + start
|
e26b7c4167e11d78eea20ed52a012273a3b3628e
| 123,463
|
def calc_priority(node, from_targets):
    """
    Return the priority of a node.

    Priority rules:
      - dummy nodes get a very large constant so they always sort first;
      - otherwise, the number of targets or sources.

    Args:
        node: node whose priority is computed
        from_targets: for non-dummy nodes, True counts `targets`,
            False counts `sources`.
    Return:
        priority as an int
    """
    if node.is_dummy:
        # Sentinel "infinite" priority for dummy nodes.
        return 9999999999999999999999999999
    if from_targets:
        return len(node.targets)
    return len(node.sources)
|
81889182d6095b03898b39e86e1c830d009069ec
| 289,236
|
def _get_cqlsh_for_query(query: str):
"""
Creates a `cqlsh` command for given query that will be executed over a TLS
connection.
"""
return 'cqlsh --cqlshrc="$MESOS_SANDBOX/cqlshrc" --ssl -e "{query}"'.format(
query=query)
|
4fe1f60d6a52d8a015cd1f0979d34779305f52eb
| 596,287
|
def split_into_year_month(ym: str) -> tuple[int, int]:
    """Split a 'YYYYMM'-style string into (year, month) integers."""
    year, month = ym[:4], ym[4:]
    return int(year), int(month)
|
7f7e99ac60ec8faaaac0510345aac1e6d457fc82
| 565,181
|
def _get_response_headers(response):
"""
Django 3.2 (more specifically, commit bcc2befd0e9c1885e45b46d0b0bcdc11def8b249) broke the usage of _headers, which
were turned into a public interface, so we need this compatibility wrapper.
"""
try:
return response.headers
except AttributeError:
return response._headers
|
78e583bd079431e9485abf28c35f2488b7b22faa
| 272,976
|
def insert_with_key_enumeration(agent, agent_data: list, results: dict):
    """
    Store `agent_data` in `results` under the agent's name, enumerating the
    key when the name (as a substring) already occurs among the stored keys.

    :param agent: agent that produced data
    :param agent_data: simulated data
    :param results: dict to store data into
    :return: dict with inserted data/name pair
    """
    name = agent.get_name()
    if name not in results:
        results[name] = agent_data
    else:
        # Count keys containing this name to build a unique suffixed key.
        occurrences = sum(name in existing_key for existing_key in results)
        results[f"{name}_{occurrences}"] = agent_data
    return results
|
d2d653dcff20836c4eaf8cf55b31b1a1209a4ddd
| 5,922
|
import pathlib
def root() -> pathlib.Path:
    """Return the repository root directory (three levels above this file).

    Returns:
        pathlib.Path: The repository root directory.
    """
    this_file = pathlib.Path(__file__)
    return this_file.parent.parent.parent
|
60a9484ac32a431024493052e04e5bc22692d42f
| 412,564
|
def determine_cursor(response):
    """
    Return the pagination cursor carried by a response, if any.

    :param response: Response of the URL as a json object (dict)
    :return: the cursor string when present, otherwise None
    """
    return response.get('cursor')
|
ca3ac68d550abf6344e55a1cb8a495cc96afd1cb
| 563,212
|
def get_default_plot_params(plot_type=''):
    """
    Return the default parameters used for figure generation.
    Parameter
    ---------
    plot_type : string-optional
        Defines some plot-specific parameters to use by default
    Returns
    -------
    default_plot_params : dictionary
        Dictionary key-value pairs used to define plotting parameters
    """
    default_plot_params = {
        'autoclose': False,
        'autoscale_y':True,
        'axis_bg_color':'w',
        'colormap':'summer',
        'color_order': ['#0072b2','#d55e00', '#009e73', '#cc79a7', '#f0e442', '#56b4e9'], #Seaborn 'colorblind'
        'figsize':(8,6), #Inches
        'fontsize':14,
        'grid':'on',
        'interactive': True,
        'line_color':'default',
        'line_style':'-',
        'line_width': 2.0,
        'marker':None,
        'output_fig_path':'test',
        'output_fig_type':'png',
        'overlay_average':False,
        'overlay_average_color': '#424949',
        'reverse_x': False,
        'save_fig': False,
        'suppress_fig':False,
        'tick_fontsize':12,
        'title':'',
        'use_colormap':False,
        'use_sci_format_xaxis':False,
        'use_sci_format_yaxis':True,
        'xlabel':'',
        'xlim':None,
        'ylabel':'',
        'ylim':None,
    }
    # Some other good choices for color order!
    # 'color_order': ['#4c72b0', '#55a868', '#c44e52', '#8172b2', '#ccb974', '#64b5cd'] #Based on a Seaborn 'deep' color palette
    # 'color_order': ['#4878cf', '#6acc65', '#d65f5f', '#b47cc7', '#c4ad66', '#77bedb'] #Seaborn 'muted'
    # 'color_order': ['#92c6ff', '#97f0aa', '#ff9f9a', '#d0bbff', '#fffea3', '#b0e0e6'] #Seaborn 'pastel'
    # Add more default plot-type specific parameters to the dictionary.
    # Note: string comparison must use `==`; the original used `is`, which
    # relies on CPython string interning and is not guaranteed to match.
    if plot_type == 'spectrum': # Best for plotting 1-5 lines on the same axis
        default_plot_params.update({'reverse_x':True})
    elif plot_type == 'fid': # Best for plotting 1-5 lines on the same axis
        default_plot_params.update({'reverse_x':False})
    elif plot_type == 'spectra': # Best for plotting >5 lines on the same axis
        default_plot_params.update({'reverse_x':True,'line_width':1.0,
                                    'overlay_average':True})
    elif plot_type == 'fids': # Best for plotting >5 lines on the same axis
        default_plot_params.update({'reverse_x':False,'line_width':1.0,
                                    'overlay_average':True})
    else:
        # Note: Here is a good spot to add customized plotting parameters for specific kinds of plots
        pass
    return default_plot_params
|
1468e19e25ad4a31ea70f309128707400912082e
| 410,964
|
def kewley_agn_sii(log_sii_ha):
    """Seyfert/LINER classification line evaluated at log([SII]/Ha)."""
    slope = 1.89
    intercept = 0.76
    return slope * log_sii_ha + intercept
|
73a9cde22bbf7d27fb4ba4cbcf72d7b7e31e8628
| 169,172
|
def replicate(s, n):
    """ Return a list containing the value s repeated n times. """
    return [s] * n
|
b11e4cb97340360c22e409f378d6db1ef5a17fc5
| 161,661
|
from datetime import datetime
def timestamp_for_date(date_string):
    """
    Input: date in format YYYY/MM/DD (str) — note: slash-separated, matching
           the "%Y/%m/%d" strptime pattern below (the previous docstring
           incorrectly said YYYY-MM-DD).
    Output: POSIX timestamp (int); the naive datetime is interpreted in the
            local timezone by datetime.timestamp().
    """
    dt = datetime.strptime(date_string, "%Y/%m/%d")
    return int(datetime.timestamp(dt))
|
042f2707b713ef41369016467712b26d9a95ad18
| 353,730
|
def if_(c, t, e=None):
    """
    Return t when the condition is truthy, otherwise e (default None).
    """
    if c:
        return t
    return e
|
0c467f6d4f92f7cbadf33b492540cf96b66953ca
| 305,711
|
def can_import_module(module_name):
    """
    Check whether the specified module can be imported.

    Intended as a silent availability probe: no ModuleNotFoundError traceback
    is printed when the module is missing.

    Parameters
    ----------
    module_name : str
        Fully-qualified name of the module.

    Returns
    ----------
    bool
        Whether the import succeeded.
    """
    try:
        __import__(module_name)
    except Exception:
        # Any failure during import (missing module, broken top-level code)
        # counts as "not importable".
        return False
    return True
|
aefb5320074f1e1917efb7b2858dc8e600476ba2
| 616,834
|
def log_dataframe_task(parent, df, **kwargs):
    """Log a dataframe within a `prefect` flow.

    Thin task wrapper that forwards to the parent's `log_dataframe`.

    Parameters
    ----------
    parent : rubicon.client.Project or rubicon.client.Experiment
        The project or experiment to log the dataframe to.
    df : pandas.DataFrame or dask.dataframe.DataFrame
        The `pandas` or `dask` dataframe to log.
    kwargs : dict
        Additional keyword arguments passed through to
        `Project.log_dataframe` or `Experiment.log_dataframe`.

    Returns
    -------
    rubicon.client.Dataframe
        The logged dataframe.
    """
    logged = parent.log_dataframe(df, **kwargs)
    return logged
|
d859a48a424b7dbd058d9e6f0e12d578fa81d7b1
| 552,990
|
import ntpath
def get_log_file_name(myfile):
    """
    Return the base name of a file with its extension(s) removed
    (everything after the first dot, e.g. ".log", is dropped).
    """
    _, filename = ntpath.split(myfile)
    return filename.split(".")[0]
|
604fe6c9afefd899e4ceb972c7a0d0b1692396fb
| 614,008
|
def pg_index_exists(conn, schema_name: str, table_name: str, index_name: str) -> bool:
    """
    Does a postgres index exist?

    Unlike pg_exists(), we don't need heightened permissions on the table.
    So, for example, Explorer's limited-permission user can check agdc/ODC
    tables that it doesn't own.
    """
    row_value = conn.execute(
        """
        select indexname
        from pg_indexes
        where schemaname=%(schema_name)s and
              tablename=%(table_name)s and
              indexname=%(index_name)s
        """,
        schema_name=schema_name,
        table_name=table_name,
        index_name=index_name,
    ).scalar()
    return row_value is not None
|
98ebdc0db7f3e42050e61205fd17309d015352a0
| 1,326
|
def has_code(line: str) -> bool:
    """
    Return True if there's code on the line, i.e. it is neither blank nor
    a '#' comment line.

    The original expression (`not startswith("#") or line == ""`) reported
    blank lines as code; the check must require non-empty AND not-comment.
    """
    stripped = line.strip()
    return bool(stripped) and not stripped.startswith("#")
|
ef0975ee21deda1206a1bfc728f47d1119132c70
| 47,614
|
def paramsDictNormalized2Physical(params, params_range):
    """Map a dictionary of normalized [0, 1] parameters onto their physical
    ranges, returning a new dictionary (the input is left untouched)."""
    return {
        key: value * (params_range[key][1] - params_range[key][0]) + params_range[key][0]
        for key, value in params.items()
    }
|
f6608cf5b79ca7a0170efc34c98cc651289b6584
| 675,724
|
def generate_masks(raw):
    """ Create "and" masks and "or" masks for all possible combinations of
    "floating" bits ("X" bits can be either 0 or 1) """
    masks = []  # list of (or_mask, and_mask) int pairs, one per combination
    numx = raw.count("X")
    bits = list(raw)
    # Each `variant` enumerates one assignment of the X positions: its
    # zero-padded binary digits supply the chosen bit for each X in order.
    for variant in range(2**numx):
        mask = list(f"{bin(variant)[2:]:0>{numx}}")
        maskbits = bits.copy()
        amask = ["1"] * 36  # 36-bit AND mask; all-ones keeps every bit
        maskindex = 0
        for i, bit in enumerate(bits):
            if bit == "X":
                maskbits[i] = mask[maskindex]
                if mask[maskindex] == "0":
                    # An OR mask can only set bits; clearing a floating bit
                    # to 0 must go through the AND mask instead.
                    amask[i] = "0"
                maskindex += 1
        omask = int("".join(maskbits), 2)
        amask = int("".join(amask), 2)
        masks.append((omask, amask))
    return masks
|
b0ecd6c4f7d4037b6852d8e3d7982d25c95038d9
| 145,913
|
def modified(number, percent):
    """Return the number with an added margin given by `percent`
    (result has type float). A falsy percent leaves the number unchanged.
    """
    return number * (100 + percent) / 100. if percent else float(number)
|
7421d717caed71da840d0bae644a1f95d517777b
| 676,881
|
def schema_version_from_label(label):
    """Return the schema version encoded as the second dot-separated
    component of the label name."""
    parts = label.split(".")
    return int(parts[1])
|
14d73d9d9333d30967be8ebf683a8d4c3b63bbdd
| 179,953
|
import re
def get_text_url_base(content):
    """Return the base URL to full text based on the landing-page content.

    Parameters
    ----------
    content : str
        The content of the landing page for an rxiv paper.

    Returns
    -------
    str or None
        The base URL (the citation_html_url without its ".full" suffix)
        if available, otherwise None.
    """
    match = re.match('(?:.*)"citation_html_url" content="([^"]+).full"',
                     content, re.S)
    return match.group(1) if match else None
|
c679e9e1e8b074f7fea3aaf9eb256542e20a4d7d
| 699,298
|
def euclidean_gcd(a, b):
    """Euclidean Algorithm to find greatest common divisor.

    Euclidean algorithm is an efficient method for computing the greatest
    common divisor (GCD) of two integers, the largest number that divides
    them both without a remainder [Wiki].

    Args:
        a (int): The first integer, > 0,
        b (int): The second integer, > 0.
    Returns:
        int: the greatest common divisor.

    Implementation note: uses the modulo form of Euclid's algorithm,
    O(log min(a, b)), replacing the original repeated-subtraction /
    recursion hybrid that was linear in a/b and risked deep recursion.
    """
    while b:
        a, b = b, a % b
    return a
|
7530efcad9ab014cb65e73d51c2b177a410eea2e
| 565,248
|
def get_x_i(y):
    """
    Solve x - 2y = 0 for x; that is, return x = 2y.
    """
    return y * 2
|
55af7be5e60074244f62f0d127a886ae95ff4dc8
| 422,044
|
def less_equal(x, y):
    """
    Return True when x is less than or equal to y.
    """
    result = x <= y
    return result
|
023e839446f5f098c425c7f75aeac4b75fdb25a5
| 362,470
|
import math
def humidity_adjust_temp(rh1, t_c1, t_c2):
    """
    Give the relative humidity after only the temperature changes.
    See: [SensIntroHum]_

    :param rh1: Initial relative humidity 0-100
    :type rh1: float
    :param t_c1: Initial temperature in Celsius.
    :type t_c1: float
    :param t_c2: The temperature to find the new RH at.
    :type t_c2: float
    :return: The adjusted RH (0-100) at temperature t_c2
    :rtype: float

    :Example:
    >>> import hygrometry
    >>> hygrometry.humidity_adjust_temp(60, 25, 30)
    44.784059201238314
    """
    # Magnus-style exponential correction for the temperature shift.
    exponent = 4283.78 * (t_c1 - t_c2) / (243.12 + t_c1) / (243.12 + t_c2)
    return rh1 * math.exp(exponent)
|
75030fe0d2dd57511d45837fc54fce0477e42e4e
| 337,430
|
from typing import Any
from typing import Union
def issubclass_(
    cls: Any,
    types: Union[type[Any], tuple[type[Any], ...]],
) -> bool:
    """Like `issubclass`, but return False instead of raising when the
    value is not a `type`."""
    if not isinstance(cls, type):
        return False
    return issubclass(cls, types)
|
f300d79f12a74ac549b6e7d66e1d865449b0eca9
| 49,599
|
def format_bird(ip_version, bird_version, cmd):
    """Prefix a BIRD command with the appropriate BIRD CLI binary.

    Arguments:
        ip_version {int} -- IPv4/IPv6
        bird_version {int} -- BIRD version
        cmd {str} -- Unprefixed command
    Returns:
        {str} -- Prefixed command
    """
    # BIRD 1.x ships a separate binary for IPv6; BIRD 2+ unifies them.
    prefix = "birdc6" if bird_version == 1 and ip_version == 6 else "birdc"
    return f'{prefix} "{cmd}"'
|
bcf2e3aaacaae5a8a30efc5d1f792f0d308a1fc0
| 348,392
|
import re
def split_units(unit):
    """Split `unit` into chunks, each starting at a capital letter."""
    capital_chunks = re.findall('[A-Z][^A-Z]*', unit)
    return capital_chunks
|
9d4ad1cb7c0418d248281261b5ecc721cd4820a7
| 374,832
|
def get_function_start(function_name: str, assembly_code: list) -> int:
    """Detects the line number of the first instruction of a function.

    Searches for a function named `function_name` in the assembly code. The
    presence of a "<function_name>:" expression in a line indicates the start
    of the function; the next line is its first instruction.

    Parameters
    ----------
    function_name : str
        The name of the function which will be searched in the assembly code.
    assembly_code : list of str
        Assembly code in which the function will be searched.

    Returns
    -------
    integer
        The line number of the first instruction of the function in the
        assembly code if successful.

    Raises
    ------
    Exception
        If the function is not found in the assembly code.
    """
    function_declaration: str = '<' + function_name + '>:'
    for index, line in enumerate(assembly_code):
        if function_declaration in line:
            # The declaration line itself; the instruction follows it.
            return index + 1
    # Note: the original message concatenated without spaces around the name.
    raise Exception("The function " + function_name + " could not be found "
                    "in the current assembly code.")
|
1d46b763c3c8107f04211f50f0d72e8f6d49d1a4
| 411,260
|
def splitLast(myString, chunk):
    """
    Return a tuple of two strings, split at the last occurrence of `chunk`.
    When `chunk` is absent, the whole string and '' are returned.

    >>> splitLast('hello my dear friend', 'e')
    ('hello my dear fri', 'nd')
    """
    idx = myString.rfind(chunk)
    if idx < 0:
        return myString, ''
    return myString[:idx], myString[idx + len(chunk):]
|
6bdf7c2f3b16ca3b0885bdb41623183342c4d4af
| 668,553
|
def transform_data(data, transformer):
    """
    Read a dictionary and apply transformations using the
    `transformer` Transformer.
    Return a tuple of:
    ([field names...], [transformed ordered dict...], [Error objects..])

    NOTE(review): when `transformer` is falsy, the input `data` is returned
    as-is rather than the documented 3-tuple — callers appear to rely on
    this shape difference; confirm before changing.
    """
    if not transformer:
        return data
    # Renamings are applied first so the filters below match the *renamed*
    # field names.
    renamed_field_data = transformer.apply_renamings(data)
    field_names = renamed_field_data[0].keys()
    if transformer.field_filters:
        renamed_field_data = list(transformer.filter_fields(renamed_field_data))
        # Keep only whitelisted fields, preserving the original column order.
        field_names = [c for c in field_names if c in transformer.field_filters]
    if transformer.exclude_fields:
        renamed_field_data = list(transformer.filter_excluded(renamed_field_data))
        field_names = [c for c in field_names if c not in transformer.exclude_fields]
    errors = transformer.check_required_fields(renamed_field_data)
    if errors:
        # On validation failure, return the *untransformed* input alongside
        # the errors so the caller can inspect the raw rows.
        return field_names, data, errors
    return field_names, renamed_field_data, errors
|
05448bb1c7f6e74b57ea0c87b606d27bb1821d2d
| 160,376
|
def create_table_if_not_exist(name: str, sql: str):
    """Build a CREATE TABLE IF NOT EXISTS statement.

    Args:
        name (str): The name of the table.
        sql (str): The SQL string defining the table's columns.
    Returns:
        str: the assembled DDL statement.
    """
    return "CREATE TABLE IF NOT EXISTS {} {}".format(name, sql)
|
866c59320848b9a1a8b68db372f334e209d1a8f8
| 429,289
|
def short_hex(x):
    """Return the shorthand hexadecimal code when each channel's two digits
    repeat, ex: cc3300 -> c30; otherwise return the input unchanged."""
    if x[0] == x[1] and x[2] == x[3] and x[4] == x[5]:
        return x[0] + x[2] + x[4]
    return x
|
9b1efac8e11fe84ae7d07df7eb49b4404d79f694
| 214,348
|
def decode_exit(exit_code):
    """Decode the exit code returned by os.system() into a tuple of
    (exit status, signal number, core-dumped flag).
    See os.wait() for the specification of exit_code."""
    high_byte = exit_code >> 8
    low_byte = exit_code & 0xff
    # Bits 0-6 of the low byte carry the signal; bit 7 flags a core dump.
    return (high_byte, low_byte & 0x7f, low_byte >> 7)
|
cdd53624e13048114ec55df44764a044270f1341
| 118,471
|
import re
def _grep_prop(filename, prop_name):
"""
Look for property in file
:param filename: path to file and file name.
:param prop_name: property to search for in file.
:return: property value or None.
"""
fdata = open(filename, "r").read()
obj = re.search("^{0} = ['|\"](.+)['|\"]$".format(prop_name), fdata, re.MULTILINE)
if obj:
return obj.group(1)
return None
|
7e604f0a1069ee93bee0486802a2d516318fef4b
| 650,910
|
def get_drug_dcid(row):
    """Return the dcid of a drug.

    Prefers the ChEMBL ID; when it is missing/empty, a dcid is built from
    the PharmGKB accession id instead.
    """
    chembl = row['ChEMBL ID']
    identifier = chembl if chembl else row['PharmGKB Accession Id']
    return 'bio/' + identifier
|
a8b5b5d639e4aa07a6778f589b23415106658b45
| 57,157
|
def remove_quotation_marks(source_string):
    """
    :param source_string: String whose outermost quotation marks are removed.
    :return: The text between the first and last '"', trimmed of surrounding
        whitespace.
    """
    opening = source_string.find('"')
    # Search for the closing quote only to the right of the opening one.
    closing = source_string[opening + 1:].rfind('"')
    inner = source_string[opening + 1: opening + 1 + closing]
    return inner.strip()
|
13157e80cb701b915e0ccab1e38f9099de7f90bb
| 669,336
|
def second_differences(signal):
    """The mean of the absolute values of the second differences of the raw signal"""
    # Pair each sample with the sample two positions ahead.
    diffs = [abs(later - earlier) for earlier, later in zip(signal, signal[2:])]
    return float(sum(diffs)) / (len(signal) - 2)
|
fb394697e922bde829bf86f1ac97139b96afffe2
| 51,108
|
import copy
def tuple_to_string(tuptup):
    """Convert a nested list to its string representation.

    Different separators (';', '/', '|') are used for different depths:
    the deepest level is joined with ';' first, then the next level with
    '/', then the next with '|'.

    Parameters
    ----------
    tuptup : list
        Nested list to convert to its string representation.

    Returns
    -------
    str
        String representation of the input (a list remains if the input
        is nested more than three levels deep).
    """
    def collapse(node, sep):
        """Join the deepest level of *node* with *sep*; strings pass through."""
        if not isinstance(node, list):
            # Already collapsed (or a scalar) — stringify and pass through.
            return str(node)
        if not isinstance(node[0], list):
            # Deepest level: join every element's string form.
            return sep.join(str(item) for item in node)
        # Not deepest yet: recurse into each child.
        return [collapse(child, sep) for child in node]

    # Never mutates the input: collapse builds new lists at every level.
    result = collapse(tuptup, ';')
    result = collapse(result, '/')
    result = collapse(result, '|')
    return result
|
a11fa135e5e001b57c6c4c0255de6e3896e97048
| 624,381
|
def filter_blast_result(alignment, record):
    """
    Decide whether a BLAST alignment covers the target sequence well enough.

    Two criteria must both hold:
    (i)  at least 90% of the target sequence (record.query_length) has an
         aligned partner in the test sequence;
    (ii) at least 95% of that minimum coverage is identical residues.

    :param alignment: Bio.Blast.Record.Alignment object
    :param record: Bio.Blast.Record object
    :return: True if both criteria are satisfied, otherwise False
    """
    length_threshold = 0.9
    identity_threshold = 0.95
    # Smallest number of target-sequence residues that must be aligned.
    min_coverage = round(length_threshold * record.query_length)
    # Smallest number of identical residues within that coverage.
    min_identity = round(min_coverage * identity_threshold)
    # Accumulate over all High Scoring Pairs in a single pass.
    covered = 0
    identical = 0
    for hsp in alignment.hsps:
        covered += hsp.align_length
        identical += hsp.identities
    return covered >= min_coverage and identical >= min_identity
|
ae1c3280a832acc6ec2d77e9043bc3626e57d578
| 644,976
|
import functools
def toggle_flag(flag_name):
    """Decorator factory gating a method on an instance attribute.

    The wrapped method only runs when ``getattr(self, flag_name)`` is
    truthy; otherwise the call returns None.
    """
    def decorator(method):
        @functools.wraps(method)
        def guarded(self, *args, **kwargs):
            if not getattr(self, flag_name):
                return None
            return method(self, *args, **kwargs)
        return guarded
    return decorator
|
cb86ed7708a97babd837de5d00beb4027f4dbd7e
| 561,827
|
def hot_test(blue, red, thres=0.08):
    """Haze Optimized Transformation (HOT) test
    Equation 3 (Zhu and Woodcock, 2012)
    Based on the premise that the visible bands for most land surfaces
    are highly correlated, but the spectral response to haze and thin cloud
    is different between the blue and red wavelengths.
    Zhang et al. (2002)
    Parameters
    ----------
    blue: ndarray
    red: ndarray
    thres: float, optional
        HOT threshold; generalized from the previously hard-coded 0.08
        (the Zhu & Woodcock 2012 value, kept as the default).
    Output
    ------
    ndarray: boolean
    """
    return blue - (0.5 * red) - thres > 0.0
|
172b7ab0eb23cb3ba4642ca758fe8d84e7bcbd34
| 315,903
|
def fillText(text, continuationPrefix="", maxLineLength=76):
    """Add line-breaks to <text>.
    Replace ' ' with '\n'+<continuationPrefix> as necessary to ensure no
    line longer than <maxLineLength>. Return the result as a string, ending
    with '\n'.
    The text should not contain newlines, tabs, or multiple consecutive
    spaces.
    """
    text = text.rstrip()
    # Short input needs no wrapping at all.
    if len(text) <= maxLineLength:
        return text + "\n"
    # Find a space: the last one at or before the length limit.
    spaceIndex = text.rfind(" ", 0, maxLineLength + 1)
    if spaceIndex == -1 or text[:spaceIndex].rstrip() == "":
        # Couldn't find an embedded space w/in line length
        # (or everything before it is whitespace) — fall forward to the
        # first space after the limit so we never emit an empty line.
        spaceIndex = text.find(" ", maxLineLength + 1)
        if spaceIndex == -1:
            # Couldn't find any embedded space in string
            return text + "\n"
    # Split the string: first line, then the remainder gets the prefix.
    returnText = text[:spaceIndex] + "\n" + continuationPrefix
    text = text[spaceIndex+1:]
    # Continuation lines have less room because of the prefix.
    maxContLength = maxLineLength - len(continuationPrefix)
    # NOTE(review): this condition and the forward-search below use
    # maxLineLength rather than maxContLength — continuation lines may
    # therefore exceed the limit by len(continuationPrefix). Looks
    # intentional-ish but worth confirming against callers.
    while len(text) > maxLineLength:
        # Find a space within the continuation-line budget.
        spaceIndex = text.rfind(" ", 0, maxContLength + 1)
        if spaceIndex == -1:
            # Couldn't find an embedded space w/in line length
            spaceIndex = text.find(" ", maxLineLength + 1)
            if spaceIndex == -1:
                # Couldn't find any embedded space in string
                break
        # Split the string: append this line and keep wrapping the rest.
        returnText += text[:spaceIndex] + "\n" + continuationPrefix
        text = text[spaceIndex+1:]
    return returnText + text + "\n"
|
33a31fd65e94733da247f6e11315b8f61730dad0
| 209,855
|
def tag2String(tag):
    """Render a tag placement dict (id, x, y, th_deg) as a readable string."""
    template = "Tag with ID #%d, was placed @ (%f,%f), facing %f deg"
    values = (tag['id'], tag['x'], tag['y'], tag['th_deg'])
    return template % values
|
5eac5e77228e81efe9968475c85b9d79ab5a2960
| 687,900
|
def apply_reduction (numpy_cloud, min_x_coordinate, min_y_coordinate ):
    """Translate the cloud toward the origin.

    Subtracts the given minima from the x (column 0) and y (column 1)
    coordinates, writing the result back into the array, and returns it.
    """
    for column, minimum in ((0, min_x_coordinate), (1, min_y_coordinate)):
        numpy_cloud[:, column] = numpy_cloud[:, column] - minimum
    return numpy_cloud
|
d9c41b30a6d12568e0d49c97e10ff3ace46e479e
| 414,018
|
def data(reader, chunk=100):
    """Return the next *chunk* rows from a chunked pandas reader.

    Parameters
    ----------
    reader : pandas reader object (see grab_files.reader)
    chunk : int
        Number of rows to pull in this chunk (default 100).

    Returns
    -------
    pd.DataFrame subset of the underlying file.
    """
    return reader.get_chunk(chunk)
|
1ea39b2ca767e389c7c6bde83b3e9a6324f3d187
| 667,609
|
def string_file(source):
    """Return the contents of the file *source* as a string with leading
    and trailing whitespace removed."""
    with open(source) as handle:
        return handle.read().strip()
|
7e5116ad69f0b96b50eca4bb29a2899369706fbf
| 163,141
|
import math
def discounted_cumulative_gain(relevance_list, p):
    """
    Compute the discounted cumulative gain of *relevance_list* at position *p*:

        dcg_p = sum(from i=1, to p) rel_i / log2(i + 1)

    For an effective measure, normalize the result (ideal DCG / DCG).

    :param relevance_list: list of relevance values to consider
    :param p: position up to which the score is accumulated
    """
    score = 0
    for position, relevance in enumerate(relevance_list, start=1):
        if position > p:
            break
        score += relevance / math.log2(position + 1)
    return score
|
cfa43b7dcd18212a0229886cfd5ee41f47efd784
| 114,813
|
def listify_text(filename):
    """Return the lines of the text file *filename* as a list of strings
    (newline characters stripped)."""
    with open(filename, 'r') as handle:
        return handle.read().splitlines()
|
41386d2e76700982afc0eb315ae8bb179228c122
| 528,581
|
import json
def load_from_json(file_path):
    """
    Load data from a JSON file.

    Input:
        file_path: path to the .json source file (str or pathlib.Path)
    Output:
        data: the loaded data
    Raises:
        AssertionError: if the path does not end in '.json'
    """
    # Accept pathlib.Path (or any object with a useful str()) transparently;
    # str() on an existing str is a no-op.
    file_path = str(file_path)
    # Bug fix: the original message said "saving" — this function loads.
    assert file_path.endswith('.json'), 'Given file_path is invalid for loading from json file'
    with open(file_path, 'r') as infile:
        return json.load(infile)
|
1265b060633b3422df31deb477835eb8bcf3b3fd
| 625,858
|
def class_size(cls):
    """Get the number of bytes per element for a given data type.
    Parameters:
        cls (str): Name of the data type
    Returns:
        int: Number of bytes per element
    Raises:
        TypeError: if *cls* is not a recognized data type name
    """
    # Dispatch table: dtype name -> bytes per element.
    byte_widths = {
        'float64': 8, 'int64': 8, 'uint64': 8,
        'float32': 4, 'int32': 4, 'uint32': 4,
        'bool': 1, 'str': 1, 'int8': 1, 'uint8': 1,
    }
    if cls not in byte_widths:
        raise TypeError('invalid data type')
    return byte_widths[cls]
|
d645f16dde69594b4d2374066b984c93fb892267
| 585,777
|
def inv(q, p):
    """
    Calculate q^-1 mod p.

    Returns the inverse in range [0, p), or None when no inverse exists
    (gcd(q, p) != 1, p <= 1) — matching the original brute-force scan,
    but in O(log p) via the extended Euclidean algorithm instead of O(p).
    """
    if p <= 0:
        # Original scanned range(p), which is empty here -> None.
        return None
    # Extended Euclid on (q mod p, p): track x with q*x ≡ gcd (mod p).
    old_r, r = q % p, p
    old_s, s = 1, 0
    while r:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
    candidate = old_s % p
    # Verify the result; this also rejects p == 1 and non-coprime inputs,
    # preserving the original's None-on-no-inverse behavior.
    return candidate if q * candidate % p == 1 else None
|
83ae3cb00813eb8351bfd413ce976e607497a322
| 56,587
|
def recv_from_gh_client(socket):
    """Connect, receive, and decode data received from socket to a list.
    Arguments:
        socket -- A socket.socket() object to receive data.
    Returns:
        return_lst -- A list of floats sent from Grasshopper.
    """
    # Block until a client (Grasshopper) connects.
    socket.listen()
    conn, _ = socket.accept()
    with conn:
        # Single recv of at most 1024 bytes.
        # NOTE(review): payloads longer than 1024 bytes would be truncated
        # here — presumably Grasshopper sends small messages; confirm.
        return_byt = conn.recv(1024)
        return_str = return_byt.decode()
        # Expects whitespace-separated numeric tokens, e.g. "1.0 2.5 3".
        return_lst = [ float(value) for value in return_str.split()]
    return return_lst
|
78ebb6bb126ebb955c6feac93f45db6661b0f211
| 240,220
|
import torch
def from_onehot(y):
    """Convert one-hot encoded (or probability) rows to label indices.

    Parameters
    ----------
    y: Tensor or Variable of shape (batch, num_classes)

    Returns
    -------
    Tensor or Variable of label indices, one per row.
    """
    # torch.max over dim 1 yields (values, indices); keep the indices.
    return torch.max(y, 1)[1]
|
dca11cc0b75447d64b06925d6fa2c443cb8e0128
| 619,938
|
def lattice_to_strings(lattice, token_type="byte"):
    """Return the output strings of a lattice as a tuple.

    Args:
        lattice: Epsilon-free acyclic WFSA.
        token_type: Output token type, or symbol table (default "byte").

    Returns:
        A tuple of output strings, one per path.
    """
    path_iterator = lattice.paths(token_type)
    return tuple(path_iterator.ostrings())
|
9fba2ce14343fa48ddbdddf3a96a0c371cc183a9
| 492,556
|
def M_top(M_lc, x_aver_top, M_hc):
    """
    Calculates the molar mass at top of column.

    Parameters
    ----------
    x_aver_top : float
        The average mol concentration at top of column, [kmol/kmol]
    M_lc : float
        The molar mass of low-boilling component, [kg/kmol]
    M_hc : float
        The molar mass of high-boilling component, [kg/kmol]

    Returns
    -------
    M_top : float
        The molar mass at top of column, [kg/kmol]

    References
    ----------
    Dytnersky, p. 230, eq. 6.6
    """
    light_contribution = M_lc * x_aver_top
    heavy_contribution = M_hc * (1 - x_aver_top)
    return light_contribution + heavy_contribution
|
7891839343a08f58a99da71707f8f3ce9675901c
| 481,495
|
def mag(db_image):
    """Invert the log/db transform: convert decibel values to magnitude."""
    exponent = db_image / 20
    return 10 ** exponent
|
6f8271e4e7f785a881a15f3f8fac141a02557348
| 644,308
|
def has_slide_type(cell, slide_type):
    """
    Select cells that have a given slide type.

    :param cell: Cell object to select
    :param slide_type: Slide Type(s): '-', 'skip', 'slide', 'subslide', 'fragment', 'notes'
    :type slide_type: str / set / list
    :return: True if the cell's slideshow slide_type is one of *slide_type*
    """
    if isinstance(slide_type, str):
        slide_type = {slide_type}
    metadata = cell.metadata
    if 'slideshow' not in metadata:
        return False
    slideshow = metadata['slideshow']
    return 'slide_type' in slideshow and slideshow['slide_type'] in slide_type
|
edb1323331317d53502179fe357c151a5b59af0b
| 702,737
|
def UnescapeGNString(value):
    """Given a string with GN escaping, returns the unescaped string.

    GN escapes exactly '$', '"' and '\\' with a backslash; a backslash
    before any other character is a literal backslash, and a trailing
    backslash at end-of-string is dropped (matching the original).

    Be careful not to feed with input from a Python parsing function like
    'ast' because it will do Python unescaping, which will be incorrect when
    fed into the GN unescaper.
    """
    # Perf/idiom fix: build into a list and join once, instead of the
    # original quadratic string `+=` accumulation.
    parts = []
    i = 0
    n = len(value)
    while i < n:
        ch = value[i]
        if ch == '\\':
            if i + 1 < n:
                next_char = value[i + 1]
                if next_char in ('$', '"', '\\'):
                    # These are the escaped characters GN supports.
                    parts.append(next_char)
                    i += 1
                else:
                    # Any other backslash is a literal.
                    parts.append('\\')
            # else: trailing backslash at end of string is dropped
            # (same behavior as the original).
        else:
            parts.append(ch)
        i += 1
    return ''.join(parts)
|
1e56052fd307d7255d4765448699bc9a55671b2d
| 250,709
|
import re
def isXML(file):
    """Return True if the file name has the .xml extension."""
    # Fix: use a raw string for the regex — "\." in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python) — and
    # compare with `is not None` rather than `!= None`.
    return re.search(r"\.xml$", file) is not None
|
7fcfbb105a59f7ed6b14aa8aa183aae3fdbe082d
| 13,637
|
def build_splat_qth(transmitter):
    """
    Return the text content of a SPLAT! site location file (QTH file)
    corresponding to the given transmitter.

    INPUT:
    - ``transmitter``: dictionary of the same form as any one of the
      elements in the list output by :func:`read_transmitters`

    OUTPUT:
    String (name, latitude, negated longitude, antenna height + 'm',
    one per line).
    """
    # Convert to degrees east in range (-360, 0] for SPLAT!
    west_longitude = -transmitter['longitude']
    lines = [
        str(transmitter['name']),
        str(transmitter['latitude']),
        str(west_longitude),
        str(transmitter['antenna_height']) + 'm',
    ]
    return '\n'.join(lines)
|
f9e8560f91ad7ad03d11a098d4c6167cb303186a
| 269,173
|
def opts_dd(lbl, value):
    """Format an individual item for a Dash dcc dropdown list.

    Args:
        lbl: Dropdown label (stringified).
        value: Dropdown value (kept as-is).

    Returns:
        dict: keys `label` and `value` for dcc.dropdown()
    """
    return dict(label=str(lbl), value=value)
|
d9a9b97b9c586691d9de01c8b927e4924eba3a3e
| 654,113
|
def get_leaves_with_labels(tree):
    """
    Return leaves in the tree, as well as their labels.

    Nodes are sequences: a leaf is (label, word) of length 2; an internal
    node is (label, left, right) of length 3.

    >>> from ptb import parse
    >>> t = parse("(4 (4 (2 A) (4 (3 (3 warm) (2 ,)) (3 funny))) (3 (2 ,) (3 (4 (4 engaging) (2 film)) (2 .))))")
    >>> get_leaves_with_labels(t)
    [('A', 2), ('warm', 3), (',', 2), ('funny', 3), (',', 2), ('engaging', 4), ('film', 2), ('.', 2)]
    >>> t = parse("(2 .)")
    """
    def aux(t):
        if len(t) == 2:  # leaf: (label, word) -> emit (word, label)
            return [(t[1], t[0])]
        elif len(t) == 3:  # internal node: concatenate left and right leaves
            return aux(t[1]) + aux(t[2])
        else:
            # Fixed message: previously said "length shoud be 2,3 or 4"
            # (typo, and length 4 was never actually handled).
            raise ValueError("length should be 2 or 3 for input '%r'" % (t,))
    return aux(tree)
|
384fa89e89da0de61acc338570344ac027feb488
| 649,392
|
import configparser
def load_seqinfo(fname):
    """Parse *fname* as an INI file and return its [Sequence] section as a dict."""
    parser = configparser.ConfigParser()
    parser.read(fname)
    return dict(parser['Sequence'])
|
0ec13b31baf3152101099c442e8b506e118b0bf3
| 499,819
|
def _get_gtf_column(column_name, gtf_path, df):
"""
Helper function which returns a dictionary column or raises an ValueError
abou the absence of that column in a GTF file.
"""
if column_name in df.columns:
return list(df[column_name])
else:
raise ValueError(
"Missing '%s' in columns of %s, available: %s" % (
column_name,
gtf_path,
list(df.columns)))
|
62a7cfcd84a791b935e2133a94e87f4ac98cb57e
| 473,650
|
def dp_make_weight(egg_weights, target_weight, memo=None):
    """
    Find number of eggs to bring back, using the smallest number of eggs. Assumes there is
    an infinite supply of eggs of each weight, and there is always a egg of value 1.

    Parameters:
    egg_weights - tuple of integers, available egg weights sorted from smallest to largest
        value (1 = d1 < d2 < ... < dk)
    target_weight - int, amount of weight we want to find eggs to fit
    memo - dictionary, OPTIONAL parameter for memoization

    Returns: int, smallest number of eggs needed to make target weight
    """
    # Fix: the original used a mutable default argument (memo={}), which is
    # shared across every top-level call; create a fresh dict per call instead.
    if memo is None:
        memo = {}
    # This will be the key used to find answers in the memo
    subproblem = (egg_weights, target_weight)
    # If we've already stored this answer in the memo, return it
    if subproblem in memo:
        return memo[subproblem]
    # If no eggs are left or no space is left on ship, there's nothing left to do
    if egg_weights == () or target_weight == 0:
        return 0
    # If the next heaviest egg is too heavy to fit, consider subset of lighter eggs
    elif egg_weights[-1] > target_weight:
        result = dp_make_weight(egg_weights[:-1], target_weight, memo)
    else:
        # Find the minimum number of eggs by testing both taking heaviest egg
        # and not taking heaviest egg.
        this_egg = egg_weights[-1]
        num_eggs_with_this_egg = 1 + dp_make_weight(
            egg_weights,
            target_weight - this_egg,
            memo)
        num_eggs_without_this_egg = dp_make_weight(egg_weights[:-1], target_weight, memo)
        # A zero from the "without" branch means the remaining weights
        # couldn't make the target, so only the "with" branch counts.
        if num_eggs_without_this_egg != 0:
            result = min(num_eggs_with_this_egg, num_eggs_without_this_egg)
        else:
            result = num_eggs_with_this_egg
    # Store this answer in the memo for future use.
    memo[subproblem] = result
    return result
|
8546ab2dd0394d2864c23a47ea14614df83ec2f7
| 13,612
|
def error_message(e):
    """
    Return a custom error message for a raised OS-level error.

    :param e: error raised (PermissionError | OSError)
    :return: custom error message string
    """
    code, description = e.args
    return "Error: [Errno {}] {}".format(code, description)
|
2b24d7a9b4b503d8310e291487848145dbfa53e5
| 294,163
|
from typing import Dict
from typing import Any
def _unpack_player_pts(
year: int, week: int, player_pts_dict: Dict[str, Any]
) -> Dict[str, str]:
"""
Helper function to unpack player points nested dictionaries.
"""
((_, points_dict),) = player_pts_dict.items()
points_dict = points_dict["week"][str(year)].get(str(week))
return points_dict
|
d462ed631b5e37fd4eb055885507e8ad5e93ed60
| 235,736
|
def element_to_be_selected(element):
    """Expectation factory: the returned predicate reports whether
    *element* (a WebElement) is currently selected. The driver argument
    passed to the predicate is ignored.
    """
    return lambda _driver: element.is_selected()
|
db795fd0e0bc4ce7c8f9bff0a17077aec5c9f5d6
| 315,222
|
def string2token(t,nl,nt):
    """
    This function takes a string and returns a token. A token is a tuple
    where the first element specifies the type of the data stored in the
    second element.
    In this case the data types are limited to numbers, either integer, real
    or complex, and strings. The types a denoted as follows:
    i - integer
    f - float/real
    c - complex
    s - string
    For navigational purposes two more elements added to identify the line
    number (nl) the token was on, and the token number (nt) within the line.

    Parsing is attempted in order int -> float -> complex -> string; the
    first successful conversion wins, so e.g. "1" becomes an 'i' token,
    not an 'f' one.
    """
    try:
        i_a = int(t)
        #
        # Toldiff should recognise that -0 and 0 are the same, however, in
        # a text based comparison that is not automatic so we have to force this.
        #
        if i_a == 0:
            i_a = 0
        token = ("i",i_a,nl,nt)
    except ValueError:
        #
        # In Fortran double precision constants are often printed with a
        # "D" for the exponent rather than an "E", i.e. 1.0E+01 might be
        # printed as 1.0D+01 in Fortran. Python is not aware of this convention
        # so we need to replace any potential "D"-s to obtain valid floating
        # values.
        #
        z = t.replace("d","e")
        z = z.replace("D","e")
        try:
            i_f = float(z)
            #
            # Toldiff should recognise that -0.0 and 0.0 are the same, however,
            # in a text based comparison that is not automatic so we have to
            # force this.
            #
            if i_f == 0.0:
                i_f = 0.0
            token = ("f",i_f,nl,nt)
        except ValueError:
            #
            # The handling of complex numbers is unlikely to work in practice
            # as in most cases complex numbers are printed as (1.0,2.0)
            # rather than 1.0+2.0j. Therefore it is impossible to reliably
            # distinguish between a complex number and a list of 2 real numbers.
            #
            try:
                i_c = complex(z)
                #
                # Toldiff should recognise that x-0.0j and x+0.0j and that
                # -0.0+y*j and 0.0+y*j are the same, however, in a text based
                # comparison that is not automatic so we have to force this.
                #
                if i_c.real == 0.0:
                    i_c = complex(0.0,i_c.imag)
                if i_c.imag == 0.0:
                    i_c = complex(i_c.real,0.0)
                token = ("c",i_c,nl,nt)
            except ValueError:
                # Nothing numeric parsed: fall back to a string token with
                # the original (un-substituted) text.
                token = ("s",t,nl,nt)
    return token
|
23fd5da01a49076b1fcf474fbe1047329ad7471a
| 704,852
|
def split_channel(tensor, split_type='simple'):
    """
    Split a 4-D tensor along its channel dimension (dim 1).

    :param tensor: input tensor of shape (N, C, H, W)
    :type tensor: torch.Tensor
    :param split_type: 'simple' (first half / second half) or
        'cross' (even indices / odd indices)
    :type split_type: str
    :return: the two channel halves
    :rtype: tuple(torch.Tensor, torch.Tensor)
    """
    assert len(tensor.shape) == 4
    assert split_type in ['simple', 'cross']
    channels = tensor.shape[1]
    if split_type == 'simple':
        half = channels // 2
        return tensor[:, :half, ...], tensor[:, half:, ...]
    return tensor[:, 0::2, ...], tensor[:, 1::2, ...]
a74b3b808c9a94188ba9432da681bafe4d8564d5
| 536,904
|
def headers(group_id, token):
    """
    Generate the headers expected by the Athera API: every query carries
    the active group and a bearer authorization token.
    """
    # NOTE(review): "Bearer:" with a colon differs from the usual
    # RFC 6750 "Bearer <token>" form — presumably what the Athera API
    # expects; confirm before changing.
    auth_value = "Bearer: {}".format(token)
    return {"active-group": group_id, "Authorization": auth_value}
|
f86edb9151da099bf9818ffbf8a4eb66f4becb67
| 38,567
|
def IntToRgb(RGBint: int):  # -> typing.Tuple[int,int,int]:
    """Convert a packed 0xRRGGBB integer color value to an RGB tuple.

    :param RGBint: :class:`int`
        The integer color value.
    :returns: :class:`tuple[int,int,int]`
        (red, green, blue) components, each in 0..255.
    """
    return ((RGBint >> 16) & 255, (RGBint >> 8) & 255, RGBint & 255)
|
c832d354014589def6ef8cb0888ee2f1f050ff5d
| 221,623
|
import base64
def _b64_encode(text):
"""Encode a string to base64. Unlike base64.b64encode,
input and output are utf-8 strings. """
return base64.b64encode(text.encode('utf-8')).decode('utf-8')
|
2a7275da4812ec3c690a41bec89adb5cd0ed1291
| 184,859
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.