content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_table_id_extension(data):
    """Return the table ID extension from the given section data.

    The extension is a 16-bit big-endian value stored in bytes 3 and 4
    of the section byte array.
    """
    high_byte = data[3]
    low_byte = data[4]
    return high_byte * 256 + low_byte  # | c85b329b232be8a5aa449feb538a2bda310e0af4 | 112,577 |
import yaml
def read_default_config() -> dict:
    """
    Load the ``default`` section from ``config/config.yml``.

    :rtype: dict
    :return: The default configuration mapping.
    """
    with open(file="config/config.yml", mode='r', encoding='utf8') as file:
        full_config = yaml.safe_load(file)
    return full_config["default"]  # | f0af39a6c050afb4f345da2a838dc83c328ed212 | 112,578 |
def _rgroup_sort(r):
"""Sort groups like R1 R2 R10 not R1 R10 R2
"""
if r[0] == "R": return ("R", int(r[1:]))
return (r, None) | d9cb36027ff8f20fc0a9b87213aaa67ff76f9e3e | 112,581 |
def get_percent(key, row):
    """Read a percentage value from a row.

    ``row[key]`` is parsed as a float; a '%' in the string means the value
    is divided by 100. Returns None when the key is absent or the value is
    empty/falsy.
    """
    value = row[key] if key in row else None
    if not value:
        return None
    if '%' in value:
        return float(value.replace('%', '')) / 100.0
    return float(value)  # | 7b54a171756c1e53fa42f3c837353c3776f6df33 | 112,585 |
def combine_relation_and_node(relationship):
    """
    Merge a neo4j Relationship's own properties with the properties of its
    end Node into a single dictionary.

    @param relationship: the neo4j Relationship to transform.
    @return: dict with keys 'relation' and 'node'.
    """
    end_node = relationship.get_end_node()
    return {
        'relation': relationship.get_properties(),
        'node': end_node.get_properties(),
    }  # | f84a319656d070441e5ebb43afc27e0895f001ad | 112,587 |
def test(model, loss_function, torch_ref, data, labels, device):
    """ A generic testing function without batching.
    Arguments:
        model (object): Remote or local model.
        torch_ref (object): Remote or local torch reference.
        loss_function (object): Remote or local loss function.
        data (torch.Tensor): A Tensor or TensorPointer containing the test data.
        labels (torch.Tensor): A Tensor or TensorPointer containing the test labels.
        device (torch.device): The device to train on.
    Returns:
        (acc, loss): Accuracy fraction and scalar loss; fetched from the
        remote worker when the model is not local.
    """
    model.eval()
    # Move the whole (unbatched) evaluation set onto the target device.
    data = data.to(device)
    labels = labels.to(device)
    length = len(data)
    with torch_ref.no_grad():
        output = model(data)
        test_loss = loss_function(output, labels)
        # Class prediction = argmax over the class dimension.
        prediction = output.argmax(dim=1)
        total = prediction.eq(labels).sum().item()
    acc_ptr = total / length
    if model.is_local:
        # Local objects can be read directly.
        acc = acc_ptr
        loss = test_loss.item()
    else:
        # Remote pointers must be fetched from the data owner; .get() blocks
        # until the request is approved or the timeout expires.
        acc = acc_ptr.get(reason="To evaluate training progress", request_block=True, timeout_secs=5)
        loss = test_loss.item().get(reason="To evaluate training progress", request_block=True, timeout_secs=5)
    return acc, loss  # | c79f02e419683e29385540cd0df77a8ff71f0b8a | 112,591 |
def SetUpdateMask(ref, arguments, req):
    """Constructs updateMask for endpoint patch requests.

    Args:
        ref: Resource reference (unused).
        arguments: Parsed command-line arguments carrying the mutation flags.
        req: The patch request message to annotate.

    Returns:
        ``req`` with ``updateMask`` set to a sorted, comma-separated field list.
    """
    del ref
    service_accounts_mask = 'serviceAccounts'
    labels_mask = 'labels'
    update_mask = set()
    # Any service-account mutation updates the whole serviceAccounts field.
    if (arguments.clear_service_accounts or
        arguments.IsSpecified('remove_service_accounts') or
        arguments.IsSpecified('add_service_accounts')):
        update_mask.add(service_accounts_mask)
    if arguments.clear_labels:
        update_mask.add(labels_mask)
    labels_update_mask_prefix = labels_mask + '.'
    # Per-key label masks ('labels.<key>') are only added when the whole
    # labels field is not already being cleared.
    if labels_mask not in update_mask:
        if arguments.update_labels:
            for key in arguments.update_labels:
                update_mask.add(labels_update_mask_prefix + key)
        if arguments.remove_labels:
            for key in arguments.remove_labels:
                update_mask.add(labels_update_mask_prefix + key)
    req.updateMask = ','.join(sorted(update_mask))
    return req  # | 25203e952f9f3cd20bda28df6cc850bbe4d0253b | 112,596 |
def generate_quiz(list_of_mapped_qna):
    """Run an interactive quiz over mapped questions and return the score.

    Takes the ``list_of_mapped_qna`` output of
    ``generate_mapped_questions_from_file``: a list of dicts, each holding a
    'question' key plus one key per answer choice whose value exposes
    ``.answer`` and ``.correctorwrong`` attributes. Similar to
    ``print_questions`` but pauses for user input; re-prompts until the
    answer is one of the offered choice keys.
    """
    max_score = len(list_of_mapped_qna)
    score = 0
    for index, choices_dict in enumerate(list_of_mapped_qna):
        # The correct answer is the choice whose value has correctorwrong == "correct".
        correct_answer = ""
        # All choice keys offered for this question (used to validate input).
        set_of_choices = set()
        for choice, value in choices_dict.items():
            if choice == "question":
                print(f"\nQuestion {index + 1}: {choices_dict['question']}")
            else:
                print(f"{choice}: {value.answer}")
                set_of_choices.update([choice])
                if value.correctorwrong == 'correct':
                    correct_answer = choice
        given_answer = input("Type your answer here:\n")
        # Keep asking until the user types one of the offered choice keys.
        while given_answer not in set_of_choices:
            given_answer = input("Type your answer here FROM THE CHOICES (letters):\n")
        if given_answer.lower() == correct_answer.lower():
            print("Correct!")
            score += 1
        else:
            print(f"Wrong! The answer was {correct_answer}!")
    print(f"Your final score was {score} / {max_score}")
    return score  # | aa0ec4940a0756f7bdf9ec1641f5f726ba1f43ae | 112,597 |
def is_null_session(self, obj):
    """Return True if ``obj`` is a null session.

    Null sessions are not asked to be saved. By default this simply tests
    whether ``obj`` is an instance of :attr:`null_session_class`.
    """
    null_cls = self.null_session_class
    return isinstance(obj, null_cls)  # | 81741052c01943c4aa7d7c1fe76a963d9ca830bb | 112,598 |
def calculate_inequality(districts, lookup):
    """
    Calculate spatial inequality based on a school/district assignment and
    defined districts' neighborhoods, following the (latex) definition:
    \\[
    \\frac{
        \\sum_{j=1}^{N} \\sum_{i=1}^{N} \\left| y_i - y_j \\right|
    }{
        2 N \\sum_{i=1}^{N} y_i
    }
    \\]
    Args:
        districts (list of optimization.entity_nodes.District): List of all
            District instances.
        lookup (optimization.lookup.Lookup): Lookup instance for fast
            information querying.
    Returns:
        float: Spatial inequality for a school/district assignment.
    """
    # Per-student funding y_i for one district.
    # NOTE(review): raises ZeroDivisionError for a district with zero
    # students — confirm upstream guarantees non-empty districts.
    get_per_student_funding = lambda district: district.get_total_funding() / district.get_total_students()
    abs_funding_diff = lambda x,y: abs(get_per_student_funding(x) - get_per_student_funding(y))
    overall_inequality = 0
    normalization_factor = 0
    for district in districts:
        neighboring_districts = lookup.get_neighboor_districts_by_district_id(district.get_id())
        # The neighborhood includes the district itself (its own diff is 0).
        full_neighborhood = [*neighboring_districts, district]
        ineq_contribution = sum(map(
            lambda x: abs_funding_diff(district, x),
            full_neighborhood
        ))
        # Average the contribution over the neighborhood size.
        overall_inequality += ineq_contribution / len(full_neighborhood)
        normalization_factor += get_per_student_funding(district)
    return overall_inequality / normalization_factor  # | c2f14d60eb09857cf6a289d86dd53005a6ffe228 | 112,600 |
def package_to_url(package_name):
    """Return the fuchsia-pkg URL for the given component/package name."""
    return f"fuchsia-pkg://fuchsia.com/{package_name}"  # | b2bed0c5c8b6584367980f3b80c901e9a461bb7a | 112,601 |
import json
def _get_type_from_str(str, default=None):
"""
Convert docstring into object suitable for inclusion as documentation. It
tries to parse the docstring as JSON, falling back on provided default
value.
"""
if str:
try:
return json.loads(str)
except ValueError:
pass
return default if default is not None else str | 48ec1591d0aa2d11bf14693a1ffed8fa4eac30cb | 112,607 |
def remove_exclusion(failed_tasks: list, playbook_exclusion: list):
    """
    Remove failed tasks that belong to an excluded playbook.

    Matching is by substring: a task is dropped when any excluded playbook
    name occurs inside its 'Playbook Name' value.

    Args:
        failed_tasks: A list of failed-task dicts, each with a 'Playbook Name' key.
        playbook_exclusion: A list of playbook names (or fragments) to exclude.

    Returns:
        The same list object, modified in place.
    """
    # Iterate over a snapshot: removing items from the list being iterated
    # skipped the element following each removal (the original bug), so
    # consecutive excluded tasks were only partially removed.
    for task in list(failed_tasks):
        if any(playbook in task['Playbook Name'] for playbook in playbook_exclusion):
            failed_tasks.remove(task)
    return failed_tasks  # | 423f0bc707379b96b6f0134129f1ee965ca36b83 | 112,609 |
def find_overlapping_location(feature, locations):
    """Find the index of the first gene location fully containing ``feature``.

    Args:
        feature (SeqFeature): Feature being matched to a location.
        locations (list): (start, end) coordinate pairs of gene features.

    Returns:
        int: Index of the first containing interval, if any.
        None: No match found.
    """
    feat_start = feature.location.start
    feat_end = feature.location.end
    for index, (start, end) in enumerate(locations):
        if start <= feat_start and feat_end <= end:
            return index
    return None  # | 31dec69fadbeba2115d2ed0763759ac65fb21604 | 112,610 |
from pathlib import Path
from typing import Dict
from typing import Any
import yaml
def config_from_file(f: Path) -> Dict[str, Any]:
    """Parse ``f`` as YAML and return the config mapping (empty dict when the
    file is empty)."""
    text = f.read_text()
    parsed = yaml.safe_load(text)
    return parsed if parsed else {}  # | 7fe5a97100bba3334c77795301b6f6d3297f0250 | 112,611 |
def format_text_descriptions(item_parameters, *, use_html=False):
    """
    Format parameter descriptions for a plan from the list of allowed plans.
    Returns description of the plan and each parameter represented as formatted strings
    containing plan name/description and parameter name/type/default/min/max/step/description.
    The text is prepared for presentation in user interfaces. The descriptions are
    represented as a dictionary:
    .. code-block:: python
        {
            "description": "Multiline formatted text description of the plan",
            "parameters": {
               "param_name1": "Multiline formatted description",
               "param_name2": "Multiline formatted description",
               "param_name3": "Multiline formatted description",
            }
        }
    Parameters
    ----------
    item_parameters : dict
        A dictionary of item parameters, e.g. an element from the list of existing or allowed plans.
    use_html : boolean
        Select if the formatted text should be returned as plain text (default) or HTML.
    Returns
    -------
    dict
        The dictionary that contains formatted descriptions for the plan and its parameters.
    """
    if not item_parameters:
        return {}
    # Markup fragments; empty strings in plain-text mode.
    start_bold = "<b>" if use_html else ""
    stop_bold = "</b>" if use_html else ""
    start_it = "<i>" if use_html else ""
    stop_it = "</i>" if use_html else ""
    new_line = "<br>" if use_html else "\n"
    not_available = "Description is not available"
    descriptions = {}
    item_name = item_parameters.get("name", "")
    item_description = str(item_parameters.get("description", ""))
    item_description = item_description.replace("\n", new_line)
    s = f"{start_it}Name:{stop_it} {start_bold}{item_name}{stop_bold}"
    if item_description:
        s += f"{new_line}{item_description}"
    descriptions["description"] = s if s else not_available
    descriptions["parameters"] = {}
    for p in item_parameters.get("parameters", []):
        p_name = p.get("name", None) or "-"
        p_type = "-"
        p_custom_types = []
        annotation = p.get("annotation", None)
        if annotation:
            p_type = str(annotation.get("type", "")) or "-"
            # Collect custom type groups (name, items) to list under the type.
            for t in ("devices", "plans", "enums"):
                if t in annotation:
                    for ct in annotation[t]:
                        p_custom_types.append((ct, tuple(annotation[t][ct])))
        # NOTE(review): ``or "-"`` also replaces falsy defaults (0, False, "")
        # with "-" — confirm that is the intended display behavior.
        p_default = p.get("default", None) or "-"
        p_min = p.get("min", None)
        p_max = p.get("max", None)
        p_step = p.get("step", None)
        p_description = p.get("description", "")
        p_description = p_description.replace("\n", new_line)
        desc = (
            f"{start_it}Name:{stop_it} {start_bold}{p_name}{stop_bold}{new_line}"
            f"{start_it}Type:{stop_it} {start_bold}{p_type}{stop_bold}{new_line}"
        )
        for ct, ct_items in p_custom_types:
            desc += f"{start_bold}{ct}:{stop_bold} {ct_items}{new_line}"
        desc += f"{start_it}Default:{stop_it} {start_bold}{p_default}{stop_bold}"
        # Min/Max/Step go on one extra line, space-separated, only when present.
        if (p_min is not None) or (p_max is not None) or (p_step is not None):
            desc += f"{new_line}"
            insert_space = False
            if p_min is not None:
                desc += f"{start_it}Min:{stop_it} {start_bold}{p_min}{stop_bold}"
                insert_space = True
            if p_max is not None:
                if insert_space:
                    desc += " "
                desc += f"{start_it}Max:{stop_it} {start_bold}{p_max}{stop_bold}"
                insert_space = True
            if p_step is not None:
                if insert_space:
                    desc += " "
                desc += f"{start_it}Step:{stop_it} {start_bold}{p_step}{stop_bold}"
        if p_description:
            desc += f"{new_line}{p_description}"
        descriptions["parameters"][p_name] = desc if desc else not_available
    return descriptions  # | 2b2d0479cfbce75f5843d1aebe607f1180a82b18 | 112,613 |
def find_type(string_in):
    """Return ``string_in`` coerced to int or float when possible, otherwise
    the original string (int is tried before float)."""
    for converter in (int, float):
        try:
            return converter(string_in)
        except ValueError:
            continue
    return string_in  # | 794382f3ce64db009cf5c8ef94aee62bc1b8d423 | 112,614 |
def name_from_selection(s: str) -> str:
    """Extract the theme name (last whitespace-separated token) from an fzf
    selection line."""
    tokens = s.split()
    return tokens[-1].strip()  # | 6aba976b3bfbb7cf9803a9426152910abda0d964 | 112,616 |
def write_list(thelist, filename):
    """Write each element of ``thelist`` to a separate line in ``filename``.

    Args:
        thelist: Iterable of elements; each is converted with ``str()``.
        filename: Path of the file to (over)write.

    Returns:
        ``[]`` if the file could not be written (kept for backward
        compatibility with existing callers), otherwise ``None``.
    """
    try:
        # ``with`` guarantees the handle is closed even on error; the
        # original opened the file without ever closing it.
        with open(filename, 'w') as fd:
            for list_el in thelist:
                fd.write(str(list_el) + '\n')
    except IOError:
        print('Error opening file ' + filename)
        return []
    return  # | dee91c615eba6025719e503039d6292f6b9d4af3 | 112,618 |
from typing import Optional
def optional_empty_str(value: Optional[str]) -> Optional[str]:
    """Normalize an optional string: the empty string becomes None; any other
    value (including None) is returned unchanged."""
    return value if value else None  # | 4f51d9e593509762f00622464daf5c1d47f61d84 | 112,619 |
import random
def pick_some(sequence, num_items):
    """
    Return a random sample of between 1 and ``num_items`` (inclusive)
    elements drawn without replacement from ``sequence``.
    """
    count = random.randint(1, num_items)
    return random.sample(sequence, count)  # | 247301fc8f276a7cc9986eff2b5d9302fad33bd8 | 112,624 |
from datetime import datetime
from typing import Any
from typing import Tuple
import pytz
def localize_time_range(begin: datetime, end: datetime, tz: Any = None) -> Tuple[datetime, datetime]:
    """
    Attach a timezone to a naive begin/end pair.

    :param begin: Begin datetime
    :param end: End datetime
    :param tz: pytz timezone or None (default UTC)
    :return: (localized begin, localized end)
    """
    zone = pytz.utc if tz is None else tz
    return zone.localize(begin), zone.localize(end)  # | 04e57081efb8e5aed239c709c21bb72acaa31768 | 112,625 |
def choose(docs):
    """Print line number, title and truncated description for each tuple in
    ``docs``, then ask the user to pick a line number.

    Returns the first item of the chosen tuple (the "identifier") when the
    input is a valid index, otherwise None.
    """
    last = len(docs) - 1
    for num, doc in enumerate(docs):
        print(f"{num}: ({doc[1]}) {doc[2][:30]}...")
    index = input(f"Which would you like to see (0 to {last})? ")
    try:
        return docs[int(index)][0]
    except (ValueError, IndexError):
        # Narrowed from a bare ``except``, which also swallowed
        # KeyboardInterrupt/SystemExit; only bad or out-of-range numbers
        # mean "no choice".
        return None  # | d1e7b88d478f8fba5aa9ab21ad82ce0b9b6a543d | 112,627 |
def deepget(data: dict, keys: list[str]):
    """
    deepget nested keys from a dictionary. Raises a KeyError on the first
    missing sub-key (and IndexError when ``keys`` is empty).

    Unlike the previous implementation, the caller's ``keys`` list is NOT
    consumed: it is left untouched.
    """
    # keys[0]/keys[1:] (instead of keys.pop(0)) keeps the argument
    # unmodified while preserving the IndexError for an empty key list.
    k = keys[0]
    rest = keys[1:]
    if rest:
        return deepget(data[k], rest)
    else:
        return data[k]  # | 8809ce784eefd0d580bbdb0c7e008a45b216cdea | 112,629 |
def compact(src):
    """Return a list of only the truthy values in ``src``."""
    return list(filter(None, src))  # | 20a3c52d2c6beea9a28ed42912162d58270ca62e | 112,630 |
def fit_to(img_width, img_height, target_width, target_height):
    """
    Scale image dimensions so the image fits entirely inside
    target_width x target_height, preserving aspect ratio.

    Returns (new_width, new_height, -1); the trailing -1 keeps the tuple
    compatible with tvtk.
    """
    scale_w = target_width / float(img_width)
    scale_h = target_height / float(img_height)
    if scale_w < scale_h:
        # Width is the limiting dimension.
        return (target_width, scale_w * img_height, -1)
    # Height is the limiting dimension (or both scale equally).
    return (scale_h * img_width, target_height, -1)  # | 3a8d470089d69dfc70d480f3366c7a89c2972000 | 112,632 |
import copy
def remove_policy_tags(schema):
    """Return a deep copy of ``schema`` whose fields carry no ``policyTags``.

    Workaround for a 403 error with policy tags, which are not required in a
    load job: https://github.com/googleapis/python-bigquery/pull/557
    The input schema is left unmodified (see Issue #277).
    """
    cleaned = copy.deepcopy(schema)
    for field in cleaned["fields"]:
        field.pop("policyTags", None)
    return cleaned  # | 5941e2bd57fe339b2d2ba6b9fd12acdf147dce2f | 112,633 |
def fixed_width_text(text, char_num=10):
    """Insert a line break after every ``char_num`` characters of ``text``.

    Parameters
    ----------
    text: obj:'str'
        text to apply the linebreaks
    char_num: obj:'int'
        max number of characters in a line before a line break
        Default: 10

    Returns
    -------
    obj:'str'
        the text with line breaks after every char_num characters

    Examples
    --------
    >>> fixed_width_text("12345", char_num=2)
    '12\\n34\\n5'
    >>> fixed_width_text("123456")
    '123456'
    >>> fixed_width_text("", 2)
    ''
    """
    chunks = (text[start:start + char_num]
              for start in range(0, len(text), char_num))
    return "\n".join(chunks)  # | 12bbaa276b95a852a794d080131f37ef5fe0a635 | 112,635 |
import math
def compute_idf(word, documents):
    """
    Compute the inverse document frequency of ``word`` over ``documents``
    (a mapping of document id -> document text/tokens).

    NOTE(review): raises ZeroDivisionError when the word appears in no
    document — confirm callers only query words present in the corpus.
    """
    num_occurs = sum(1 for doc in documents.values() if word in doc)
    return math.log(len(documents) / num_occurs)  # | f0b7d32d2ca6d1604df1fc7eda12bba557d35a52 | 112,637 |
def prob2odds(p):
    """Return the odds against an event of probability ``p``: (1-p)/p.

    Note: in the usual game impossible events (p == 0) never occur, so
    division by zero is deliberately not handled here.
    """
    complement = 1 - p
    return complement / p  # | 41339b37ca553fda0289472aa9b31e90e6da2b76 | 112,640 |
def _match_fn(matchobj):
"""Return original <a href...> with `target='_blank'` inserted"""
s = matchobj.group(0)
return '{} target="_blank" {}'.format(s[:2], s[3:]) | b7a00343b40ce7b15545e817901ad7248783ddc1 | 112,642 |
def get_node_ids(iface_list):
    """Return the unique node_ids found in the master list of interface
    dicts produced by get_iface_list(), in no guaranteed order."""
    unique_ids = {iface["node_id"] for iface in iface_list}
    return list(unique_ids)  # | c72a443be1bb3d7f133c9848b1e5ea7678a24586 | 112,644 |
def border_style(keyword):
    """``border-*-style`` properties validation: True when ``keyword`` is a
    valid CSS border style."""
    valid_keywords = frozenset((
        'none', 'hidden', 'dotted', 'dashed', 'double',
        'inset', 'outset', 'groove', 'ridge', 'solid',
    ))
    return keyword in valid_keywords  # | dcddd53f39d603b1f0a04ed017dbc2857efdde88 | 112,655 |
def extract_order(tup):
    """Split an 8-tuple of parameters into ((p, d, q), (P, D, Q, s), k)."""
    order = tuple(tup[:3])
    seasonal_order = tuple(tup[3:7])
    return order, seasonal_order, tup[7]  # | 4590baa7b0ffe322529c2771e980c7a35b49376a | 112,656 |
def make_operand_mask(mask_string):
    """
    Build (and_mask, cmp_mask) from a binary pattern string with fixed and
    variable bits, used to match an instruction word against the
    instruction table: a word w matches iff (w & and_mask) == cmp_mask.

    '0' and '1' are fixed bits (set in and_mask; '1' also sets cmp_mask);
    any other character is a don't-care bit.
    """
    and_mask = 0
    cmp_mask = 0
    for ch in mask_string:
        and_bit = 1 if ch in ('0', '1') else 0
        cmp_bit = 1 if ch == '1' else 0
        and_mask = (and_mask << 1) | and_bit
        cmp_mask = (cmp_mask << 1) | cmp_bit
    return and_mask, cmp_mask  # | 9b16d5c155f0687341ce166ee302c5d4dc4ff1ad | 112,660 |
def between(x, left, right, inclusive="both"):
    """Function version of ``left <= x <= right`` for scalar and vector data.

    See https://dplyr.tidyverse.org/reference/between.html

    Args:
        x: The data to test (anything exposing a pandas-style ``.between``)
        left: and
        right: The boundary values (must be scalars)
        inclusive: Either `both`, `neither`, `left` or `right` —
            which bounds are treated as closed.

    Returns:
        A bool if `x` is scalar, otherwise an array of booleans.
        Always False where NA appears in x, left or right.
    """
    return x.between(left, right, inclusive=inclusive)  # | a9ed7654c057637d33b2c1b0093e701d07b181de | 112,663 |
def array_4bit_to_byte(array):
    """Expand a 2048-byte array of packed 4-bit values into a bytearray of
    4096 one-byte values.

    Minecraft packs the FIRST value into the LEAST significant nibble of
    each byte (not the most significant, as one might expect), so the low
    nibble is emitted before the high one.
    """
    out = bytearray()
    for packed in array:
        out.append(packed & 0x0F)          # little end of the byte first
        out.append((packed >> 4) & 0x0F)   # then the big end
    return out  # | de1cb77ff857122de412002082158be6a3a5716e | 112,666 |
import requests
import time
import json
def getinfo(IDs):
    """
    Pull data about specific loans from the Kiva API.

    Sleeps briefly after the request so that API usage limits are not
    exceeded.

    :param IDs: Comma-separated string of up to 100 loan ids.
    :return loans: A list of dictionaries containing the full information.
    """
    url = "http://api.kivaws.org/v1/loans/" + IDs + ".json"
    response = requests.get(url, params={"appid": "com.woodside.autotag"})
    time.sleep(60 / 55)
    return json.loads(response.text)["loans"]  # | bd67598700b7eb4e75af0c463187698b4aa85817 | 112,673 |
def bind_rt_nodes(srcnodelist, newnodes_list):
    """Return ``srcnodelist`` with every RuntimeNode replaced by the next
    node drawn from ``newnodes_list``.

    All nodes in ``newnodes_list`` must be consumed; otherwise an
    AssertionError is raised.
    """
    memo = {}
    replacements = iter(newnodes_list)
    bound = [node.bind_rt_nodes(memo, replacements) for node in srcnodelist]
    leftovers = list(replacements)
    assert leftovers == [], "too many nodes in newnodes_list"
    return bound  # | a9141767b177a8dbbfcc291d3b4415a197872818 | 112,678 |
def float_pop(d, k):
    """Pop ``d[k]`` and return it converted to float (None stays None).

    Args:
        d (dict): dict to remove key k from.
        k: Key to pop from dict d.

    Returns:
        The popped value converted with ``float()``, or None if it was None.
    """
    value = d.pop(k)
    return None if value is None else float(value)  # | bcb8bed1f5e43f905769dfb8373685cec4816f47 | 112,681 |
import configparser
def get_config(path):
    """
    Load an INI configuration file.

    path - path to the config file

    Returns the populated ConfigParser instance.
    """
    parser = configparser.ConfigParser()
    parser.read(path)
    return parser  # | 6047880c56435cf1c1404e7bef299538d5df0255 | 112,687 |
def open_stopwords(stop_words_file_path):
    """
    Load a newline-delimited stop-words file into a set usable for
    filtering tokens/words later.

    :param stop_words_file_path: path of the stop words file
    :return: a set of stop words to filter out
    """
    with open(stop_words_file_path, "r") as handle:
        contents = handle.read()
    return set(contents.split("\n"))  # | 1afa43933c4d364001b4e5414bd217fed7a49a72 | 112,689 |
def group_gen_by_year_fuel_primemover(df):
    """
    Sum generation and fuel consumption by plant, prime mover, and fuel type.

    Only matters where multiple years of data are used; otherwise the output
    should match the input (modulo the dropped id columns and row order).

    Parameters
    ----------
    df : dataframe
        Generation and fuel consumption data from EIA 923 for each plant,
        prime mover, and fuel type.

    Returns
    -------
    dataframe
        Sum of generation and fuel consumption data (if multiple years).
    """
    # Group the data by plant, fuel type, and prime mover
    by = [
        "plant_id_eia",
        "fuel_type",
        "fuel_type_code_pudl",
        "fuel_type_code_aer",
        "prime_mover_code",
    ]
    value_cols = [
        "fuel_consumed_units",
        "fuel_consumed_for_electricity_units",
        "fuel_consumed_mmbtu",
        "fuel_consumed_for_electricity_mmbtu",
        "net_generation_mwh",
    ]
    # Select the value columns with a LIST: the old tuple-style
    # ``groupby(...)["a", "b", ...]`` indexing was deprecated and removed in
    # pandas 2.0. The previous ``.reset_index().drop(columns="index")`` was a
    # no-op with ``as_index=False`` and has been dropped.
    annual_gen_fuel_923 = (
        df.drop(columns=["id", "nuclear_unit_id"])
        .groupby(by=by, as_index=False)[value_cols]
        .sum()
        .sort_values(["plant_id_eia", "fuel_type", "prime_mover_code"])
    )
    return annual_gen_fuel_923  # | 504dd16307f829d091c3f96bc1aade2ceb4a3ba7 | 112,691 |
def preserve_location_hint(config, response):
    """Copy the target location hint cookie from ``response`` into the
    client ``config`` so subsequent requests stick to the same location."""
    hint_cookie = response.get("target_location_hint_cookie") if response else None
    if hint_cookie:
        config["target_location_hint"] = hint_cookie.get("value")
    return response  # | 61d4cb4c7d2f1f6b404051264cca8441af06e4bf | 112,694 |
def _feature_accuracy(feature_stats):
"""Computes accuracy from the supplied counters."""
return (feature_stats["correct"] / feature_stats["total"] * 100.0
if feature_stats["total"] != 0.0 else 0.0) | a3a737a8f374eb5e1fb0afc6fcb3bae8bec437e5 | 112,703 |
from typing import Tuple
from typing import List
def create_tetrahedron() -> Tuple[List[float], List[int]]:
    """
    Build a tetrahedron as a flat XYZ vertex list plus triangle indices.

        (3)
         |
         |
        (0)--------(1)
        /
      (2)
    """
    corners = (
        (0., 0., 0.),  # 0
        (1., 0., 0.),  # 1
        (0., 1., 0.),  # 2
        (1., 1., 1.),  # 3
    )
    points: List[float] = []
    for x, y, z in corners:
        points.extend((x, y, z))
    faces: List[int] = [
        0, 1, 2,
        0, 1, 3,
        1, 2, 3,
        2, 0, 3,
    ]
    return points, faces  # | c5d0b3fb0d753c228c20ddbf4d9e2bf13e1faa62 | 112,706 |
def generate_progressbar(percentage):
    """
    Render an ASCII-art style progress bar followed by the percentage
    formatted with a German-locale decimal comma.
    """
    bar_width = 15
    filled = round((percentage / 100) * bar_width)
    bar = "▓" * filled + "░" * (bar_width - filled)
    shown = str(percentage).replace(".", ",")
    return f"{bar} {shown}%"  # | ce36f8e712f8da63d7066cdcb82d44d8749a9a93 | 112,707 |
def valid_mean(tensor, valid=None, dim=None):
    """Mean of ``tensor``, restricted to entries where the optional mask
    ``valid`` is nonzero, optionally along dimension(s) ``dim``."""
    reduce_dims = () if dim is None else dim
    if valid is None:
        return tensor.mean(dim=reduce_dims)
    mask = valid.type(tensor.dtype)  # Convert as needed.
    return (tensor * mask).sum(dim=reduce_dims) / mask.sum(dim=reduce_dims)  # | 1b6c89f1483f61024a0d3c7083c845459206e23d | 112,708 |
from typing import List
def txt_python_func(func_list: List[str], func_type: str) -> str:
    """Build the source text of stub functions for the generated simulation file.

    func_list contains the names of all functions to generate stubs for
    (duplicates are ignored). func_type selects the signature:
        'p' -> process functions      (env, item, machine, factory)
        's' -> sources and sinks      (env, factory)
        'g' -> global functions       (env, factory)
    Any other type yields an empty signature.
    """
    # dict.fromkeys() deduplicates while keeping first-seen order, so the
    # generated file is deterministic (list(set(...)) shuffled the stubs
    # from run to run).
    unique_funcs = list(dict.fromkeys(func_list))
    # Signature per function type.
    signatures = {
        'p': 'env, item, machine, factory',
        's': 'env, factory',
        'g': 'env, factory',
    }
    signature = signatures.get(func_type, '')
    txt_func: str = ''
    for func in unique_funcs:
        txt_func += 'def ' + func + '(' + signature + '):\n\n\tpass\n\n'
    return txt_func  # | 443aadda78530493ec9a8007912f1925f517a0d1 | 112,713 |
import re
def capitalization_density(text):
    """Return the fraction of words in ``text`` that start with an uppercase
    ASCII letter.

    Arguments:
        text (str): The input text.

    Returns:
        (float): The density of capitalized words (0 for empty input).
    """
    if not text:
        return 0
    # Replace non-word characters with spaces, then split on whitespace.
    words = re.sub(r'\W', ' ', text).split()
    if not words:
        return 0.0
    capitalized = sum(1 for w in words if re.match('[A-Z]', w))
    return float(capitalized) / len(words)  # | 535dfe0f0e6b4168971ef19af0b00bf1927ac83b | 112,715 |
def year_to_numbers(year):
"""
Convert the given year into an array of digits.
e.g. year_to_numbers(1990) -> [1, 9, 9, 0]
"""
result = []
for i in 1000,100,10,1:
result.append(int(year / i))
year = year % i
return result | 8f7aa085bfd6a107d183a994f3e0a1749dea7e96 | 112,717 |
import copy
def tree_map(func, node):
    """
    Return a transformed copy of the tree: each node is shallow-copied, its
    children are mapped recursively, and ``func`` is applied to every
    copied node.

    :param func: A transformer function.
    :type func: typing.Callable[[d20.ast.ChildMixin], d20.ast.ChildMixin]
    :param node: The root of the tree to transform.
    :type node: d20.ast.ChildMixin
    """
    clone = copy.copy(node)
    for index, child in enumerate(clone.children):
        clone.set_child(index, tree_map(func, child))
    return func(clone)  # | c2787409f6d5e738fe619b8a609a0a00b58c178b | 112,718 |
def _extract_class(frame_locals):
"""
See https://stackoverflow.com/questions/2203424/python-how-to-retrieve-class-information-from-a-frame-object/2544639#2544639
for the details behind the implementation. The way to use to extract class from method relies on the fact that
"self" is passed as an argument in the function and it points to the class which owns this function.
"""
try:
return frame_locals['self'].__class__.__name__
except Exception:
# Fail to get the class name should not block the whole sample
return None | 7515db17d66d6791ae363bb839b3747ef737d3ea | 112,727 |
def splitn_str(your_string, n):
    """Split ``your_string`` into consecutive chunks of at most ``n``
    characters, returned as a list."""
    chunks = []
    for start in range(0, len(your_string), n):
        chunks.append(your_string[start:start + n])
    return chunks  # | b030c83e924dc830585a0996987f2ff9645fb0ff | 112,731 |
from typing import Tuple
def expand_class_ref(cls_ref: str) -> Tuple[str, str]:
    """
    Split a dotted class reference into (module path, class name).

    >>> expand_class_ref('test.test.Test')
    ('test.test', 'Test')
    """
    module_path, _, class_name = cls_ref.rpartition(".")
    return module_path, class_name  # | 6bbbc66f465119f189247da74aae48e0d37c6ce0 | 112,732 |
def get_changed_data_from_form(form):
    """Returns a dictionary with keys of all changed fields
    and values that are a dict with 'before' and 'after' keys
    values of 'before' and 'after' are display values
    for example:
        {
        'First name': {
            'before': 'George',
            'after': 'Jorge'
        },
        'Date of birth': {
            'before': 'February/6/1791',
            'after': '2/6/1791'}
        }
    }
    Expects fields prefixed with 'existing_' in order to make that comparison
    """
    changes = {}
    # Re-parse the same raw input under the 'existing_' prefix to obtain the
    # "before" values; parse-only so no validation errors are raised here.
    existing_data_form = form.__class__(
        form.raw_input_data, prefix='existing_', validate=True,
        skip_validation_parse_only=True)
    for field in form.iter_fields():
        after = field.get_display_value()
        # Matching field on the existing-data form, keyed by context_key.
        existing_data_field = existing_data_form.fields[field.context_key]
        before = existing_data_field.get_display_value()
        # Only record fields whose display value actually changed.
        if before != after:
            changes[field.get_display_label()] = {
                'before': before,
                'after': after}
    return changes  # | 421d9eee266baa9d02bd4034b1b3a56dfaacf5b1 | 112,734 |
def convert_in_bit_format(ip_value) -> str:
    """
    Convert a dotted-quad string (IP address or mask) to its 32-bit binary
    representation.

    :param ip_value: Can be a mask or an IP address, e.g. "192.168.0.1"
    :return str: ip_value in bit format (four 8-bit groups concatenated)
    :raises AssertionError: if the value does not have exactly four octets
    """
    octets = ip_value.split(".")
    assert len(octets) == 4
    # format(..., "08b") zero-pads each octet to 8 bits; the previous
    # version shadowed the ``bytes`` builtin and called __len__ directly.
    return "".join(format(int(octet), "08b") for octet in octets)  # | 9f02c2544fd49f2f2fcf4a5bf12aa2e79fe24f91 | 112,737 |
from typing import Dict
def get_score(tree_node: Dict, features: Dict) -> float:
    """
    Walk a decision tree and return the leaf weight for ``features``.

    Nodes look like {"f": 470, "c": 0, "u": "l", "l": {}, "r": {}};
    leaves look like {"v": 32.0}. A missing feature follows the node's
    default branch ("u"); values below the cut "c" go left, otherwise right.
    Depth is bounded by the maximum tree depth in the forest.

    :param tree_node: Node or leaf of the tree.
    :param features: Sparse feature dict keyed by stringified feature index.
    :return: The score at the leaf that is reached.
    """
    # Iterative descent (the original recursed; behavior is identical).
    while "v" not in tree_node:
        feature_key = str(tree_node["f"])
        if feature_key not in features:
            tree_node = tree_node[tree_node["u"]]
        elif features[feature_key] < tree_node["c"]:
            tree_node = tree_node["l"]
        else:
            tree_node = tree_node["r"]
    return tree_node["v"]  # | 1b40d2fb217dc9d261c064f266c504a653d991fc | 112,742 |
import zlib
def decompress_gzip(foo: bytes) -> bytes:
    """Decompress gzip-framed data using zlib (the +16 wbits offset tells
    zlib to expect a GZIP header)."""
    gzip_wbits = zlib.MAX_WBITS + 16
    return zlib.decompress(foo, wbits=gzip_wbits)  # | 4f07f576b38c63cbe6fc72bbff279cd5d597aa24 | 112,747 |
def get_history_date(history_item):
    """
    Return the 'date' field of a history item mapping.
    """
    date_value = history_item['date']
    return date_value  # | fb84e3cad649484251e033a99cc3a9887d0dd142 | 112,748 |
def cipher(text, shift, encrypt=True):
    """
    Caesar-shift ``text`` over the 52-letter alphabet a-z then A-Z.

    Each letter is moved ``shift`` positions forward (encrypt=True) or
    backward (encrypt=False), wrapping around the combined lower+upper
    alphabet; any character not in the alphabet is left unchanged.

    Parameters (Inputs)
    ----------
    text : str
        A string of texts to encrypt or decrypt.
    shift : int
        How many positions each letter should be shifted.
    encrypt : boolean
        True to encrypt, False to decrypt.

    Returns (Output)
    -------
    str
        The encrypted or decrypted result of the input text.

    Examples
    --------
    >>> cipher("I love MDS", 1, encrypt=True)
    'J mpwf NET'
    >>> cipher("J mpwf NET", 1, encrypt=False)
    'I love MDS'
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    size = len(alphabet)
    offset = shift if encrypt == True else -shift
    transformed = []
    for ch in text:
        position = alphabet.find(ch)
        if position == -1:
            transformed.append(ch)
        else:
            transformed.append(alphabet[(position + offset) % size])
    return ''.join(transformed)  # | c5ddaa924b4394ab49deececa626c5473313b820 | 112,749 |
def ready(req, resp):
    """Proof-of-life health-check handler; always answers "yes".

    NOTE(review): ``req`` and ``resp`` appear to exist only to satisfy the
    handler signature — they are not used here.
    """
    return "yes"  # | 62ef0e79a3b60647a462fb6373f59cf7b748f1f8 | 112,757 |
def gl_create_user(gl, email, password, username, last_name, first_name):
    """
    Create a user on a GitLab instance. May throw an exception (e.g. if the
    user already exists).

    :param gl: GitLab instance
    :param email: The user's email address
    :param password: The user's default password
    :param username: The user's username
    :param last_name: The user's last name
    :param first_name: The user's first name
    :return: A handle to the newly created user
    """
    user_attributes = {
        'email': email,
        'password': password,
        'username': username,
        'name': first_name + ' ' + last_name,
    }
    return gl.users.create(user_attributes)  # | 6cde8af2c9d824825cc7d5d728e93a3cb57a118b | 112,759 |
def parse_test_cases(bs_file):
    """
    Split a parsed .xml report into its individual test cases.

    :param bs_file: the BeautifulSoup object containing the whole text of the
        .xml file (the output of open_xml)
    :return: a list with every test case's .xml element as a separate item
    """
    test_cases = bs_file.find_all("testcase")
    return test_cases
def tabs(num: int) -> str:
    """Return a blank indentation string of ``num`` spaces."""
    return num * " "
def p_contain_resist_uty(p_li, p_e, p_b, γ_m, γ_SCPC):
    """Pressure containment resistance unity check.

    Computes (p_li - p_e) * γ_m * γ_SCPC / p_b.

    Reference:
        DNVGL-ST-F101 (2017-12)
        sec:5.4.2.1 eq:5.6 p:93
        (press_contain_resis_unity)
    """
    differential_pressure = p_li - p_e
    return differential_pressure * γ_m * γ_SCPC / p_b
def get_topk(scores, items, k):
    """
    Return the top-k scores with their matching items.

    Given a 1-d tensor of scores and a parallel sequence of items, produces
    {'score': (s1, s2, ...), 'text': (x1, x2, ...)} where s1 is the highest
    score and x1 its item, s2 the second highest, and so on.
    """
    top_vals, top_ixs = scores.topk(k)
    pairs = [(v.item(), items[i]) for v, i in zip(top_vals, top_ixs)]
    vals, texts = zip(*pairs)
    return {'score': vals, 'text': texts}
import time
def get_runtime(start_time, p=3):
    """
    Return a human-readable string for the time elapsed since ``start_time``.

    The unit is chosen by magnitude -- seconds while under ~2 minutes,
    minutes while under an hour, hours otherwise -- and the value is rounded
    to ``p`` decimal places.
    """
    elapsed_s = time.time() - start_time
    elapsed_min = elapsed_s / 60.0
    if elapsed_min < 2:
        value, units = elapsed_s, "seconds"
    elif elapsed_min < 60:
        value, units = elapsed_min, "minutes"
    else:
        value, units = elapsed_min / 60.0, "hours"
    return "{} {}".format(round(value, p), units)
def num_permutations(list_of_lists):
    """
    Return the number of permutations obtainable from a list of domains,
    i.e. the size of the Cartesian product (one element picked per domain).

    An empty outer list yields 0 (preserving the original contract), even
    though the empty product is mathematically 1.  Any empty inner domain
    also yields 0.
    """
    from math import prod
    if not list_of_lists:
        return 0
    return prod(len(domain) for domain in list_of_lists)
def getChargeIndex(chargeFlags):
    """
    Called from writeToCsv().  Given a list of bools where the entry at
    index i marks whether the user selected charge state i+1, return the
    list of selected indices, e.g. [True, False, False, False, True]
    yields [0, 4].

    :param chargeFlags: list of bools, one per charge state
    :return chargeHeaders: list of selected charge indices
    """
    chargeHeaders = []
    for charge, selected in enumerate(chargeFlags):
        if selected:
            chargeHeaders.append(charge)
    return chargeHeaders
def _make_even(n):
"""Return largest even integer less than or equal to `n`."""
return (n >> 1) << 1 | 885edad0c42f1d7867cb317483dee30f1ab36eaa | 112,791 |
def is_subdict(X, Y):
    """
    Check whether X is contained by Y, i.e. whether X is a "sub-dictionary"
    of Y: every key/value pair of X also appears in Y.

    Unlike the set-of-items approach, this works even when values are
    unhashable (lists, dicts, ...).

    Returns bool.
    """
    return all(key in Y and Y[key] == value for key, value in X.items())
def _parse_cisco_mac_address(cisco_hardware_addr):
"""
Parse a Cisco formatted HW address to normal MAC.
e.g. convert
001d.ec02.07ab
to:
00:1D:EC:02:07:AB
Takes in cisco_hwaddr: HWAddr String from Cisco ARP table
Returns a regular standard MAC address
"""
cisco_hardware_addr = cisco_hardware_addr.replace(".", "")
blocks = [
cisco_hardware_addr[x : x + 2] for x in range(0, len(cisco_hardware_addr), 2)
]
return ":".join(blocks).upper() | bd93984f195e4051ac60baf5f48dd225017d1261 | 112,799 |
def possessive(string):
    """
    Append a possessive marker to a string: a bare apostrophe when the
    string already ends in 's', otherwise "'s".

    Arguments:
        string (string): the string to mark as possessive.

    Returns:
        string (string): the string with its possessive suffix attached.
    """
    suffix = "'" if string[-1] == 's' else "'s"
    return f"{string}{suffix}"
def calculate_number_of_conditions(conditions_length, max_conditions):
    """
    Return how many aggregate conditions are needed to cover all conditions.

    Each generated condition consumes up to ``max_conditions`` entries (as of
    writing, 10) and adds one new entry back to the list, for a net reduction
    of ``max_conditions - 1`` per iteration.  The count is therefore the
    ceiling of (conditions_length - 1) / (max_conditions - 1), computed here
    with pure integer arithmetic as 1 + (conditions_length - 2) // (max_conditions - 1).

    :param int conditions_length: total number of conditions to handle
    :param int max_conditions: maximum number of conditions per Fn::Or statement
    :return: the number (int) of necessary additional conditions.
    """
    net_reduction = max_conditions - 1
    return 1 + (conditions_length - 2) // net_reduction
def flip_dict(d):
    """Swap the keys and values of a dictionary, mapping each value to the
    list of keys that held it.

    Duplicated values therefore collect all of their keys, in the order the
    keys appear in ``d``.  ``dict.setdefault`` supplies the empty-list
    default the first time a value is seen -- the same job
    ``collections.defaultdict(list)`` would do, but the result stays a
    plain dict.
    """
    flipped = {}
    for key, value in d.items():
        flipped.setdefault(value, []).append(key)
    return flipped
def clean_text (product):
    """ Clean product or supplier text: delete extra spaces and clean links

    :param product: raw product/supplier string, possibly containing an
        HTML ``<a ...>...</a>`` anchor
    :return: the cleaned string
    """
    # Normalise whitespace at the edges and drop embedded newlines.
    text_clean = product.replace("\n", "").replace(" ", "").strip()
    # NOTE(review): replacing "&" with "&" is a no-op -- the source was
    # probably meant to unescape "&amp;" before some processing mangled the
    # literal.  Confirm against the original intent.
    text_clean = text_clean.replace("&", "&")
    # Strip an HTML anchor, if one is present.
    link_end = text_clean.find("</a>")
    if link_end != -1:
        # Remove the opening <a ...> tag.
        link_start_open = text_clean.find("<a")
        link_start_close = text_clean.find(">")
        # NOTE(review): the slice starts at link_start_open - 1, which also
        # captures the character *before* "<a"; when the tag sits at index 0
        # this becomes a negative index and 'link_start' is likely the empty
        # string -- and str.replace("", " ") would then insert a space
        # between every character.  Looks like an off-by-one; confirm intent
        # before changing.
        link_start = text_clean[link_start_open-1:link_start_close+1]
        text_clean = text_clean.replace(link_start, " ")
        # Remove the closing </a> tag.
        text_clean = text_clean.replace("</a>", "")
        # Trim any whitespace the tag removal left behind.
        text_clean = text_clean.strip()
    return text_clean
import re
def get_class(file):
    """Collect the reader class names declared in a plugin file.

    Scans *file* for classes deriving from ``AbstractNetworkReader`` (input
    readers) and ``AbstractTrafficDataReader`` (output readers).

    Parameters
    ----------
    file : str
        Plugin file to parse.

    Returns
    -------
    (list, list)
        List of input reader names and list of output reader names.
    """
    with open(file, "r") as f:
        contents = f.read()
    # re.findall always returns a list (never None), so no guard is needed;
    # raw strings avoid invalid-escape warnings for \w and \(.
    all_reader_input = re.findall(r"class (\w+)\(AbstractNetworkReader\)", contents)
    all_reader_output = re.findall(r"class (\w+)\(AbstractTrafficDataReader\)", contents)
    return all_reader_input, all_reader_output
import difflib
def record_diff(old, new):
    """
    Generate a human-readable diff of two performance records.

    Each record is a sequence of operation dicts; every key/value pair is
    rendered as a "key: value" line and the two line lists are compared
    with difflib.ndiff.
    """
    def flatten(record):
        return ['%s: %s' % (key, val) for op in record for key, val in op.items()]

    return '\n'.join(difflib.ndiff(flatten(old), flatten(new)))
import math
def binary_search(arr, target):
    """
    Performs an iterative binary search over a sorted list.

    - Time complexity: O(log(n))
    - Space complexity: O(1)

    Args:
        arr (list): List of sorted numbers
        target (float): Target to find

    Returns:
        mid (int): Index of the target. Returns -1 if not found
    """
    left = 0
    right = len(arr) - 1
    while left <= right:
        # Integer floor division keeps the midpoint exact for arbitrarily
        # large indices; math.floor((l + r) / 2) goes through float division
        # and loses precision past 2**53.
        mid = (left + right) // 2
        if arr[mid] < target:
            left = mid + 1
        elif arr[mid] > target:
            right = mid - 1
        else:
            return mid
    return -1
def make_divisible(v, divisor=8, min_value=None):
    """
    Round ``v`` to the nearest multiple of ``divisor`` (channel counts
    should be divisible by 8), never going below ``min_value``.

    Taken from
    github.com/rwightman/pytorch-image-models/master/timm/models/layers/helpers.py
    """
    floor = min_value or divisor
    rounded = max(floor, int(v + divisor / 2) // divisor * divisor)
    # Guarantee rounding down never removes more than 10% of the value.
    if rounded < 0.9 * v:
        rounded += divisor
    return rounded
def get_id_from_strat_profile(num_strats_per_population, strat_profile):
    """Returns a unique integer ID representing the requested strategy profile.

    Treats ``strat_profile`` as a mixed-radix number whose digit sizes are
    given by ``num_strats_per_population``, so each of the
    ``np.prod(num_strats_per_population)`` profiles maps to a distinct
    integer in {0, ..., num_strat_profiles - 1} (Horner evaluation of
    b_n + a_n * (b_{n-1} + a_{n-1} * (...))).

    This is helpful for querying the element of our finite-population Markov
    transition matrix that corresponds to a transition between a specific
    pair of strategy profiles.

    Args:
      num_strats_per_population: List of strategy sizes for each population.
      strat_profile: The strategy profile (list of integers corresponding to
        the strategy of each agent) whose ID is requested.

    Returns:
      Unique ID of strat_profile.
    """
    profile_id = 0
    for num_strats, strat in zip(num_strats_per_population, strat_profile):
        profile_id = profile_id * num_strats + strat
    return profile_id
def get_trusted_advisor_checks(client):
    """
    Build a list of dicts with check information, in the form
    [{checkId: <val>, metadata: <val>}, ...].

    :param client: AWS support client exposing describe_trusted_advisor_checks
    :return: checks
    """
    response = client.describe_trusted_advisor_checks(language='en')
    return [
        {'checkId': check['id'], 'metadata': check['metadata']}
        for check in response["checks"]
    ]
import re
def ToLowerCamel(name):
    """Convert a snake_case name to lowerCamelCase."""
    def upcase(match):
        # '_x' -> 'X': drop the underscore, capitalise the letter.
        return match.group(0)[-1].upper()

    return re.sub('_[a-z]', upcase, name)
from datetime import datetime
def timestamp(t=None):
    """
    Format a UTC datetime as an ooni report timestamp.

    The format follows ISO 8601 in UTC time, with seconds included but the
    ':' separators omitted.

    Example:
        for "10:12:34 AM, June 23 1912" (datetime(1912, 6, 23, 10, 12, 34))
        the timestamp is "1912-06-23T101234Z".

    Args:
        t (datetime): the time to represent (*MUST* be expressed in UTC).
            Defaults to the current time in UTC when omitted.
    """
    when = datetime.utcnow() if t is None else t
    return when.strftime("%Y-%m-%dT%H%M%SZ")
def identity(x):
    """Return *x* unchanged -- a named alternative to ``lambda x: x``."""
    return x
import torch
def try_gpu(e):
    """Move the given tensor to the GPU when CUDA is available, otherwise
    return it unchanged.

    Args:
        e: (torch.Tensor)

    Returns:
        e: (torch.Tensor)
    """
    return e.cuda() if torch.cuda.is_available() else e
import torch
def reparameterize(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
""" The reparameterization trick (https://arxiv.org/abs/1312.6114) in a Gaussian distribution.
Args:
mean (torch.Tensor): The mean of the distribution.
logvar (torch.Tensor): The log-variance of the distribution.
Returns:
torch.Tensor: sampled values
"""
std = torch.exp(logvar * 0.5)
eps = torch.randn_like(std)
return mean + std * eps | 8314e9f69aec5fe9f5582435ae96658dc29062b9 | 112,853 |
from datetime import datetime
def make_output_dir_name(args):
    """Build a unique ./output directory name from the current time and the
    script arguments."""
    stamp = datetime.now().strftime('%Y%m%d-%H%M')
    parts = [f'./output/{stamp}_epochs={args.epochs}_lr={args.lr}']
    parts.append('_with-pretrained-backbone' if args.pretrained_backbone
                 else '_no-pretrained-backbone')
    if args.no_geometry_loss:
        parts.append('_no-geometry-loss')
    if args.resume:
        # Extract the date prefix of the checkpoint being resumed, e.g.
        # 20210320-1439 in output/20210320-1439_epochs=1_lr=0.005/checkpoint.pth
        parts.append(f'_resume={str(args.resume.parent.name).split("_")[0]}')
    return ''.join(parts)
def _contains(search_str, attr_str):
"""Contains-operator which is used to find a view"""
return search_str in attr_str | 652e390b291a9099b7ac004cbfae199af23000cf | 112,858 |
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0 | 7fd1024cde81b7f99142889e0f389c679ad11fc4 | 112,861 |
def assign_cm(val, len, wf):
    """
    Simple helper used in compare_events: build a confusion-matrix label
    series of length ``len`` whose first and last ``wf`` entries are 'tn'
    and whose interior is ``val``.

    Arguments:
        val: label for the interior of the series ('tp', 'fp' or 'fn')
        len: total length of the series (NOTE: shadows the builtin ``len``)
        wf: integer widening factor -- number of 'tn' entries at each edge

    Returns:
        cm: list of labels of length ``len``
    """
    return ['tn' if i < wf or i >= len - wf else val for i in range(len)]
from typing import List
import random
def random_matrix(a: int, b: int) -> List[List[int]]:
    """
    Generate a random boolean matrix in which every row and every column
    contains a 1 somewhere.

    Parameters
    ----------
    a : number of rows.
    b : number of columns.

    Returns
    -------
    A random a-by-b matrix of 0/1 ints.
    """
    # One guaranteed 1 per row, placed at a randomly permuted column.
    col_for_row = list(range(b))
    random.shuffle(col_for_row)
    grid = [[0 for _ in range(b)] for _ in range(a)]
    for row in range(a):
        grid[row][col_for_row[row]] = 1
    # One guaranteed 1 per column, placed at a random row.
    for col in range(b):
        grid[random.randrange(a)][col] = 1
    return grid
import importlib
def create_loss(hypes):
    """
    Create the loss function based on the given loss name.

    Imports ``opencood.loss.<core_method>`` and instantiates the class whose
    name matches ``core_method`` with underscores removed (case-insensitive),
    passing it the configured args.

    Parameters
    ----------
    hypes : dict
        Configuration params for training; must contain
        hypes['loss']['core_method'] and hypes['loss']['args'].

    Returns
    -------
    criterion : opencood.object
        The loss function.
    """
    loss_func_name = hypes['loss']['core_method']
    loss_func_config = hypes['loss']['args']
    loss_filename = "opencood.loss." + loss_func_name
    loss_lib = importlib.import_module(loss_filename)
    loss_func = None
    # Strip underscores so e.g. 'point_pillar_loss' matches class
    # 'PointPillarLoss' in the case-insensitive comparison below.
    target_loss_name = loss_func_name.replace('_', '')
    # NOTE(review): if several names match, the *last* one found wins.
    for name, lfunc in loss_lib.__dict__.items():
        if name.lower() == target_loss_name.lower():
            loss_func = lfunc
    if loss_func is None:
        # NOTE(review): exit(0) reports *success* to the shell even though
        # this is an error path -- consider raising or exit(1); left
        # unchanged here to preserve behavior.
        print('loss function not found in loss folder. Please make sure you '
              'have a python file named %s and has a class '
              'called %s ignoring upper/lower case' % (loss_filename,
                                                       target_loss_name))
        exit(0)
    criterion = loss_func(loss_func_config)
    return criterion
def BKJD2BJD(bkjd):
    """Convert BKJD to BJD by adding the fixed offset 2454833.0 days."""
    BJD_OFFSET = 2454833.0
    return bkjd + BJD_OFFSET
def summary_format(summary_dict):
    """
    Format a summary dict into the desired string form.

    Note the output is the repr of the dict's items view, e.g.
    "dict_items([('a', 1)])".

    :param summary_dict: dictionary with percents
    :return: str of the dict's items in the desired format
    """
    items_view = summary_dict.items()
    return str(items_view)
def getAssetBasePrice(Q, Sq, R, S):
    """
    Calculate and return the base asset price, (Q/Sq) / (R/S).
    """
    quote_ratio = Q / Sq
    base_ratio = R / S
    return quote_ratio / base_ratio
def remove_white_space(string):
    """ Remove all whitespace from the given string.

    Literal two-character escape sequences (backslash-n and backslash-t)
    are also treated as whitespace and removed.

    :param string: The string in question
    :return: The resulting string without whitespace
    """
    cleaned = string.replace('\\n', ' ').replace('\\t', ' ')
    return ''.join(cleaned.split())
def nodes_in_states(baremetal_client, states):
    """List the introspectable nodes whose provision_state is in *states*.

    Only nodes that are out of maintenance and not associated with an
    instance are considered.
    """
    candidates = baremetal_client.node.list(maintenance=False,
                                            associated=False)
    return [n for n in candidates if n.provision_state in states]
def factorialTrailingZeros(n):
    """
    Count the number of trailing 0s in n!.

    Uses Legendre's formula: the trailing-zero count equals the number of
    factors of 5 in n! (factors of 2 are always more plentiful), i.e.
    floor(n/5) + floor(n/25) + floor(n/125) + ...  This runs in O(log n)
    instead of materialising the astronomically large factorial and
    stripping its zeros digit by digit.

    Parameters:
        n (int): the number whose factorial's trailing 0s are counted.

    Returns:
        trailingZeros (int): the number of trailing 0s of n!, or the string
        "Failed" (after printing an error message) when n is not a
        non-negative integer.
    """
    # Validate directly instead of raise-and-swallow with a bare except.
    if not isinstance(n, int) or n < 0:
        print("Error: Invalid input. Please try again with a positive integer only.")
        return "Failed"
    trailingZeros = 0
    power_of_five = 5
    while power_of_five <= n:
        trailingZeros += n // power_of_five
        power_of_five *= 5
    return trailingZeros
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.