content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def ensure_all_alternatives_are_chosen(alt_id_col, choice_col, dataframe):
    """Verify that every alternative in the dataset is chosen at least once.

    This is needed for model identification; raises ValueError otherwise.

    Parameters
    ----------
    alt_id_col : str
        Column in `dataframe` holding the alternative identifiers.
    choice_col : str
        Column of ones and zeros marking whether a row is the chosen
        alternative for the given individual.
    dataframe : pandas.DataFrame
        Estimation data containing both columns above.

    Returns
    -------
    None.
    """
    chosen_mask = dataframe[choice_col] == 1
    unchosen = (set(dataframe[alt_id_col].unique())
                - set(dataframe.loc[chosen_mask, alt_id_col].unique()))
    if unchosen:
        raise ValueError(
            "The following alternative ID's were not chosen in any choice "
            "situation: \n{}".format(unchosen))
    return None
def format_resource_id(resource, separator=":"):
    """Concatenate the four-part ID of an ArchivesSpace resource record.

    :param dict resource: an ArchivesSpace resource.
    :param str separator: separator inserted between the id parts.
        Defaults to `:`.
    :returns: a concatenated four-part ID for the resource record.
    :rtype: str
    """
    parts = []
    for index in range(4):
        key = "id_{0}".format(index)
        # stop at the first missing part, like the original KeyError break
        if key not in resource:
            break
        parts.append(resource[key])
    return separator.join(parts)
def get_dynamic_descriptor_children(descriptor, user_id, module_creator=None, usage_key_filter=None):
    """
    Return the children of `descriptor`, supporting descriptors whose
    children are computed dynamically.
    """
    if not descriptor.has_dynamic_children():
        return descriptor.get_children(usage_key_filter)
    # Re-use the descriptor itself when it is already bound to this user;
    # otherwise let the caller-supplied factory bind a fresh module.
    bound_module = None
    if descriptor.scope_ids.user_id and user_id == descriptor.scope_ids.user_id:
        bound_module = descriptor
    elif module_creator:
        bound_module = module_creator(descriptor)
    return bound_module.get_child_descriptors() if bound_module else []
from pathlib import Path
def check_directory_exists(path: str):
    """
    Given directory path check if directory exists
    :param path: Directory path
    :return: True if directory exists, False otherwise
    """
    return Path(path).is_dir()
def _get_variants(data):
"""Retrieve set of variant calls to use for heterogeneity analysis.
"""
supported = ["vardict", "vardict-java", "vardict-perl", "freebayes", "mutect"]
out = []
for v in data.get("variants", []):
if v["variantcaller"] in supported:
out.append((supported.index(v["variantcaller"]), v))
out.sort()
return [xs[1] for xs in out] | da61fb5ea9a699b6145120ab3d8b7485daab551c | 112,258 |
def setting_attrs(obj, **attrs):
    """Attach every keyword argument to ``obj`` as an attribute, then return ``obj``."""
    for name, value in attrs.items():
        setattr(obj, name, value)
    return obj
def real_attenuation(original_extract, real_extract):
    """
    Real Attenuation
    :param float original_extract: Original degrees Plato
    :param float real_extract: Real degrees Plato of finished beer
    :return: The percent of real attenuation
    :rtype: float
    """
    extract_drop = original_extract - real_extract
    return extract_drop / original_extract
def expandtabs(s, tabsize=8):
    """expandtabs(s [,tabsize]) -> string
    Return a copy of the string s with all tab characters replaced
    by the appropriate number of spaces, depending on the current
    column, and the tabsize (default 8).
    """
    finished = ''
    current = ''
    for ch in s:
        if ch == '\t':
            # pad out to the next tab stop, measured from the line start
            current += ' ' * (tabsize - len(current) % tabsize)
            continue
        current += ch
        if ch == '\n':
            finished += current
            current = ''
    return finished + current
import re
def ronametoident(name):
    """
    Turn resource object name into an identifier containing only letters,
    digits and underscore characters.
    """
    underscored = re.sub(r"\s", '_', name)   # whitespace -> underscores
    return re.sub(r"\W", "", underscored)    # drop remaining non-identifier chars
from datetime import datetime, timezone
def parse_timestamp(value: str):
    """
    Parses a string timestamp into a timezone-aware (UTC) datetime value.

    The numeric value is first interpreted as seconds since the epoch.  If
    that exceeds the platform's maximum timestamp, it is re-tried as
    milliseconds and then as microseconds (the previous docstring claimed
    ms/us/ns, but the divisors actually implement s/ms/us).

    :param value: string holding an integer epoch timestamp
    :return: an aware ``datetime`` in UTC
    :raises ValueError: if ``value`` is not an integer or is out of range
    """
    try:
        timestamp = int(value)
    except ValueError:
        raise ValueError(f'{value} is not a valid timestamp')
    # Try seconds, then milliseconds, then microseconds.
    for divisor in (1, 1000, 1000000):
        try:
            # fromtimestamp with an explicit tz fixes the old
            # utcfromtimestamp(...).astimezone() bug, which re-interpreted
            # the naive UTC result in the machine's local timezone.
            return datetime.fromtimestamp(timestamp / divisor, tz=timezone.utc)
        except (OverflowError, ValueError, OSError):
            continue
    # Timestamp is out of range at every scale
    raise ValueError(f'{value} exceeds maximum value for a timestamp')
def join_range(r):
    """Converts (1, 2) -> "1:2"
    """
    if not r:
        return None
    return ":".join(str(part) for part in r)
from random import gauss
def random_mbh(type='agn'):
    """randomizes a black hole mass (in solar masses). one can choose between ``agn`` and ``xrb``."""
    # log-normal mass distributions; AGN parameters follow jin12.
    log_mass_params = {'agn': (7.83, 0.63), 'xrb': (1.1, 0.15)}
    if type not in log_mass_params:
        raise Exception('type must be agn or xrb')
    mu, sigma = log_mass_params[type]
    return 10**gauss(mu, sigma)
def spaces(text):
    """Returns whitespace equal to the length of the given text.
    This is useful for making things line up.
    """
    return len(text) * ' '
def problem_30_narcissistic_number(power):
    """ Problem 30: Find sum of numbers that can be written as the sum of powers of their digits.
    Args:
        power (int): The exponent of the digits.
    """
    # Upper bound for the search: a d-digit number cannot exceed d * 9**power.
    upper = len(str(9 ** power)) * (9 ** power)
    total = 0
    for candidate in range(2, upper):
        if sum(int(digit) ** power for digit in str(candidate)) == candidate:
            total += candidate
    return total
def post_process_dialog_submission(*, submission_data, template):
    """Process the ``submission`` data from a Slack dialog submission.
    Parameters
    ----------
    submission_data : `dict`
        The ``submission_data`` from the Slack dialog's event payload. Keys are
        the ``name`` attributes of fields. Values are the user contents of the
        fields, or the ``value`` attributes from select menus.
    template : `templatekit.repo.BaseTemplate`
        The corresponding template instance.
    Returns
    -------
    data : `dict`
        A cleaned-up copy of the ``submission_data`` parameter. See Notes
        for details of the post processing steps.
    Notes
    -----
    The following steps occur to process ``submission_data``
    - Drop any null values from select fields so that we get defaults from
      ``cookiecutter.json``.
    - Replace any truncated values from select fields with the full values.
    """
    # Drop any null fields so that we get the defaults from cookiecutter.
    data = {k: v for k, v in submission_data.items() if v is not None}
    for field in template.config["dialog_fields"]:
        if "preset_groups" in field:
            # Handle as a preset_groups select menu: the selected label
            # expands into the several key/value presets attached to it.
            selected_label = data[field["label"]]
            for option_group in field["preset_groups"]:
                for option in option_group["options"]:
                    if option["label"] == selected_label:
                        for k, v in option["presets"].items():
                            data[k] = v
            # The label key was only a menu handle; drop it from the result.
            del data[field["label"]]
        elif "preset_options" in field:
            # Handle as a preset select menu (flat list of options).
            selected_value = data[field["label"]]
            for option in field["preset_options"]:
                if option["value"] == selected_value:
                    for k, v in option["presets"].items():
                        data[k] = v
            del data[field["label"]]
        elif field["component"] == "select":
            # Handle as a regular select menu
            try:
                selected_value = data[field["key"]]
            except KeyError:
                # If field not in data, then it was not set, so use defaults
                continue
            # Replace any truncated values from select fields with full values
            for option in field["options"]:
                if option["value"] == selected_value:
                    data[field["key"]] = option["template_value"]
                    # NOTE(review): this `continue` only affects the inner
                    # option loop; a `break` was probably intended — confirm.
                    continue
    return data
def min_max_scaling(x, min_x, max_x):
    """ Helper function to calculate min/max scaling for x """
    span = max_x - min_x
    return (x - min_x) / span
def init_loop_state(file_counter=0):
    """
    Initialize the file row counter, the file counter,
    and the list representing file data.
    Janky, I know, but this needed to be done in 2 spots.
    """
    header_row = ['Date', 'Weight (lb)', 'Fat mass (lb)']
    return (0, file_counter + 1, [header_row])
def has_error(json):
    """
    Returns True if the query has an error; False otherwise.
    Given a JSON response to a currency query, this returns the
    opposite of the value following the keyword "valid". For example,
    if the JSON is
    '{ "src":"", "dst":"", "valid":false, "err":"Currency amount is invalid." }'
    then the query is not valid, so this function returns True (It
    does NOT return the message 'Source currency code is invalid').
    Parameter json: a json string to parse
    Precondition: json is the response to a currency query
    """
    # substring check: the response is invalid exactly when it says "false"
    return 'false' in json
import re
def format_text_time_zone(text):
    """Returns text after removing blank spaces inside time zone text.
    Parameters
    ----------
    text : str
        the text to be formatted.
    Returns
    -------
    str
        a string formatted to match time zone (ex. UTC+2).
    """
    # collapse whitespace around each sign character
    for sign in ('+', '-'):
        text = re.sub(r'\s*' + re.escape(sign) + r'\s*', sign, text)
    return text
def get(tokens):
    """Returns the result of tasks executed in parallel by apalis.
    Parameters
    ----------
    tokens : list
        List of objects of the type Token.
    Returns
    -------
    list
        List of results
    Examples
    --------
    >>> handler_list = [Handler(ClassName(i)) for i in range(10)]
    >>> apalis.get([g.f() for g in handler_list])
    """
    results = []
    for token in tokens:
        results.append(token())
    return results
def amps_m2_to_emu(amps_per_m2):
    r"""Converts \(Am^{2}\) to \(emu\).

    1 A.m^2 == 10^3 emu.  The docstring is raw so the LaTeX ``\(``/``\)``
    escapes no longer trigger a SyntaxWarning on modern Python.
    """
    return amps_per_m2*1e3
from typing import List
import json
def strings_as_json(strings: List[str]):
    """Convert a list of strings to JSON.
    Args:
        strings: A list of str.
    Returns: JSON dump of the strings; a lone string is returned bare and an
        empty list becomes "".
    """
    if not strings:
        return ""
    if len(strings) == 1:
        return strings[0]
    return json.dumps(strings)
from pathlib import Path
def lorem(fixtures: Path) -> str:
    """Text for testing."""
    return (fixtures / "lorem.txt").read_text()
def create_output(instance, item):
    """Create Terraform Module for Output the Defined Parameters."""
    interpolation = "${module.%s.%s}" % (instance, item)
    return {"value": [interpolation]}
def userInputCharacterFor10thAxis(userInputCharacter):
    """ Maps the chacter '0' to 10th axis for user's input """
    return 10 if userInputCharacter == "0" else int(userInputCharacter)
import time
def check_timestamp(timeStamp):
    """ Checks the validity of a timeStamp.
    timeStamp - (YYYY-MM-DD HH:MM:SS in UTC)
    returns True or False
    """
    try:
        time.strptime(timeStamp, "%Y-%m-%d %H:%M:%S")
    except (ValueError, TypeError):
        # Narrowed from a bare `except:`, which also hid programming errors
        # and swallowed KeyboardInterrupt/SystemExit.  TypeError covers
        # non-string input such as None.
        return False
    return True
def squash_int_range(ilist):
    """Takes a list of integers and squashes consecutive values into a string
    range. Returned list contains mix of strings and ints.

    Example: [3, 1, 2, 5] -> ["1-3", 5]
    """
    irange = []
    rstart = None
    rprev = None
    # BUG FIX: the original called `sorted(ilist)` and discarded the result,
    # so unsorted input produced wrong ranges.  Bind the sorted copy.
    ilist = sorted(ilist)
    for i, value in enumerate(ilist):
        if rstart is None:
            if i == (len(ilist) - 1):
                irange.append(value)
                break
            rstart = value
        if rprev is not None:
            if rprev != (value - 1):
                # run ended: emit either a single value or a "start-end" range
                if rstart == rprev:
                    irange.append(rstart)
                else:
                    irange.append("{}-{}".format(rstart, rprev))
                if i == (len(ilist) - 1):
                    irange.append(value)
                rstart = value
            elif i == (len(ilist) - 1):
                irange.append("{}-{}".format(rstart, value))
                break
        rprev = value
    return irange
def str_wrap_double(string):
    """ Adds double quotes around the input string """
    return '"{}"'.format(string)
import fnmatch
def filter_outputs(patterns, sys):
    """
    Find all outputs of the given system that match one or more of the
    strings given in patterns.

    Parameters
    ----------
    patterns : str or Sequence
        A string or sequence of strings to be matched in the outputs of the
        given system. These may include glob patterns.
    sys : System
        The OpenMDAO system whose outputs are to be filtered.

    Returns
    -------
    dict of {str: dict}
        Matching promoted output names mapped to their 'units' and 'shape'
        metadata.
    """
    meta = sys.get_io_metadata(iotypes=('output',),
                               metadata_keys=['shape', 'units'])
    outputs = {opts['prom_name']: opts for opts in meta.values()}
    names = list(outputs)
    matched = set()
    for pattern in patterns:
        matched.update(fnmatch.filter(names, pattern))
    return {name: {'units': outputs[name]['units'],
                   'shape': outputs[name]['shape']}
            for name in matched}
def angle_between(last_angle, this_angle):
    """
    >>> angle_between(50.0, 50.0)
    0.0
    >>> angle_between(60.0, 50.0)
    -10.0
    >>> angle_between(50.0, 60.0)
    10.0
    >>> angle_between(350.0, 5.0)
    15.0
    >>> angle_between(5.0, 350.0)
    -15.0
    """
    # normalise into [0, 360), then shift anything past 180 to the
    # negative (counter-clockwise) side
    delta = (this_angle - last_angle) % 360
    if delta > 180:
        delta -= 360
    return delta
def check_empty_value(value):
    """Replaces an empty value with 0."""
    try:
        return int(value)
    except ValueError:
        # anything that cannot be parsed as an int counts as "empty"
        return 0
def convert_from_str(strval):
    """Returns param as int, long, float, or string, whichever is best."""
    for caster in (int, float):
        try:
            return caster(strval)
        except ValueError:
            pass
    # neither numeric form worked; map the literal 'None' to None
    return None if strval == 'None' else strval
def calc_sals_ratio_ts(sals_ts):
    """
    Calculate the ratio between the saliency values of consecutive fixations.
    Useful for calculating rho of the full model (see paper for details).
    :param sals_ts: list of lists of time serieses of saliency values
    :return: list of lists of arrays num_images x num_subjects x
        (T(image, subject) - 1) of ratios between the saliencies.
    """
    return [[subject_ts[1:] / subject_ts[:-1] for subject_ts in sal_ts]
            for sal_ts in sals_ts]
def api_response(success, message, http_status_code, data=None):
    """
    API Response common method.
    Args:
        success (bool): API status
        message (str): Message to display
        http_status_code (str or int): HTTP Response type. i.e. 200 or STATUS_OK
        data (dict): Data to be sent in api Response
    Returns: API response with payload and HTTP status code
    """
    # `data=None` replaces the mutable default `data={}`; any non-dict value
    # (including None) degrades to {} exactly as before.
    payload = {"success": success,
               "message": message,
               "data": data if isinstance(data, dict) else {}}
    return payload, http_status_code
def linear(x):
    """
    Identity (linear) activation.

    Params:
        x: Array for which linear activation is to be calculated
    Returns:
        ndarray: the input, unchanged
    """
    return x
def load_pb_from_file(pb_obj, file_name):
    """Load a protobuf object from a file"""
    with open(file_name, "rb") as handle:
        raw = handle.read()
    pb_obj.ParseFromString(raw)
    return pb_obj
def p2_p1(M1, gamma):
    """Pressure ratio across a normal shock (eq. 3.57)
    :param <float> M1: Mach # before the shock
    :param <float> gamma: Specific heat ratio
    :return <float> Pressure ratio p2/p1
    """
    mach_term = M1 ** 2 - 1.0
    return 1.0 + 2.0 * gamma / (gamma + 1.0) * mach_term
def find_resnet_layer(arch, target_layer_name):
    """Find resnet layer to calculate GradCAM and GradCAM++
    Args:
        arch: default torchvision densenet models
        target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below.
        target_layer_name = 'conv1'
        target_layer_name = 'layer1'
        target_layer_name = 'layer1_basicblock0'
        target_layer_name = 'layer1_basicblock0_relu'
        target_layer_name = 'layer1_bottleneck0'
        target_layer_name = 'layer1_bottleneck0_conv1'
        target_layer_name = 'layer1_bottleneck0_downsample'
        target_layer_name = 'layer1_bottleneck0_downsample_0'
        target_layer_name = 'avgpool'
        target_layer_name = 'fc'
    Raises:
        ValueError: if the 'layerN' prefix names a layer other than 1-4.
    Return:
        target_layer: found layer. this layer will be hooked to get forward/backward pass information.
    """
    if "layer" in target_layer_name:
        # Hierarchical name, e.g. 'layer1_bottleneck0_conv1':
        # part 0 selects layer1..layer4, part 1 the block index,
        # parts 2-3 descend into the block's submodules.
        hierarchy = target_layer_name.split("_")
        layer_num = int(hierarchy[0].lstrip("layer"))
        if layer_num == 1:
            target_layer = arch.layer1
        elif layer_num == 2:
            target_layer = arch.layer2
        elif layer_num == 3:
            target_layer = arch.layer3
        elif layer_num == 4:
            target_layer = arch.layer4
        else:
            raise ValueError("unknown layer : {}".format(target_layer_name))
        if len(hierarchy) >= 2:
            # 'bottleneck0' / 'basicblock0' -> numeric block index
            bottleneck_num = int(
                hierarchy[1].lower().lstrip("bottleneck").lstrip("basicblock")
            )
            target_layer = target_layer[bottleneck_num]
        if len(hierarchy) >= 3:
            # named submodule within the block, e.g. 'conv1' or 'downsample'
            target_layer = target_layer._modules[hierarchy[2]]
        if len(hierarchy) == 4:
            # one level deeper, e.g. 'downsample_0'
            target_layer = target_layer._modules[hierarchy[3]]
    else:
        # flat name such as 'conv1', 'avgpool' or 'fc'
        target_layer = arch._modules[target_layer_name]
    return target_layer
def validate(job):
    """
    Perform user specific payload/job validation.
    :param job: job object.
    :return: Boolean (True if validation is successful).
    """
    # No user-specific checks implemented; accept every job.
    return True
def x_from_m_b_y(m, b, y):
    """
    get x from y=mx+b
    :param m: slope (m)
    :param b: b
    :param y: y
    :return: get x from y=mx+b
    """
    rise = y - b
    return rise / m
def in_segment(point, line):
    """Determine if a point is inside a segment, if we already know that
    the point is on the infinite line defined by the segment.

    Returns True when the point lies within the segment's interval, False
    otherwise.  (The original fell off the end and returned None for the
    "outside" case; an explicit bool — still falsy — is returned now.)
    """
    if line[0].x != line[1].x:  # line is not vertical: compare x coordinates
        lo, hi = sorted((line[0].x, line[1].x))
        return lo <= point.x <= hi
    # vertical line: x gives no information, so compare the y coordinates
    lo, hi = sorted((line[0].y, line[1].y))
    return lo <= point.y <= hi
def xor(block_a: "str | bytes", block_b: "str | bytes"):
    """ Computes the XOR of two bytes objects.
    Args:
        block_a (str | bytes): The first block of bytes
        block_b (str | bytes): The second block of bytes
    Returns:
        bytes: The result of block_a ^ block_b
    """
    def as_bytes(block):
        return block.encode() if isinstance(block, str) else block
    accumulator = bytearray(as_bytes(block_a))
    for position, byte in enumerate(as_bytes(block_b)):
        accumulator[position] ^= byte
    return bytes(accumulator)
def closest_object(geometries, point):
    """Find the nearest geometry among a list, measured from fixed point.
    Args:
        geometries: a list of shapely geometry objects
        point: a shapely Point
    Returns:
        Tuple (geom, min_dist, min_index) of the geometry with minimum distance
        to point, its distance min_dist and the list index of geom, so that
        geom = geometries[min_index].
    """
    distance_index_pairs = ((point.distance(geom), idx)
                            for idx, geom in enumerate(geometries))
    min_dist, min_index = min(distance_index_pairs)
    return geometries[min_index], min_dist, min_index
import cProfile
import io
import pstats
def profileit(func):
    """ decorator for profiling function.
    usage:
    >>> def this(var1, var2):
            # do something
    >>> new_this = profileit(this)
    >>> calls, cprofile_text = new_this(var1=1,var2=2)
    """
    def wrapper(*args, **kwargs):
        profiler = cProfile.Profile()
        profiler.runcall(func, *args, **kwargs)
        stream = io.StringIO()
        pstats.Stats(profiler, stream=stream).sort_stats('cumulative').print_stats()
        report = stream.getvalue()
        # first report line looks like "N function calls in 0.000 seconds"
        call_count = int(report.split('\n')[0].lstrip().split(" ")[0])
        return call_count, report
    return wrapper
def mecs_energy_fba_cleanup(fba, attr, **kwargs):
    """
    Clean up the EIA MECS energy FlowByActivity
    :param fba: df, FBA format
    :param attr: dictionary, attribute data from method yaml for activity set
    :param kwargs: optional, can also include bool 'download_FBAs_if_missing'
    :return: df, subset of EIA MECS Energy FBA
    """
    # keep only rows reported in megajoules
    mj_rows = fba['Unit'] == 'MJ'
    return fba.loc[mj_rows].reset_index(drop=True)
def first_entity_matching_attribute(attribute, value, atlas_entities):
    """
    Return the first entity in a list whose ``attribute`` equals ``value``.

    :param str attribute: The name of the attribute to search on each
        atlas entity.
    :param str value: The value of the attribute to search on each
        atlas entity.
    :param atlas_entities: The list of atlas entities to search over.
    :type atlas_entities: list(:class:`~pyapacheatlas.core.entity.AtlasEntity`)
    :raises ValueError:
        A matching entity was not found in the provided entities.
    :return: The atlas entity that maches the attribute and value.
    :rtype: :class:`~pyapacheatlas.core.entity.AtlasEntity`
    """
    for entity in atlas_entities:
        attrs = entity.attributes
        # membership test first so a missing key never equals value=None
        if attribute in attrs and attrs[attribute] == value:
            return entity
    raise ValueError(
        "Unable to find an entity that matches the value of {value} in "
        "an attribute of {attribute} from the {num_entities} provided."
        .format(
            value=value, attribute=attribute,
            num_entities=len(atlas_entities)
        )
    )
def cost_objective(model):
    """
    The cost objective calculates the total cost.
    The total_cost_per_year = discount_factor * price_increment * how_much_bought_from_whom * load_hours
    :param model: model reference
    :return: Total cost
    """
    # NOTE(review): `model` appears to be an optimization model exposing
    # YEARS/SUPPLIER/BLOCKS sets and indexed parameters — confirm the
    # framework (looks like Pyomo) before refactoring.
    starting_year = 2009
    costs = []
    for year in model.YEARS:
        # exponent for compounding the per-supplier cost increase
        index = year-starting_year
        for supplier in model.SUPPLIER:
            for block in model.BLOCKS:
                # discounted, escalated cost of the energy bought in this
                # (year, supplier, block) cell
                cost = model.discount_factor[year] * \
                    pow(model.cost_increase[supplier], index) * \
                    model.buy[year, supplier, block] * \
                    model.supply_cost[supplier, block] * \
                    model.load_hours[block]
                costs.append(cost)
    return sum(costs)
import torch
def reduce(to_reduce: torch.Tensor, reduction: str) -> torch.Tensor:
    """
    Reduce a tensor with the given reduction method.

    Parameters
    ----------
    to_reduce : torch.Tensor
        the tensor, which shall be reduced
    reduction : str
        one of 'elementwise_mean' | 'none' | 'sum'

    Returns
    -------
    torch.Tensor
        reduced Tensor

    Raises
    ------
    ValueError
        if an invalid reduction parameter was given
    """
    reducers = {
        'elementwise_mean': torch.mean,
        'sum': torch.sum,
        'none': lambda tensor: tensor,
    }
    if reduction not in reducers:
        raise ValueError('Reduction parameter unknown.')
    return reducers[reduction](to_reduce)
from typing import Optional
def _parse_result(potfile_line: str) -> Optional[str]:
"""Helper function which parses a single .potfile line and returns the password part.
Hashcat uses its .potfile format to report results. In this format, each line consists of the
hash and its matching word, separated with a colon (e.g. `asdf1234:password`).
"""
if potfile_line:
return potfile_line.split(":")[-1].strip()
return None | 8b02d58490b633b4b0c87c0cbc38da834d5dad0f | 112,409 |
def _check_params(params, error_callback):
"""Predicate function returning bool depending on conditions.
List of conditions:
- `host` and `key` must be specified in params,
- `dc` or `env` must be specified in params,
- `sections` must be specified in params.
Returns:
bool: True if all conditions are met, False otherwise
"""
required_params = {'host', 'key'}
diff = required_params - {k for k, v in params.items() if v}
if diff:
error_callback('ERROR: {} are required.'.format(
', '.join(['--{}'.format(d) for d in diff]))
)
return False
dc = params.get('dc')
net_env = params.get('net_env')
if (dc and net_env) or (not dc and not net_env):
error_callback(
'ERROR: Only DC or ENV mode available.',
)
return False
sections = params.get('sections')
if not sections:
error_callback(
'ERROR: option `sections` are required.',
)
return False
return True | 9106e6e4ca9e52fd6090ff9abb4553c37200ad17 | 112,411 |
def angryProfessor(k, a):
    """
    Given the arrival time of each student and a threshold number of attendees, determine if the class is canceled.
    :param k: int threshold for minimum on time student
    :param a: array of arrival time of students
    :return: True if the class is canceled, False otherwise
    """
    # arrival <= 0 means the student was on time
    on_time = sum(1 for arrival in a if arrival <= 0)
    return on_time < k
from typing import Any
from typing import List
import inspect
def get_super_classes_names(instance_or_class: Any) -> List[str]:
    """Given an instance or a class, computes and returns a list of its superclasses.

    Parameters
    ----------
    instance_or_class : Any
        An instance of an object or a class.

    Returns
    -------
    List[str]
        List of superclasses names strings.
    """
    cls = instance_or_class if inspect.isclass(instance_or_class) \
        else instance_or_class.__class__
    names = []
    for ancestor in inspect.getmro(cls):
        # "<class 'pkg.Name'>" -> "pkg.Name"
        cleaned = (
            str(ancestor).replace("'", "").replace("<class ", "").replace(">", "")
        )
        names.append(cleaned)
    return names
def conv(num):
    """Convert float to string and removing decimal place as necessary."""
    if isinstance(num, float) and num.is_integer():
        num = int(num)
    return str(num)
import re
def not_raw(product_name):
    """
    Check if the product is of type RAW
    we check for this because we are not interested in
    creation and ingestion of a RAW dataset.
    :param product_name:
    :return: False only when the product-type group of the name is "RAW"
    """
    match = re.search(r'([\w.-]+)_([\w.-]+)_([\w.-]+)__([\d])([\w])([\w.-]+)',
                      product_name)
    if not match:
        # names that don't follow the pattern are kept (treated as not RAW)
        return True
    return match.group(3) != "RAW"
def isfloat(string):
    """Returns True if the argument can be converted to a float."""
    try:
        float(string)
    except (ValueError, TypeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit).  ValueError covers bad strings,
        # TypeError covers non-numeric objects such as None.
        return False
    return True
def read_txt(fname, separator=None, **kwargs):
    """
    Read a text-file's non-empty lines and split them on `separator`.

    Parameters
    ----------
    fname : str
        Name of the file including the path if needed.
    separator : str
        Character separating different words.  By default None, which
        effectively is a space (or more spaces).

    Returns
    -------
    content : list
        List of split lines (lists of words).
    """
    content = []
    with open(fname, 'r') as handle:
        for raw_line in handle:
            words = raw_line.split(separator)
            # split(None) returns [] for blank lines, which are skipped
            if words:
                content.append(words)
    return content
from typing import Set
def parse_scopes(scope_string: str) -> Set[str]:
    """
    Takes a scope string (as seen in the `scope` query parameter of a Slack app installation URL)
    and returns a set of scopes.
    """
    return {scope for scope in scope_string.split(",")}
import re
def valid_password(password):
    """Checks if password is valid"""
    # valid when 3-20 characters long; preserves the original contract of
    # returning a falsy value (None/'') or a truthy match object
    pattern = re.compile(r"^.{3,20}$")
    return password and pattern.match(password)
def find_king(game_board, bw):
    """
    Finds right king
    :param game_board: array of board
    :param bw: black or white to move?
    :return: Square with king, None if king isn't present
    """
    if bw == "w":
        target = "K"
    elif bw == "b":
        target = "k"
    else:
        # unknown side: original scanned and found nothing
        return None
    for row in game_board:
        for square in row:
            piece = square.get_content()
            if piece is not None and piece.get_type() == target:
                return square
    # king not present on the board
    return None
def _pythonify_name(name):
"""Coerce the name to a valid Python identifier"""
name = ''.join(c if c.isalnum() else '_' for c in name)
if name[:1].isdigit():
name = '_' + name
return name | 6bbb6c7daef7c6687b4261f6e82c0d4102a74511 | 112,452 |
def getNumFavorited(msg):
    """Counts the number of favorites the message received."""
    return len(msg['favorited_by'])
def OnPremisesConfiguration(sql_messages, source_ip_address, source_port):
    """Generates the external master configuration for the instance.
    Args:
        sql_messages: module, The messages module that should be used.
        source_ip_address: string, the IP address of the external data source.
        source_port: number, the port number of the external data source.
    Returns:
        sql_messages.OnPremisesConfiguration object.
    """
    host_port = '{0}:{1}'.format(source_ip_address, source_port)
    return sql_messages.OnPremisesConfiguration(hostPort=host_port)
from operator import add
def getAccuracy(dtModel, data):
    """
    Return accuracy of DecisionTreeModel on the given RDD[LabeledPoint].
    Returns 0 when the RDD is empty.
    """
    # seqOp adds 1 for every (prediction, truth) pair that matches
    seqOp = (lambda acc, x: acc + (x[0] == x[1]))
    predictions = dtModel.predict(data.map(lambda x: x.features))
    truth = data.map(lambda p: p.label)
    # zip pairs each prediction with its label; aggregate counts the matches
    # (operator.add combines the per-partition counts)
    trainCorrect = predictions.zip(truth).aggregate(0, seqOp, add)
    if data.count() == 0:
        return 0
    # 0.0 + count forces float division under Python 2 semantics
    return trainCorrect / (0.0 + data.count())
def tie_breaker_index(tie_breaker, name):
    """
    Return the index of name in tie_breaker, if it is present
    there, else return the length of list tie_breaker.
    Args:
        tie_breaker (list): list of choices (strings)
            list may be incomplete or empty
        name (str): the name of a choice
    Returns:
        (int): the position of name in tie_breaker, if present;
            otherwise the length of tie_breaker.
    Example:
    >>> tie_breaker = ['a', 'b']
    >>> tie_breaker_index(tie_breaker, 'a')
    0
    >>> tie_breaker_index(tie_breaker, 'c')
    2
    """
    try:
        return tie_breaker.index(name)
    except ValueError:
        return len(tie_breaker)
import requests
def get_match_history(less_then_match_id=None):
    """
    Makes api call to open dota pro matches route
    Returns list of 100 match result objects
    If a match Id is provided 100 matches before specified match are returned
    Input:
        None: Returns 100 most recent matches
        or
        less_then_match_id(int): Id of match to return results before
    Output:
        data(list): list of match objects(docs at https://docs.opendota.com/#tag/pro-matches)
    """
    # `is None` rather than `== None`: identity is the correct idiom here.
    if less_then_match_id is None:
        response = requests.get('https://api.opendota.com/api/proMatches')
    else:
        response = requests.get(f'https://api.opendota.com/api/proMatches?less_than_match_id={less_then_match_id}')
    return response.json()
def get_board_dict(data):
    """Convert the data dictionary into a format that is used by the
    print_board function.

    Args:
        data (dict): maps token categories ("upper", "lower", or any other
            key for blocking tokens) to lists of (symbol, row, col) tuples.

    Returns:
        dict: maps (row, col) coordinates to display strings — "(S)" for
        upper tokens (symbol upper-cased), "(s)" for lower tokens, and
        "(X)" for everything else.
    """
    board_dict = {}
    # `category` instead of `type`: the original shadowed the builtin.
    for category, tokens in data.items():
        for tok in tokens:
            if category == "upper":
                board_dict[(tok[1], tok[2])] = "({})".format(tok[0].upper())
            elif category == "lower":
                board_dict[(tok[1], tok[2])] = "({})".format(tok[0])
            else:
                board_dict[(tok[1], tok[2])] = "(X)"
    return board_dict
def spec2nwbfields(spec):
    """Reads spec and determines __nwbfields__ which tells pynwb which
    attributes to write to disk.

    Parameters
    ----------
    spec: dict
        A spec object with an ``attributes`` list and optional ``datasets``
        and ``groups`` collections; every entry exposes a ``name``.

    Returns
    -------
    tuple
        Variable names; typed groups (containing ``neurodata_type_inc`` or
        ``neurodata_type_def``) become ``{'name': ..., 'child': True}``.
    """
    # `fields` rather than `vars`: the original shadowed the builtin vars().
    fields = [attrib.name for attrib in spec.attributes if attrib.name]
    if hasattr(spec, 'datasets'):
        fields += [dataset.name for dataset in spec.datasets if dataset.name]
    if hasattr(spec, 'groups'):
        for attrib in spec.groups:
            if attrib.name:
                if 'neurodata_type_inc' in attrib or 'neurodata_type_def' in attrib:
                    fields.append({'name': attrib.name, 'child': True})
                else:
                    fields.append(attrib.name)
    return tuple(fields)
def code_finished_will_parse(s, compiler):
    """Report whether a code buffer is complete and whether it will parse.

    Returns a (finished, will_parse) pair:
      (True, True)   -> code block is finished, no predicted parse error
      (True, False)  -> code block is finished because a parse error is predicted
      (False, True)  -> code block is unfinished
      (False, False) cannot occur - a predicted error makes the block done.
    """
    try:
        compiled = compiler(s)
    except (ValueError, SyntaxError, OverflowError):
        # The compiler rejected the buffer outright: done, but broken.
        return True, False
    # A falsy result (e.g. None from codeop) means "needs more input".
    return bool(compiled), True
def extract_sub_attributes(browser):
    """
    Extract sub attribute tags of avatar image from web page
    :param browser: opened browser
    :type browser: webdriver.Chrome
    :return: sub attribute tags of avatar image
    :rtype: list
    """
    elements = browser.find_elements_by_class_name('sub-attr')
    return [element.text for element in elements]
import torch
def linear_quantize(input, scale, zero_point, inplace=False):
    """
    Linearly quantize the input tensor based on scale and zero point.

    Computes round(input * scale - zero_point); see
    https://pytorch.org/docs/stable/quantization.html

    Args:
        input (torch.Tensor): tensor to quantize.
        scale: multiplicative scale factor.
        zero_point: value subtracted after scaling.
        inplace (bool): if True, mutate ``input`` in place and return it.

    Returns:
        torch.Tensor: the quantized tensor.
    """
    # Removed commented-out debug prints from the original.
    if inplace:
        input.mul_(scale).sub_(zero_point).round_()
        return input
    return torch.round(input * scale - zero_point)
def calc_accuracy(pred, real):
    """
    Compute the accuracy of a CNN given predicted and true class arrays.

    Param:
        - pred, a numpy array of predicted classes
        - real, a numpy array of the real classes

    Return:
        - Accuracy as a decimal
    """
    matches = pred == real
    return sum(matches) / len(pred)
import re
def date_is_valid(date: str) -> bool:
    """
    Check whether given date is of format YYYY-MM-DD
    :param date: Date passed as string.
    :return: True if date has format YYYY-MM-DD, False otherwise.
    """
    # fullmatch returns a Match (truthy) or None; bool() collapses that.
    return bool(re.fullmatch(r"\d{4}-\d{2}-\d{2}", date))
from typing import List
def as_bool(data: str) -> List[bool]:
    """
    Parses a string into a list of bools, where '#' are true, and everything else is false
    :param data: String to parse
    :return: The generated list of bools
    """
    return [character == "#" for character in data]
def define_circle(R):
    """Return shape function for circle.

    The returned predicate reports whether a site's (x, y) position lies
    strictly inside a circle of radius R centred on the origin.
    """
    def shape(site):
        x, y = site.pos[0], site.pos[1]
        return x * x + y * y < R * R
    return shape
def _get_internal_bounds(params):
    """Extract the internal bounds from params.

    Args:
        params (pd.DataFrame): See :ref:`params`.

    Returns:
        bounds (tuple): Bounds of the free parameters. The tuple has two entries.
            The first are the lower bounds as numpy array.
            The second are the upper bounds as numpy array.
    """
    free = params.query("_internal_free")
    arr = free[["_internal_lower", "_internal_upper"]].to_numpy()
    return tuple(arr.T)
def get_value(x):
    """Return the value of a tensor as a numpy array.

    NOTE(review): this implementation is an identity pass-through — it
    assumes `x` already is the desired array value; confirm with callers.
    """
    return x
import posixpath
def normalize_path_posix(path):
    """
    normalize_path_posix(path) -> str
    Turn path into a relative POSIX path with no parent directory references.
    """
    # Anchor at '/' so normpath swallows any leading '..' components, then
    # strip the anchor again by taking the path relative to '/'.
    anchored = posixpath.normpath(posixpath.join('/', path))
    return posixpath.relpath(anchored, '/')
def find_column(t, pos=None):
    """
    Find the column of the token or a specific position in the token's data.

    Columns are 1-based: the first character of each line is column 1.

    Argument:
        t - the token (must expose .lexer.lexdata and .lexpos)
        pos - an optional number indicating the position in the input data, if
              not using the token's starting position

    Returns:
        int: the 1-based column of the position within its line.
    """
    # `pos or t.lexpos` (the old code) wrongly ignored an explicit pos of 0.
    position = t.lexpos if pos is None else pos
    last_cr = t.lexer.lexdata.rfind('\n', 0, position)
    # rfind yields -1 when no newline precedes `position`, so
    # position - last_cr is already 1-based for the first line.  The old
    # clamp-to-0-then-add-1 was off by one on every line after the first
    # (the PLY manual's recipe is exactly this subtraction).
    return position - last_cr
def GetChromOrder(sample_blocks):
    """
    Get a list of chroms in sorted order

    Parameters
    ----------
    sample_blocks : list of [hap_blocks]
        each hap_block is a dictionary with keys
        'pop', 'chrom', 'start', 'end'

    Returns
    -------
    chroms : list of int
        list of chromsomes in sorted order
    """
    unique_chroms = {sb['chrom'] for blocks in sample_blocks for sb in blocks}
    return sorted(unique_chroms)
def reduce_shape(shape):
    """
    Reduce dimension in shape to 3 if possible
    """
    try:
        reduced = shape[:3]
    except TypeError:
        # Not sliceable (e.g. a scalar): hand it back unchanged.
        return shape
    return reduced
def str_to_bytes(value, encoding="utf8"):
    """Converts a string argument to a byte string"""
    if isinstance(value, str):
        return value.encode(encoding)
    if isinstance(value, bytes):
        return value
    raise TypeError('%r is not a string' % value)
def returnListWithoutOutliers(data, outlierRange):
    """Return the data with outliers removed, without mutating the input.

    An outlier is defined as a datapoint not in
    [Q1 - 1.5*IQR*outlierRange, Q3 + 1.5*IQR*outlierRange],
    where IQR is the interquartile range: Q3 - Q1

    Args:
        data (list): numeric samples; must be non-empty.
        outlierRange (float): multiplier that widens/narrows the fences.

    Returns:
        list: sorted values that fall inside the fences.
    """
    # Sort a copy: the original sorted the caller's list in place, which is
    # a surprising side effect for a function that returns a new list.
    ordered = sorted(data)
    dataPointsBefore = len(ordered)
    Q1 = ordered[dataPointsBefore // 4]
    Q3 = ordered[3 * dataPointsBefore // 4]
    IQR = Q3 - Q1
    lowerFence = Q1 - 1.5 * IQR * outlierRange
    upperFence = Q3 + 1.5 * IQR * outlierRange
    filteredData = [v for v in ordered if lowerFence <= v <= upperFence]
    print('Removed ' + str(dataPointsBefore - len(filteredData)) + ' outliers')
    return filteredData
def bool_str(v):
    """Convert a boolean to a string."""
    if v:
        return "Yes"
    return "No"
def rasterizeSegment(start_x, start_y, end_x, end_y):
    """Implementation of Bresenham's line rasterization routine.

    This is a slightly modified version of the Python implementation
    one Rosetta code: https://rosettacode.org/wiki/Bitmap/Bresenham%27s_line_algorithm#Python

    Args:
        start_x: the x-coordinate of the start point of the segment
        start_y: the y-coordinate of the start point of the segment
        end_x: the x-coordinate of the end point of the segment
        end_y: the y-coordinate of the end point of the segment

    Returns:
        A list [(x, y)] of the image pixel coordinates along the line
    """
    pixels = []
    delta_x = abs(end_x - start_x)
    delta_y = abs(end_y - start_y)
    cur_x, cur_y = start_x, start_y
    step_x = 1 if start_x <= end_x else -1
    step_y = 1 if start_y <= end_y else -1
    if delta_x > delta_y:
        # Shallow slope: advance along x, nudging y when the error flips.
        error = delta_x / 2.0
        while cur_x != end_x:
            pixels.append((cur_x, cur_y))
            error -= delta_y
            if error < 0:
                cur_y += step_y
                error += delta_x
            cur_x += step_x
    else:
        # Steep slope (or vertical): advance along y instead.
        error = delta_y / 2.0
        while cur_y != end_y:
            pixels.append((cur_x, cur_y))
            error -= delta_x
            if error < 0:
                cur_x += step_x
                error += delta_y
            cur_y += step_y
    pixels.append((cur_x, cur_y))
    return pixels
def add_user_tag(f):
    """Adds my GitHub username as an attribute ``__user_tag__`` to ``f``.

    :param f: Function to decorate
    :type f: function
    :rtype: function
    """
    setattr(f, "__user_tag__", "phetdam")
    return f
def running_varsum(v_k1, x_k, m_k1, m_k):
    """ Computing the running variance numerator.
    Ref: https://www.johndcook.com/blog/standard_deviation/
    """
    delta_prev = x_k - m_k1
    delta_curr = x_k - m_k
    return v_k1 + delta_prev * delta_curr
def print_departments(departments):
    """
    Prints the department courses in a prettier format.

    Parameters:
    -----------
    departments (dict): Departments (str key) and courses in departments (list value)

    returns (NoneType): None
    """
    for department, courses in departments.items():
        print(f"{department}: [{', '.join(courses)}]")
    return None
import asyncio
async def send(data: bytes, writer: asyncio.StreamWriter
               ) -> None:
    """Send a length-prefixed byte array using a stream writer.

    Writes a header line holding the payload length in ASCII decimal,
    followed by the payload itself, then waits for the buffer to drain.

    Args:
        data (bytes): The bytes data to send.
        writer (asyncio.StreamWriter): The stream writer to send data
            on.
    """
    header = b'%d\n' % len(data)
    writer.write(header + data)
    return await writer.drain()
def atoi(text):
    """Convert text to an integer if possible, else return the string."""
    if text.isdigit():
        return int(text)
    return text
def dists_to_nj(matrix, labels):
    """Wraps matrix and labels together for format NJ requires."""
    return {
        (outer, inner): value
        for outer, row in zip(labels, matrix)
        for inner, value in zip(labels, row)
    }
def _OrganizeTryJobResultByCulprits(try_job_culprits):
  """Re-organize try job culprits by revision.

  Args:
    try_job_culprits (dict): A dict of culprits for one step organized by
      test, e.g.:
      {
          'tests': {
              'a_test1': {
                  'revision': 'rev1',
                  'commit_position': '1',
                  'review_url': 'url_1'
              }
          }
      }

  Returns:
    A dict of culprits for one step organized by revision, e.g.:
      {
          'rev1': {
              'revision': 'rev1',
              'commit_position': '1',
              'review_url': 'url_1',
              'failed_tests': ['a_test1']
          }
      }
  """
  if not try_job_culprits or not try_job_culprits.get('tests'):
    return {}

  organized_culprits = {}
  # .items(): iteritems() was Python 2 only and raises AttributeError on
  # Python 3 dicts.
  for test_name, culprit in try_job_culprits['tests'].items():
    revision = culprit['revision']
    if revision in organized_culprits:
      organized_culprits[revision]['failed_tests'].append(test_name)
    else:
      # Shallow-copy so the caller's culprit dicts are not mutated.
      organized_culprits[revision] = dict(culprit)
      organized_culprits[revision]['failed_tests'] = [test_name]
  return organized_culprits
def convert_es_responses_to_list(search_responses: list):
    """
    Convert responses from ElasticSearch to list.
    This will be used in the backend
    """
    def _annotate(hit):
        # Attach score and id onto the hit's own _source dict.
        doc = hit["_source"]
        doc["score"] = hit["_score"]
        doc["submission_id"] = hit["_id"]
        return doc

    return [_annotate(hit) for hit in search_responses]
def get_dictionary_values_ordered(dict_list, key_name):
    """Collect the value stored under 'key_name' from each dictionary in
    'dict_list', preserving the list's order.

    dict_list - a list of dictionaries where each dictionary has the same
        keys. Each dictionary should have at least one key:value pair
        with the key being 'key_name' (required)
    key_name - a String or Int for the key name of interest in the
        dictionaries (required)

    return - a list of values from 'key_name' in the same order as
        'dict_list'"""
    return [entry[key_name] for entry in dict_list]
def binarySearch(left, right, largeEnough, eps):
    """
    Binary search over a real interval for the threshold of a predicate.

    Parameters
    ----------
    left, right : float
        The range [left, right] to search. The dividing point should lie
        inside [left, right], or the result may not be the answer wanted.
    largeEnough : callable
        Accepts one float and reports whether it is "large enough". Must be
        monotonic: True above some threshold, False below it.
    eps : float
        Required accuracy of the search.

    Returns
    -------
    float
        The value just above which largeEnough returns True and below
        which it returns False.
    """
    assert (left < right), "binary search must start from where (left < right), but get left = {} and right = {}.".format(left, right)
    lo, hi = left, right
    mid = (lo + hi) * 0.5
    while hi - lo > eps:
        if largeEnough(mid):
            hi = mid
        else:
            lo = mid
        mid = (lo + hi) * 0.5
    return mid
def set_nth_bit(n: int, i: int) -> int:
    """
    Return ``n`` with bit ``i`` set (0-based, counted from the least
    significant bit).

    Note: despite the parameter names, ``n`` is the value being modified
    and ``i`` is the bit index — the old docstring had this backwards.

    >>> bin(set_nth_bit(0b100000, 0))
    '0b100001'
    >>> bin(set_nth_bit(0b100001, 0))
    '0b100001'
    """
    return n | (1 << i)
def check_length_for_tweet(revision, message):
    """
    Trim trailing words from `message` until revision + message fits in a
    tweet (about 110 usable characters once the url is shortened).

    Args:
        revision (str): text that must be kept intact.
        message (str): text that may lose trailing words.

    Returns:
        tuple: (revision, possibly-shortened message).
    """
    # Iterative instead of recursive: the original recursed forever when the
    # message was emptied but `revision` alone still exceeded the limit,
    # because ' '.join(''.split(' ')[:-1]) is '' again.
    while message and len(revision) + len(message) > 110:
        # get rid of a word
        message = ' '.join(message.split(' ')[:-1])
    return revision, message
def get_id_from_ns_name(ns_name):
    """Parses identifier from prefix-identifier

    :param ns_name: The name of a namespace
    :returns: Identifier or None if there is no - to end the prefix
    """
    _prefix, dash, identifier = ns_name.partition('-')
    if dash:
        return identifier
    return None
import json
def label_mapping(category_names = 'cat_to_name.json'):
    """
    Load the mapping from category label to category name and return it as
    a dictionary keyed by integer category.

    Parameters:
        category_names - name of the file (has to be in the folder
            "ImageClassifier")

    Returns:
        dict - maps the integer encoded categories to the actual names of
            the flowers.
    """
    path_category_names = 'ImageClassifier/' + category_names
    with open(path_category_names, 'r') as f:
        raw_mapping = json.load(f)
    # JSON object keys are strings; convert them to ints for lookups.
    return {int(key): value for key, value in raw_mapping.items()}
import unicodedata
def is_whitespace(ch):
    """Return True if ``ch`` is a whitespace character.

    Treats space, tab, newline and carriage return as whitespace, plus any
    character whose Unicode general category is "Zs" (space separator).
    """
    # Translated the original Chinese comments; membership test replaces the
    # chained == comparisons.
    if ch in (" ", "\t", "\n", "\r"):
        return True
    # unicodedata.category gives the Unicode general category of ch.
    if unicodedata.category(ch) == "Zs":
        return True
    return False
def singlyfy_parameters(parameters):
    """
    Makes a cgi-parsed parameter dictionary into a dict where the values that
    are just a list of a single value, are converted to just that single value.
    """
    for key in parameters:
        value = parameters[key]
        # Unwrap singleton lists/tuples in place; longer sequences stay.
        if isinstance(value, (list, tuple)) and len(value) == 1:
            parameters[key] = value[0]
    return parameters
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.