content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def axprops(dct):
    """Filters `dct` for properties associated with a plot axes.

    Matching entries are *removed* from `dct` (via ``pop``) and returned,
    so the remaining kwargs can be forwarded to e.g. ``plot``.

    Example:
    >>> # Note how kwargs gets split into axes/line properties.
    >>> def myplotter(ax, x, y, **kwargs):
    >>>     ax.set(**axprops(kwargs))
    >>>     ax.plot(x, y, **kwargs)
    """
    # List of included axis properties
    props = ["title", "facecolor", "aspect"]
    # Append xyz-specific props (xlabel, xticks, xscale, xlim, ylabel, ...)
    for ax in ["x", "y", "z"]:
        for p in ["label", "ticks", "scale", "lim"]:
            props.append(ax+p)
    # intersection(dct,props) -- note pop() mutates the caller's dict
    props = {p: dct.pop(p) for p in props if p in dct}
    return props
def get_nonprimitive_nodes (G):
    """
    Obtain the non-primitive nodes of a DAG.

    Input nodes (in_nodes) have in-degree 0; output nodes (out_nodes) have
    out-degree 0. Returns ``(in_nodes, out_nodes, nonprimitives)`` where the
    last list is the concatenation of the first two.
    """
    in_nodes = [node for node in G.nodes() if G.in_degree(node) == 0]
    out_nodes = [node for node in G.nodes() if G.out_degree(node) == 0]
    return in_nodes, out_nodes, in_nodes + out_nodes
def _get_title(item):
"""
Get the first two photo's tags/keywords to make the title and transform it
to title case, as shown on its page.
"""
tags = item.get("keywords", [])[:2]
if len(tags) > 0:
tags.append("Photo")
img_title = " ".join(tags)
return img_title.title() | f7c4a7b181a2b08a3be7f8fc1fa09b3655521ef5 | 122,974 |
def is_virtual_column(col_id):
    """
    Report whether ``col_id`` names a special column that is never
    communicated outside of the sandbox (lookup maps are an example).
    Such column ids begin with '#'.
    """
    return col_id[:1] == '#'
def ascii_art(version):
    """
    Used to build SolidFire ASCII art.
    :param version: version string interpolated into the banner's last line.
    :return: a string with the SolidFire ASCII art.
    """
    # The art is assembled line by line; trailing spaces in each line are
    # significant for the banner's fixed-width layout, so do not reformat.
    art = "\n"
    art += "                                             \n"
    art += "                      77                     \n"
    art += "                     7777                    \n"
    art += "                      77                     \n"
    art += "                      ==                     \n"
    art += "          77IIIIIIIIIIIIIIIIII777            \n"
    art += "        =7                      7=           \n"
    art += "       7                          7          \n"
    art += "     =7                            7=        \n"
    art += "    =7                              7=       \n"
    art += "   =77  7777777777777777777          77=     \n"
    art += "  7777  777777777777777777777        7777    \n"
    art += "   7777  7777777777777777777        7777     \n"
    art += "    =77                            77=       \n"
    art += "      =7                          7=         \n"
    art += "        7                        7           \n"
    art += "         7=                    =7            \n"
    art += "          77=                =77             \n"
    art += "            =7777777777777777777=            \n"
    art += "                                             \n"
    art += "            ====IIIIIIIIII=====              \n"
    art += "        =77777=            =77777=           \n"
    art += "     =777=                      =777=        \n"
    art += "  =777=                            =777=\n"
    art += "                                             \n"
    art += "        NetApp SolidFire Version {0}        " \
           "\n".format(version)
    art += "                                             \n"
    return art
def flatten(L):
    """Flatten one level of nesting: an iterable of iterables becomes a flat list."""
    out = []
    for inner in L:
        out.extend(inner)
    return out
def unlock_params(lock_handle):
    """Build the request parameters for an UNLOCK action on ``lock_handle``."""
    params = {'_action': 'UNLOCK'}
    params['lockHandle'] = lock_handle
    return params
import json
def _record_value_parser(buf):
"""
Parses value within OVSDB tables and returns python object corresponding
to the value type.
:param buf: value of 'ovs-vsctl list' or `ovs-vsctl find` command.
:return: python object corresponding to the value type of row.
"""
if buf.startswith('["uuid",'):
# UUID type
# e.g.)
# ["uuid","79c26f92-86f9-485f-945d-5786c8147f53"]
_, value = json.loads(buf)
elif buf.startswith('["set",'):
# Set type
# e.g.)
# ["set",[100,200]]
_, value = json.loads(buf)
elif buf.startswith('["map",'):
# Map type
# e.g.)
# ["map",[["stp-enable","true"]]]
_, value = json.loads(buf)
value = dict(value)
else:
# Other type
# e.g.)
# "br1" --> str
# 100 --> int
# true/false --> True/False
# null ... --> None
value = json.loads(buf)
return value | 5e44170e4c8b8167862346d40946342b2810bde7 | 122,987 |
def solution(array):
    """
    Return the maximal product of three values in ``array``.

    Note: sorts ``array`` in place (as the original did).
    """
    array.sort()
    top_three = array[-1] * array[-2] * array[-3]
    # Two large negative numbers times the largest positive value can beat
    # the product of the three biggest elements.
    negatives_case = array[-1] * array[0] * array[1]
    return max(top_three, negatives_case)
import collections
from typing import Mapping
def namedtuple_with_defaults(typename, field_names, default_values=()):
    """Create a namedtuple class with default field values.

    ``default_values`` may be a sequence (applied in field order) or a
    mapping of field name -> default; any unspecified field defaults to
    None. (The default was changed from a mutable ``[]`` to an immutable
    ``()`` -- same behavior, but avoids the mutable-default-argument trap.)

    Examples
    --------
    >>> Node = namedtuple_with_defaults('Node', 'val left right')
    >>> Node()
    Node(val=None, left=None, right=None)
    >>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
    >>> Node()
    Node(val=1, left=2, right=3)
    >>> Node = namedtuple_with_defaults('Node', 'val left right', {'right':7})
    >>> Node()
    Node(val=None, left=None, right=7)
    >>> Node(4)
    Node(val=4, left=None, right=7)
    """
    the_tuple = collections.namedtuple(typename, field_names)
    # First make every field optional (None) so a partial mapping/sequence
    # of defaults can build the prototype below.
    the_tuple.__new__.__defaults__ = (None,) * len(the_tuple._fields)
    if isinstance(default_values, Mapping):
        prototype = the_tuple(**default_values)
    else:
        prototype = the_tuple(*default_values)
    the_tuple.__new__.__defaults__ = tuple(prototype)
    return the_tuple
def calc_precision(tp: int, fp: int) -> float:
    """Calculate Precision = TP / (TP + FP).

    Args:
        tp (int): amount of TP.
        fp (int): amount of FP.
    Returns:
        float: precision for the given amounts of TP and FP; 0.0 when
        tp + fp == 0 (the original returned the int 0 here, which broke
        the declared float return type).
    """
    total = tp + fp
    if total == 0:
        # Prevent zero-division error.
        return 0.0
    return tp / total
def absolute_to_relative_timestamps(profile):
    """Rebase a profile's timestamps so the first one becomes zero.

    :param profile: a memory profile dictionary from memory_profiler
    :return: the same dict, with its 'timestamp' list rewritten in place
    """
    stamps = profile['timestamp']
    origin = stamps[0]
    # Slice-assign so the list object itself is updated, not replaced.
    stamps[:] = [stamp - origin for stamp in stamps]
    return profile
def extract_region(function_arn):
    """
    Extracts the region from the Lambda's ARN.

    An ARN looks like ``arn:aws:lambda:<region>:<account>:function:<name>``,
    so the region is the fourth colon-separated field (index 3). (The
    original docstring said "AZ"; field 3 of an ARN is the region.)
    :param function_arn: The ARN of this Lambda (string)
    :return: The region (string)
    """
    return function_arn.split(':')[3]
def rename_key(udict, key, new_key):
    """Return a copy of ``udict`` with ``key`` renamed to ``new_key``.

    If ``key`` is absent the copy is returned unchanged. The original used
    the magic string "*****N/A*****" as a missing-value marker, which would
    silently drop a key whose legitimate value happened to equal that
    string; a plain membership test avoids the sentinel entirely.
    """
    cdict = dict(udict)
    if key not in cdict:
        return cdict
    cdict[new_key] = cdict.pop(key)
    return cdict
def configure_chromatogram_builder(new_d,min_num_scans,group_intensity_threshold,min_peak_height,mz_tolerance):
    """
    Configure the ADAPchromatogrambuilder batch step of an MZmine batch
    description, writing the four numeric parameters into place.

    new_d = configure_chromatogram_builder(new_d,task.min_num_scans,task.group_intensity_threshold,task.min_peak_height,task.mz_tolerance)

    The batch step edited corresponds to
    "net.sf.mzmine.modules.masslistmethods.ADAPchromatogrambuilder.ADAPChromatogramBuilderModule"
    with parameters "Min group size in # of scans", "Group intensity
    threshold", "Min highest intensity" and "m/z tolerance" (ppmtolerance).
    """
    def _first(seq, field, fragment):
        # Mirror the original list-comprehension lookup; raises IndexError
        # when no entry matches, just like the original code did.
        return [entry for entry in seq if fragment in entry[field]][0]

    step = _first(new_d['batch']['batchstep'], '@method', 'ADAPChromatogramBuilderModule')
    params = step['parameter']
    _first(params, '@name', 'Min group size')['#text'] = '%.3f' % (min_num_scans)
    _first(params, '@name', 'Group intensity threshold')['#text'] = '%.3f' % (group_intensity_threshold)
    _first(params, '@name', 'Min highest intensity')['#text'] = '%.3f' % (min_peak_height)
    _first(params, '@name', 'm/z tolerance')['ppmtolerance'] = '%.3f' % (mz_tolerance)
    return new_d
def createToolset(filename=None, overwrite=-1, rootPath = None):
    """
    createToolset(filename=None, overwrite=-1, rootPath = None) -> None
    Creates a tool preset based on the currently selected nodes.
    NOTE(review): this looks like an API stub (presumably for the Nuke
    scripting API -- confirm); the body here is a no-op that always
    returns None.
    @param filename: Saves the preset as a script with the given file name.
    @param overwrite: If 1 (true) always overwrite; if 0 (false) never
        overwrite; if -1 (default), in GUI mode ask the user, in terminal
        mode behave the same as False.
    @param rootPath: If specified, use this as the root path to save the
        Toolset to. If not specified, save to the user's .nuke/Toolsets
        folder.
    """
    return None
def failed(message: str) -> str:
    """Prefix ``message`` with a "Failed: " marker.

    :param message: The message
    :return: String
    """
    return f"Failed: {message}"
def loss_f_rmse(results):
    """Returns the force RMSE from a validation-results mapping.

    Parameters
    ----------
    results : :obj:`dict`
        Validation results.
    """
    force_metrics = results['force']
    return force_metrics['rmse']
def list2dict(values_list):
    """Group a list of two-element iterables into a dict of lists keyed by
    the first element.

    This: [("A","1"), ("A","2"), ("B","1"), ("C","1")]
    Becomes: {"A": ["1", "2"], "B": ["1"], "C": ["1"] }

    Arguments:
    - `values_list`: list of two element lists or tuples.
    """
    dictionary = dict()
    for pair in values_list:
        # setdefault replaces the original get / append / reassign dance
        # with a single lookup.
        dictionary.setdefault(pair[0], []).append(pair[1])
    return dictionary
def get_class(kls):
    """Returns the object named by a fully qualified dotted path."""
    parts = kls.split('.')
    # __import__ returns the *top-level* package, so walk the remaining
    # path components with getattr.
    obj = __import__(".".join(parts[:-1]))
    for name in parts[1:]:
        obj = getattr(obj, name)
    return obj
def parse_date(dt):
    """
    Break a datetime into its individual parts, useful for formatting the
    date yourself. Returns (year, month, day, hour, minute, second).
    """
    date_part = (dt.year, dt.month, dt.day)
    time_part = (dt.hour, dt.minute, dt.second)
    return date_part + time_part
def workflow_marker(marker_name: str):
    """Create a decorator to mark a function for use within a workflow.

    The decorated function gains a ``__workflow_marker__`` attribute set to
    ``marker_name``; the function object itself is returned unchanged.
    """
    def decorate(func):
        setattr(func, '__workflow_marker__', marker_name)
        return func
    return decorate
def rename_node(amr, prefix):
    """
    Rename AMR graph nodes to ``prefix`` + node index so that two different
    AMRs can never share a node name.

    Mutates ``amr.nodes`` and ``amr.relations`` in place and returns a dict
    mapping each new name back to the original one.
    """
    # New name for every node, e.g. "a0", "a1", ... (later duplicates win,
    # matching the original loop's behavior).
    old_to_new = {node: prefix + str(i) for i, node in enumerate(amr.nodes)}
    # Rewrite the node names in place.
    for i, old in enumerate(amr.nodes):
        amr.nodes[i] = old_to_new[old]
    # Rewrite the target of every relation as well.
    for node_relations in amr.relations:
        for i, rel in enumerate(node_relations):
            node_relations[i][1] = old_to_new[rel[1]]
    # Invert the mapping: new name -> original name.
    return {new: old for old, new in old_to_new.items()}
def get_maxpool_layer_indexes(model):
    """
    Gets a model and returns the indexes of layers containing 'MaxPool'.

    Uses the idiomatic ``in`` operator instead of the original explicit
    ``str(d).__contains__('MaxPool')`` call.

    :param model: A nn.Module model with a ``features`` sequence of layers
    :return: A list of integer indexes
    """
    return [i for i, layer in enumerate(model.features) if 'MaxPool' in str(layer)]
def om_values(values, year):
    """
    Return a list of values for an observation model parameter.
    :param values: Values may be scalar, a list of scalars, or a dictionary
        that maps years to a scalar or list of scalars.
    :param year: The calendar year of the simulation.
    :raises ValueError: If the values are a dictionary and the year is not a
        valid key, or if there are duplicate values.
    :raises TypeError: If the values are not one of the types defined above.
    """
    if isinstance(values, dict):
        # Year-indexed values: select this year's entry.
        if year not in values:
            raise ValueError('Invalid year {}'.format(year))
        values = values[year]
    try:
        values = list(values)
    except TypeError:
        # Not iterable, so treat it as a single scalar value.
        return [values]
    # Reject duplicates.
    if len(set(values)) != len(values):
        raise ValueError("Duplicate values in {}".format(values))
    return values
import ipaddress
def get_bridge_domain(aci_subnets, eip):
    """
    Look up the bridge-domain name whose subnets contain an end-device IP.

    :param aci_subnets: mapping of bridge-domain name -> list of network
        objects (each exposing an ``addr`` attribute)
    :param eip: ACO end device IP (string)
    :return: the matching bridge-domain name, or None if none contains it
    """
    for bd_name, networks in aci_subnets.items():
        matched = any(
            ipaddress.ip_address(eip) in ipaddress.IPv4Network(net.addr, strict=False)
            for net in networks
        )
        if matched:
            return bd_name
    return None
def is_compatible_broadcast_shape(src_shape, dst_shape):
    """
    Check if src_shape can be broadcasted to dst_shape.

    Trailing dimensions are compared pairwise: each source dimension must be
    either 1 or equal to the matching destination dimension, and the source
    may not have more dimensions than the destination.

    Args:
        src_shape: tuple or list, shape of the source tensor
        dst_shape: tuple or list, shape of the desination tensor
    Returns:
        True if src_shape can be broadcasted to dst_shape
    """
    if len(src_shape) > len(dst_shape):
        return False
    trailing = dst_shape[-len(src_shape):]
    return all(s == 1 or s == d for s, d in zip(src_shape, trailing))
from typing import List
def sympy_cell_line_lists(cell: str) -> List[List[str]]:
    """
    Split a sympy cell into its lines, then split every line on '=' and
    strip whitespace from each component.
    """
    components = []
    for line in cell.split("\n"):
        components.append([part.strip() for part in line.split("=")])
    return components
def is_inline(tag):
    """
    A filter function, made to be used with BeautifulSoup.
    Makes sure the tag is `inline`, or has both data-inline-type
    and at least one of data-inline-{id,ids,filter} attributes.

    Bug fixed: the original combined two bare ``filter(...)`` objects with
    ``and``; in Python 3 a filter object is always truthy, so the attribute
    check never filtered anything. ``any()`` over the attribute names
    restores the documented intent. (Assumes BS4-style ``tag.attrs``, a
    dict whose iteration yields attribute names -- confirm against the
    BeautifulSoup version in use.)
    """
    names = list(tag.attrs)
    has_type = 'data-inline-type' in names
    has_ref = any(
        name in ('data-inline-id', 'data-inline-ids', 'data-inline-filter')
        for name in names
    )
    return (has_type and has_ref) or tag.name == 'inline'
from typing import Tuple
def convert_version_from_tuple_to_str(string: Tuple[int, ...]) -> str:
    """Join a program-version tuple such as (1, 2, 3) into "1.2.3".

    Note: the parameter is (mis)named ``string`` but actually receives a
    tuple of ints; the name is kept for caller compatibility.
    """
    parts = [str(component) for component in string]
    return ".".join(parts)
def usage(err=''):
    """ Prints the Usage() statement for the program """
    # Build the usage text; an optional error message is placed first.
    m = '%s\n' %err
    m += ' Default usage is to get changed MODs and update sForce.\n'
    # NOTE(review): the next fragment has no trailing newline, so it joins
    # onto the following line when displayed -- presumably intentional
    # indentation; confirm before reformatting.
    m += ' '
    m += ' sfMODmanage -cl do (sync TBs from MODs)\n'
    m += ' or\n'
    m += ' sfTaskBranch -cl -p3600 do (sync MODs update last 3600 secs) \n'
    return m
def GetBuildEquivalentForSourceRunMessage(serv, pack, source):
    """Returns a user message for equivalent gcloud commands for source deploy.

    Args:
        serv: name of the service
        pack: the pack arguments used to build the service image
        source: the location of the source
    """
    # Buildpack deploys use --pack; everything else falls back to --tag.
    build_flag = '--pack image=[IMAGE]' if pack else '--tag [IMAGE]'
    template = ('This command is equivalent to running '
                '`gcloud builds submit {build_flag} {source}` and '
                '`gcloud run deploy {serv} --image [IMAGE]`\n')
    return template.format(serv=serv, build_flag=build_flag, source=source)
from typing import Optional
from typing import Any
def replace_none(elem: Optional[Any], default: Any):
    """If elem is None, return default, else return elem.

    Args:
        elem: element to possibly return
        default: default element
    Returns:
        elem, or default
    """
    return default if elem is None else elem
import json
def loadjstr(string: str) -> dict:
    """
    Deserialize a JSON formatted string into the corresponding Python object.

    Args:
        string: JSON formatted string
    Returns:
        dictionary representation of the JSON
    """
    parsed = json.loads(string)
    return parsed
def runlength(blk, p0, p1):
    """ p0 is the source position, p1 is the dest position
    Return the length of run of common chars
    """
    # Grow the compared window one element at a time; the first window size
    # i at which the two slices differ means the previous size (i - 1) was
    # the longest match.
    for i in range(1, len(blk) - p1):
        if blk[p0:p0+i] != blk[p1:p1+i]:
            return i - 1
    # Every compared window matched. NOTE(review): the result here is
    # len(blk) - p1 - 1, one short of the remaining data after p1 --
    # confirm this off-by-one is intentional before changing it.
    return len(blk) - p1 - 1
from typing import Literal
from typing import List
def get_foot_sensor(foot: Literal["left", "right"], include_insole: bool = True) -> List[str]:
    """Get the names of all sensors that are attached to a foot (left or right).

    Names are prefixed with the first letter of ``foot`` ('l' or 'r'),
    e.g. "l_heel"; the insole sensor is appended unless disabled.
    """
    templates = ["{}_cavity", "{}_heel", "{}_lateral", "{}_medial", "{}_instep"]
    if include_insole is True:
        templates = templates + ["{}_insole"]
    prefix = foot[0]
    return [template.format(prefix) for template in templates]
import torch
def dot_score(a: torch.Tensor, b: torch.Tensor):
    """
    Computes the dot-product dot_prod(a[i], b[j]) for all i and j.
    :return: Matrix with res[i][j] = dot_prod(a[i], b[j])
    """
    def _as_2d_tensor(t):
        # Coerce to a tensor and promote 1-d vectors to row matrices.
        if not isinstance(t, torch.Tensor):
            t = torch.tensor(t)
        return t.unsqueeze(0) if len(t.shape) == 1 else t

    lhs = _as_2d_tensor(a)
    rhs = _as_2d_tensor(b)
    return torch.mm(lhs, rhs.transpose(0, 1))
from typing import List
from typing import Tuple
from pathlib import Path
import re
def get_examples_sorted(root_path: str, day: int) -> List[Tuple[int, Path]]:
    """
    Gets all example files for a given day, in numeric sort order.

    Parameters
    ----------
    root_path
        The path of the root AOC problem directory
    day:
        The AOC problem day, e.g., 1, 2, ... 25
    """
    pattern = re.compile(r"example_([0-9]+).txt")

    def keyed(path):
        # Pull the numeric suffix out of the file name; anything that does
        # not match the expected pattern is an error.
        match = pattern.match(path.name)
        if not match:
            raise Exception(f"invalid example {path}")
        return (int(match.groups()[0]), path)

    day_dir = Path(root_path).joinpath('data', f"d{day:02}")
    return sorted(keyed(example) for example in day_dir.glob('example_*'))
def bitwise_add(a: int, b: int) -> int:
    """Merge two binary masks with a bitwise OR."""
    merged = a | b
    return merged
def get_cleanname(item):
    """Extract a test name from a pytest Function item.

    Parametrized items look like ``test_x[param]``: return the part inside
    the first bracket pair; otherwise return the plain item name.
    """
    name = item.name
    if '[' not in name:
        return name
    inner = name.split('[', 1)[1]
    return inner.split(']', 1)[0]
def to_marker_edge(marker_size, marker):
    """
    Compute the edge shrink target for a marker.

    :param marker_size: int
    :param marker: default 'o'
    :return: float
    """
    # "Large" markers (square, triangles, diamond) need extra space.
    effective_size = 2 * marker_size if marker in "s^>v<d" else marker_size
    return pow(effective_size, 0.5) / 2
def select_one(iterable, where, equals):
    """Returns the first object of iterable matching given attribute value.

    Returns None when nothing matches (objects missing the attribute are
    treated as having the value None).
    """
    matches = (item for item in iterable if getattr(item, where, None) == equals)
    return next(matches, None)
def interpret_reply(reply_byte, packet_holder, verbose=False):
    """Interprets a reply byte message.
    Parameters
    ----------
    reply_byte : byte message returned from serial_object.read(serial_object.inWaiting())
        Often the reply contains an echo of the message sent. This part should be removed first.
    packet_holder : Holder
        Holder containing the packet and extended information about the packet that was originally sent.
        See :py:class:`cctalk.coin_messenger.CoinMessenger` for Holder construction.
    verbose : bool
        Flag to be more verbose.
    Returns
    -------
    reply : The type dependes on the type expected.
        Reply to the message in the type expected.
    Raises
    ------
    UserWarning
        If a simple pool did not return an expected message.
    Assumes 1,2 for send recieve hosts.
    """
    h = packet_holder
    reply_length = h.bytes_returned
    reply_type = h.type_returned
    # NOTE(review): map(ord, ...) implies reply_byte is a str of 1-char
    # elements (Python-2-style serial data), not a bytes object -- confirm.
    reply_int = list(map(ord, reply_byte))
    # A valid packet needs at least the destination byte and length byte.
    if len(reply_int) < 2:
        print('Recieved small packet int: {0} byte:{1}'.format(reply_int, reply_byte))
        return False
    # Byte 1 of the packet is the declared payload length.
    msg_length = reply_int[1]
    if verbose:
        print("Recieved {0} bytes:".format(msg_length))
    # The declared length must match what the Holder says to expect.
    if msg_length != reply_length:
        print('Message length != return_length. ml: {0} rl:{1}'.format(msg_length, reply_length))
        return False
    # Request code 254 is a simple poll; its reply is a fixed byte pattern.
    if h.request_code == 254:
        expected_reply = [1, 0, 2, 0, 253]
        if reply_int != expected_reply:
            msg = "Simple pool did not return expected message."
            raise UserWarning(msg, (reply_int, expected_reply))
    # Strip the 4-byte header and the trailing checksum byte to get payload.
    reply_msg_int = reply_int[4:-1]
    reply_msg_byte = reply_byte[4:-1]
    # Convert the payload to the type the Holder declared.
    if reply_type is str:
        return str().join(reply_msg_byte)
    elif reply_type is int:
        return reply_msg_int
    elif reply_type is bool:
        return True
    else:
        return reply_msg_byte
import re
def parse_ping(stdout):
    """
    Parses the result of the ping command for eos and sonic.
    Args:
        stdout: The stdout_lines output of the eos_ping or sonic_ping.
    Returns:
        A list of dictionaries, one per packet. The dictionary has the following keys:
        -icmp_seq: the sequence number of the packet
        -bytes: the received packet size
        -ttl: the received ttl
        -time: the received round trip time.
        -ttl_exceed: flag for whether a ttl exceeded error was received.
         (Note: the key is spelled 'ttl_exceed' in the output, not
         'ttl_exceeded' as the original docstring claimed.)
    """
    parsed_lines = []
    for line in stdout:
        # Example line: 64 bytes from 100.0.0.1: icmp_seq=1 ttl=63 time=1.32 ms
        parsed = {}
        match = re.search(r"icmp_seq=(\d+)", line)
        if match:
            parsed['icmp_seq'] = match.group(1)
        else:
            # Lines without a sequence number are not per-packet lines.
            continue
        match = re.search(r"(\d+) bytes", line)
        if match:
            parsed['bytes'] = match.group(1)
        match = re.search(r"ttl=(\d+)", line)
        if match:
            parsed['ttl'] = match.group(1)
        match = re.search(r"time=([\.\d]+)", line)
        if match:
            parsed['time'] = match.group(1)
        # "Time to live exceeded" style errors set the flag instead.
        match = re.search(r"Time[\w\s]+exceeded", line)
        if match:
            parsed['ttl_exceed'] = True
        else:
            parsed['ttl_exceed'] = False
        if parsed != {}:
            parsed_lines.append(parsed)
    return parsed_lines
def simple_func(x):
    """
    Double the sum of a (non-tuple) iterable; tuples are rejected.

    test 1: list
    >>> x = [1, 2, 3]
    >>> simple_func(x)
    12
    test 2: tuple
    >>> x = (1, 2, 3)
    >>> simple_func(x)
    Traceback (most recent call last):
    ...
    TypeError: Oh! Not tuple!
    """
    if isinstance(x, tuple):
        raise TypeError("Oh! Not tuple!")
    return sum(x) * 2
def dicts_equal(dictionary_one, dictionary_two):
    """
    Return True if all keys and values are the same between two dictionaries.
    """
    def covered_by(left, right):
        # Every key of `left` must exist in `right` with an equal value.
        return all(k in right and left[k] == right[k] for k in left)

    return covered_by(dictionary_one, dictionary_two) and \
        covered_by(dictionary_two, dictionary_one)
def calculate_maximum_position(velocity: int) -> int:
    """Calculate the maximum position if `velocity` decreases by one after each step.

    This is the triangular number 1 + 2 + ... + velocity (Gauss summation).
    """
    return velocity * (velocity + 1) // 2
import re
def extract_field_from_id3tool_out(id3tool_out, field_id):
    """
    Extracts a field from the id3tool output.

    Looks for a line of the form "<field_id>: <value>" and returns the
    value, or None if the field is absent. ``field_id`` is now escaped with
    ``re.escape`` so regex metacharacters in a field name are matched
    literally instead of being interpreted as a pattern.
    """
    pattern = r'^' + re.escape(field_id) + r':\s*(.*)$'
    m = re.search(pattern, id3tool_out, re.MULTILINE)
    return m.group(1) if m else None
def _split_name(name):
    """Splits given state name (model or optimizer state name) into the param_name, optimizer_key, view_num and the fp16_key"""
    # A "_view_<n>" suffix marks a partitioned view of the parameter.
    name_split = name.split('_view_')
    view_num = None
    if(len(name_split) > 1):
        view_num = int(name_split[1])
    optimizer_key = ''
    fp16_key = ''
    # Optimizer-state names are prefixed with the moment/update-count tag;
    # fp16 copies of model weights carry a '_fp16' suffix instead.
    if name_split[0].startswith('Moment_1'):
        optimizer_key = 'Moment_1_'
    elif name_split[0].startswith('Moment_2'):
        optimizer_key = 'Moment_2_'
    elif name_split[0].startswith('Update_Count'):
        optimizer_key = 'Update_Count_'
    elif name_split[0].endswith('_fp16'):
        fp16_key = '_fp16'
    param_name = name_split[0]
    # Strip the optimizer prefix (everything after the tag is the name).
    if optimizer_key != '':
        param_name = param_name.split(optimizer_key)[1]
    # Strip any '_fp16' suffix to recover the base parameter name.
    param_name = param_name.split('_fp16')[0]
    return param_name, optimizer_key, view_num, fp16_key
def el_in_keys(ddict, el):
    """
    Check whether ``el`` is among the keys of ``ddict``.

    Simplified: ``in`` on a dict already tests its keys, and the boolean
    result can be returned directly instead of via if/else.
    """
    return el in ddict
def remove_csrf_from_params_dict(data):
    """
    Remove the 'csrfmiddlewaretoken' entry from a params dict.

    Note: like the original, this mutates (and returns) the dict that was
    passed in. ``dict.pop`` with a default replaces the original
    try/except KeyError dance.
    """
    data.pop('csrfmiddlewaretoken', None)
    return data
import math
def color_from_temperature(temp):
    """Convert a kelvin temperature to RGB. Algorithm from
    http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/

    Returns an (r, g, b) tuple with each channel scaled to [0, 1]. The
    magic constants below are the curve-fit coefficients from the linked
    article; do not "clean them up".
    """
    # The published fit works in units of 100 K.
    temp /= 100
    # Red channel: saturated below 66 (6600 K), power-law decay above.
    if temp <= 66:
        red = 255
    else:
        red = temp - 60
        red = 329.698727446 * (red ** -0.133047592)
        red = max(0, red)
        red = min(255, red)
    # Green channel: logarithmic rise below 66, power-law decay above.
    if temp <= 66:
        green = temp
        green = 99.4708025861 * math.log(green) - 161.1195681661
        green = max(0, green)
        green = min(255, green)
    else:
        green = temp - 60
        green = 288.1221695283 * (green ** -0.0755148492)
        green = max(0, green)
        green = min(255, green)
    # Blue channel: saturated above 66, zero below 19, log curve between.
    if temp >= 66:
        blue = 255
    else:
        if temp <= 19:
            blue = 0
        else:
            blue = temp - 10
            blue = 138.5177312231 * math.log(blue) - 305.0447927307
            blue = max(0, blue)
            blue = min(255, blue)
    # Normalize 0-255 channel values to 0-1.
    return (red / 255, green / 255, blue / 255)
def findSvgLayer(layers,masterId):
    """
    Finds the 'svg' layer associated with a specific master ID.

    Arguments:
    layers {arr} -- array of GSLayers of a glyph
    masterId {str} -- unique ID of master
    Returns: the matching layer object, or None
    """
    matches = (layer for layer in layers
               if layer.name == 'svg' and layer.associatedMasterId == masterId)
    return next(matches, None)
def generic_setter(value, class_):
    """Generic setter for class objects.

    Args:
        value: The value to be validated.
        class_: The class to validate with
    Returns:
        The value unchanged (None is always accepted).
    Raises:
        TypeError: If the value is not an instance of the class
    """
    # None passes through; anything else must be an instance of class_.
    if value is not None and not isinstance(value, class_):
        raise TypeError(f"The value has to be an instance of the "
                        f"{class_.__name__} class. {type(value)} passed")
    return value
def binary(num, size):
    """
    num: int
    size: int
    Return the binary representation of ``num``, left-padded with zeroes
    so the result is at least ``size`` characters long.
    """
    # Collect bits least-significant first.
    digits = []
    while num > 0:
        num, bit = divmod(num, 2)
        digits.append(str(bit))
    # Pad (on the LSB-first end, which becomes the front after reversal).
    padding = '0' * (size - len(digits))
    return (''.join(digits) + padding)[::-1]
def Trim(lst, limit):
    """Trims a given list in place so that it is not longer than given limit.

    Args:
        lst: A list to trim.
        limit: A maximum number of elements in the list after trimming.
    Returns:
        A suffix of the input list that was trimmed (removed).
    """
    # Negative limits behave like 0: everything is removed.
    limit = max(0, limit)
    removed = lst[limit:]
    lst[limit:] = []
    return removed
def brix_to_sg(brix):
    """Convert the provided value from brix to specific gravity and return the result.

    Help pulled from https://straighttothepint.com/specific-gravity-brix-plato-conversion-calculators/
    """
    denominator = 258.6 - (brix / 258.2) * 227.1
    return brix / denominator + 1
def clean_multiline(s: str) -> str:
    """Utility function for cleaning whitespace from a multiline string:
    strips each line and drops blank lines before rejoining with newlines."""
    kept = []
    for line in s.split('\n'):
        stripped = line.strip()
        if stripped:
            kept.append(stripped)
    return '\n'.join(kept)
import math
def round_significant_digits(n, sd):
    """Round the given number by significant digits.

    Args:
        n: The given number, either integer or floating point number.
        sd: Significant digits.
    Returns:
        Rounded number.
    """
    if n == 0:
        return 0
    # The decimal position of the leading digit fixes how many decimal
    # places to keep for `sd` significant digits.
    magnitude = int(math.floor(math.log10(abs(n))))
    return round(n, sd - 1 - magnitude)
def find_nth(s: str, x: str, n: int = 0, overlap: bool = False) -> int:
    """
    Finds the position of *n*\ th occurrence of ``x`` in ``s``, or ``-1`` if
    there isn't one.
    - The ``n`` parameter is zero-based (i.e. 0 for the first, 1 for the
      second...).
    - If ``overlap`` is true, allows fragments to overlap. If not, they must be
      distinct.
    As per
    https://stackoverflow.com/questions/1883980/find-the-nth-occurrence-of-substring-in-a-string
    """  # noqa
    # Overlapping matches advance by one char; distinct ones by len(x).
    step = 1 if overlap else len(x)
    pos = -step
    found = 0
    while found <= n:
        pos = s.find(x, pos + step)
        if pos < 0:
            return pos
        found += 1
    return pos
def remove_properties_and_basics(resource_data):
"""
Given a mapping of resource_data attributes to use as "kwargs", return a new
mapping with the known properties removed.
"""
return dict([(k, v) for k, v in resource_data.items()
if k not in ('type', 'base_name', 'extension', 'path', 'name')]) | 2b893c052d506e9d28404c2f0e9a16c2a88e12f9 | 123,129 |
def FGTS(salarioBruto: float, numeroDeMeses: int = 12, incluirMulta: bool = False,
         incluirRendimento: bool = False) -> float:
    """
    Compute the FGTS (Brazilian severance fund) balance accumulated over a
    given number of months.

    Interest earnings may optionally be included (otherwise only the
    deposits are summed). Deposits for both installments of the 13th salary
    are included (the first in month 6, the second in month 11), as well as
    the deposit for the 1/3 vacation bonus.
    :param salarioBruto, numeroDeMeses, incluirMulta, incluirRendimento
    :return: saldoFGTS
    """
    RendimentoAnual = 0.03  # FGTS yield: 3% per year
    # Include FGTS interest earnings in the calculation?
    if incluirRendimento:
        RendimentoMensal = pow(1 + RendimentoAnual, 1 / 12) - 1  # Equivalent monthly compound rate
    else:
        RendimentoMensal = 0
    AportesMensais = salarioBruto * 0.08  # Amount deposited into the FGTS account each month
    saldoFGTS = 0  # Starting balance
    for mes in range(numeroDeMeses):
        if mes == 6:  # First installment of the 13th salary (approximate date)
            saldoFGTS = saldoFGTS * (1 + RendimentoMensal) + 1.5 * AportesMensais
        elif mes == 11:  # Second installment of the 13th salary + 1/3 vacation bonus
            saldoFGTS = saldoFGTS * (1 + RendimentoMensal) + 1.8 * AportesMensais
        else:
            saldoFGTS = saldoFGTS * (1 + RendimentoMensal) + AportesMensais
    if incluirMulta:
        saldoFGTS += AportesMensais * numeroDeMeses * 0.4  # 40% penalty for dismissal without just cause
    saldoFGTS = round(saldoFGTS, 2)
    return saldoFGTS
import torch
def load_checkpoint(checkpoint_name, device='cpu'):
    """
    DESCRIPTION: The function loads a torchvision model along with all attributes.
    INPUT:  <checkpoint_name>: Full path of the checkpoint file (incl. file extension)
            <device> (default='cpu'): The device on which the model should be loaded.
                     Typically 'cuda' to make use of the GPU (fast) or 'cpu' (slow)
    OUTPUT: <model>: The model along with its state_dict as it was saved in the checkpoint file
            <optimizer>: The optimizer along with its state_dict as it was saved in the checkpoint file
            <epochs>: The number of epochs as it was saved in the checkpoint file

    NOTE(review): torch.load unpickles arbitrary objects -- only load
    checkpoints from trusted sources.
    """
    #01. Load the checkpoint
    # map_location expects a concrete device string, hence 'cuda' -> 'cuda:0'.
    if device == 'cuda':
        device += ':0'
    cp = torch.load(checkpoint_name, map_location=device)
    #02. Apply the checkpoint attributes
    model = cp['model']
    model.load_state_dict(cp['model_state_dict'])
    model.class_to_idx = cp['class_to_idx']
    optimizer = cp['optimizer']
    optimizer.load_state_dict(cp['optimizer_state_dict'])
    epochs = cp['epochs']
    #03. Return the updated results
    return model, optimizer, epochs
def read_file_to_list(file_path):
    """Read a text file and return its non-blank lines, stripped.

    Args:
        file_path: string path to the file.
    Returns: list[string]
    """
    lines = []
    with open(file_path) as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if stripped:
                lines.append(stripped)
    return lines
from typing import Collection


def remove_space(toks: Collection[str]) -> Collection[str]:
    """
    Drop whitespace-only tokens, e.g. so bag-of-word models never see spaces.

    :param Collection[str] toks: list of tokens
    :return: tokens with blank entries removed; each surviving token is
        stripped of surrounding whitespace
    :rtype: Collection[str]
    :Example:
        >>> remove_space(['hello', ' ', 'world'])
        ['hello', 'world']
    """
    return [stripped for stripped in (tok.strip() for tok in toks) if stripped]
def linear(input_df, input_col, output_col,
           start=None, end=None,
           output_min=1, output_max=9):
    """
    Linearly rescale a DataFrame column into a new column.

    When ``start`` < ``end`` the mapping runs in the same direction as the
    input values (small in → small out); when ``start`` > ``end`` the
    mapping is reversed. ``start``/``end`` default to the column min/max.
    Input values beyond the [start, end] interval are clamped to
    ``output_min`` / ``output_max`` depending on which side they fall.

    Parameters
    ----------
    input_df : DataFrame or GeoDataFrame
        Frame holding the column to rescale.
    input_col : str
        Name of the source column.
    output_col : str
        Name of the column to write.
    start : int or float
        Input value mapped to the start of the output range.
    end : int or float
        Input value mapped to the end of the output range.
    output_min : int or float
        Smallest output value.
    output_max : int or float
        Largest output value.

    Returns
    -------
    input_df : DataFrame or GeoDataFrame
        The same frame, with ``output_col`` added/overwritten in place.
    """
    lo = input_df[input_col].min() if start is None else start
    hi = input_df[input_col].max() if end is None else end
    span_in = abs(hi - lo)
    span_out = abs(output_max - output_min)
    if hi > lo:
        # Forward mapping: lo -> output_min, hi -> output_max.
        input_df[output_col] = (
            output_min + (input_df[input_col] - lo) * span_out / span_in
        )
        input_df.loc[input_df[input_col] > hi, output_col] = output_max
        input_df.loc[input_df[input_col] < lo, output_col] = output_min
    else:
        # Reversed mapping: hi -> output_max, lo -> output_min.
        input_df[output_col] = (
            output_max - (input_df[input_col] - hi) * span_out / span_in
        )
        input_df.loc[input_df[input_col] < hi, output_col] = output_max
        input_df.loc[input_df[input_col] > lo, output_col] = output_min
    return input_df
def infer_compression(input_filename):
    """
    Infers compression for a file from its suffix. For example, given "/tmp/hello.gz", this will return "gzip"
    >>> infer_compression("/tmp/hello.gz")
    'gzip'
    >>> infer_compression("/tmp/abc.txt") is None
    True
    """
    # Map known file suffixes to their compression names; anything else -> None.
    suffix_to_compression = {
        'gz': 'gzip',
        'bz2': 'bz2',
        'zip': 'zip',
        'xz': 'xz',
    }
    parts = input_filename.split('.')
    if len(parts) <= 1:
        return None  # no suffix at all
    return suffix_to_compression.get(parts[-1])
import re


def merge_spaces(s, pat = re.compile(r'\s+')):
    """Collapse each run of whitespace (spaces, newlines, tabs) in *s* into a
    single space, then strip leading/trailing whitespace.

    The regex is compiled once and bound as a default argument on purpose.
    """
    collapsed = pat.sub(' ', s)
    return collapsed.strip()
import re


def clean_str(string):
    """Strip the string, replace each whitespace character with '_', then
    drop every remaining non-word character.

    :param string: string to clean
    :type string: str
    :returns: result
    :rtype: str
    """
    # Each whitespace char becomes its own underscore (no run-merging).
    underscored = re.sub(r"\s", "_", string.strip())
    # \w keeps letters, digits and the underscores just inserted.
    return re.sub(r"[^\w]", "", underscored)
def xor_hashes(*elements: object) -> int:
    """Fold all elements' hash() values together with XOR.

    Returns 0 for no arguments; order of arguments does not matter.
    """
    accumulator = 0
    for item in elements:
        accumulator = accumulator ^ hash(item)
    return accumulator
def get_dicts_generator(word_min_freq=4,
                        char_min_freq=2,
                        word_ignore_case=False,
                        char_ignore_case=False):
    """Build a closure that accumulates word/character frequencies over
    sentences and can emit index dictionaries.

    :param word_min_freq: Minimum frequency for a word to enter the dict.
    :param char_min_freq: Minimum frequency for a character to enter the dict.
    :param word_ignore_case: Lower-case words before counting.
    :param char_ignore_case: Lower-case characters before counting.
    :return: A closure; feed it sentences, then call with return_dict=True.
    """
    word_count, char_count = {}, {}

    def get_dicts(sentence=None,
                  return_dict=False):
        """Accumulate one sentence and/or emit the dictionaries.

        :param sentence: List of token strings, or None to add nothing.
        :param return_dict: When True, build and return the dictionaries.
        :return: (word_dict, char_dict, max_word_len) or None.
        """
        if sentence is not None:
            for word in sentence:
                if not word:
                    continue  # skip empty tokens
                w_key = word.lower() if word_ignore_case else word
                word_count[w_key] = word_count.get(w_key, 0) + 1
                # Characters are taken from the original (un-folded) word.
                for char in word:
                    c_key = char.lower() if char_ignore_case else char
                    char_count[c_key] = char_count.get(c_key, 0) + 1
        if not return_dict:
            return None
        # Index 0 is reserved for padding (''), index 1 for unknowns.
        word_dict, char_dict = {'': 0, '<UNK>': 1}, {'': 0, '<UNK>': 1}
        max_word_len = 0
        for word, freq in word_count.items():
            if freq >= word_min_freq:
                word_dict[word] = len(word_dict)
                max_word_len = max(max_word_len, len(word))
        for char, freq in char_count.items():
            if freq >= char_min_freq:
                char_dict[char] = len(char_dict)
        return word_dict, char_dict, max_word_len

    return get_dicts
def href_not_hash(tag):
    """
    BeautifulSoup filter function that returns False if the tag is None,
    lacks an href attribute, or its href is empty or starts with '#'.

    :param tag: The tag to check
    :return: True only for a tag whose href exists, is non-empty, and does
        not start with '#'
    """
    if tag is None or not tag.has_attr('href'):
        return False
    href = tag['href']
    # An empty href has no first character; treat it as "not a real link"
    # instead of raising IndexError like a bare href[0] check would.
    return bool(href) and href[0] != '#'
import hashlib


def generate_hash(image_bytes : bytes, algoritm : str) -> str:
    """Return the hex digest of *image_bytes* computed with the named
    hashlib algorithm (e.g. 'md5', 'sha256').

    NOTE(review): the parameter is spelled 'algoritm' in the original API;
    kept as-is to stay call-compatible with keyword callers.
    """
    digest = hashlib.new(algoritm, image_bytes)
    return digest.hexdigest()
def hex_byte_to_bits(hex_byte):
    """
    Interpret a single hex byte as a list of bits.

    Accepts a hex byte (two characters, 0-F) and expands it into the
    eight individual bits, most significant first.

    Parameters
    ----------
    hex_byte: str
        String of two characters representing the byte in hex.

    Returns
    -------
    bits: list of int
        Eight 0/1 ints, one per bit position of the input byte.

    See Also
    --------
    hex_bits_from_str : Determine the bit-structure of multiple
        hex bytes in a string.
    """
    value = int(hex_byte, base=16)
    # '08b' renders the value in binary, zero-padded to all 8 bit positions.
    return [int(bit) for bit in format(value, '08b')]
import functools
import time


def time_it(func):
    """Decorator that prints the wall-clock seconds the decorated function
    took to run, then returns the function's result.

    Fixes over the original: the wrapper now forwards *args/**kwargs,
    propagates the return value, and preserves the wrapped function's
    metadata via functools.wraps.
    """
    @functools.wraps(func)
    def run(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        print(time.time() - start)
        return result
    return run
import os
import tempfile


def build_configuration_and_files(configuration, temp_directory=None):
    """
    Parse Run Process arguments and create files for streams without arguments
    :param configuration: robot call argument
    :type configuration: dict[str, str]
    :param temp_directory: (Optional) directory to store temp files, by default - system temp
    :type temp_directory: str
    :return: tuple(dict[str, str], list[str]) -- tuple of new configuration and list of temporary files
    """
    new_files = []
    new_configuration = configuration.copy()
    for stream in ('stdout', 'stderr'):
        if stream not in configuration:
            fd, new_file = tempfile.mkstemp(prefix='robot.run_process.', dir=temp_directory)
            # mkstemp returns an open OS-level descriptor; close it right away
            # so it is not leaked (the original discarded it while still open).
            os.close(fd)
            print('Temporary file {!r} has been created for stream {!r}. Directory = {!r}'
                  .format(new_file, stream, temp_directory))
            new_configuration[stream] = new_file
            new_files.append(new_file)
    return new_configuration, new_files
def collect_dataset_entities(dataset_y):
    """Collect every entity in a labelled dataset, grouped by entity type.

    Mainly useful for building entity lexicons, e.g. for NER data
    augmentation.

    Args:
        dataset_y: per-sample lists of entity dicts; each dict must carry
            at least the keys 'type' and 'text'.

    Return:
        dict(dict): for each entity type, a frequency dict mapping entity
        text to its number of occurrences.

    Examples:
        >>> dataset_y = [[{'type': 'Person', 'text': 'Alice', 'offset': (0, 5)},
        ...               {'type': 'Company', 'text': 'Acme', 'offset': (10, 14)}],
        ...              [{'type': 'Company', 'text': 'Acme', 'offset': (2, 6)}]]
        >>> collect_dataset_entities(dataset_y)
        {'Person': {'Alice': 1}, 'Company': {'Acme': 2}}
    """
    type_to_counts = dict()
    for sample_y in dataset_y:
        for entity in sample_y:
            # Create the per-type bucket on first sight, then bump the count.
            counts = type_to_counts.setdefault(entity['type'], dict())
            counts[entity['text']] = counts.get(entity['text'], 0) + 1
    return type_to_counts
def _entry_to_variant_name(e):
"""Return the variant-specific name for entry |e| in _ALL_TOOLCHAINS."""
return '-'.join(e.get('variants', [])) | a0910b211ab34832ccdfb542d43fdffa34a360e9 | 123,164 |
def get_ndx_group_index(ndxfile, index_strings):
    """Gets the atom group index for the given strings.

    INPUT
    ndxfile       - index file *.ndx filename
    index_strings - either a single string, or a *list* of strings.
                    This string MUST be in the form 'Example'
                    where the field in the *.ndx file is '[ Example ]'

    RETURNS
    results - if a single string supplied, the index (int)
            - elif a list of strings supplied, a list of ints

    RAISES
    ValueError if a requested group name is not present in the file.
    """
    assert isinstance(index_strings, (str, list))
    # Header lines look like '[ Name ]'; collect them in file order so the
    # position of a header is the group's index.
    with open(ndxfile, 'r') as fin:
        index_lines = [line.strip() for line in fin if line.count('[') > 0]
    print('ndxfile', ndxfile)
    print('index_strings', index_strings)
    print('index_lines', index_lines)
    if isinstance(index_strings, str):
        return index_lines.index('[ ' + index_strings + ' ]')
    if isinstance(index_strings, list):
        return [index_lines.index('[ ' + s + ' ]') for s in index_strings]
    return None
def ellipses(text: str, max_length: int):
    """
    Return *text* unchanged while it fits in max_length; otherwise truncate
    it so that, with a trailing '...', the result is exactly max_length
    characters.

    This makes arbitrarily long text blocks safe to place in messages and
    embeds.
    """
    if len(text) <= max_length:
        return text
    suffix = '...'
    return text[:max_length - len(suffix)] + suffix
def lowercase(text):
    """
    Return a lower-cased copy of the given text.

    :param text: input string
    :return: the string in lower case
    """
    lowered = text.lower()
    return lowered
def xgcd(a,b):
    """xgcd(a,b) returns a tuple of form (g,x,y), where g is gcd(a,b) and
    x,y satisfy the equation g = ax + by.

    Requires b != 0 initially (the first reduction divides by b).
    """
    a1=1; b1=0; a2=0; b2=1; aneg=1; bneg=1
    # Work on |a| and |b|; remember the signs so the Bezout coefficients
    # can be flipped back at the end.
    if(a < 0):
        a = -a; aneg=-1
    if(b < 0):
        b = -b; bneg=-1
    while (1):
        quot = -(a // b)
        a = a % b
        a1 = a1 + quot*a2; b1 = b1 + quot*b2
        if(a == 0):
            # BUG FIX: this branch used to return a list while the other
            # branch returned a tuple; both now return tuples as documented.
            return (b, a2*aneg, b2*bneg)
        quot = -(b // a)
        b = b % a;
        a2 = a2 + quot*a1; b2 = b2 + quot*b1
        if(b == 0):
            return (a, a1*aneg, b1*bneg)
import re


def removePlexShowYear(title):
    """Strip a trailing year tag of the form '(YYYY)' (preceded by a space,
    dot, dash or underscore) as commonly found in Plex show names."""
    year_suffix = re.compile(r'([\s.\-_]\(\d{4}\))$')
    return year_suffix.sub('', title)
def extract_bibcodes(filename):
    """Takes a .bib filename, looks for bibcodes on the first line of each entry, and parses into a list."""
    # drop yCat, arxiv, PhDT, and other non-refereed entries
    # Workaround, since I couldn't get the API to accept property:refereed or property=refereed to work when searching
    exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
    bibcodes = []
    # Use a context manager so the file handle is always closed (the original
    # opened the file and never closed it).
    with open(filename) as f:
        for line in f:
            # Entry headers look like '@ARTICLE{bibcode,'.
            if line.startswith("@") and not any(x in line for x in exclude):
                bibcodes.append(line.split("{")[1].replace(",\n",""))
    return bibcodes
def is_on_board(board, level, x, y) -> bool:
    """
    Check whether the (level, x, y) coordinates index into the board without
    going out of bounds. Note: negative indices follow normal Python
    indexing and therefore count as on-board.
    """
    try:
        _ = board[level][x][y]
    except IndexError:
        return False
    return True
from typing import Optional


def no_blank(input_string: Optional[str]) -> bool:
    """Return True unless the string is None or empty."""
    if input_string is None:
        return False
    return input_string != ""
def substract(x,y):
    """Return the result of subtracting x from y (i.e. y - x)."""
    difference = y - x
    return difference
from typing import Tuple
from typing import Set
def get_common_and_diff_members(
    obj_a: object, obj_b: object
) -> Tuple[Set[str], Set[str]]:
    """
    Return the commonalities and differences between the two objects public API.
    Parameters
    ----------
    obj_a: object
        Object to find commonalities and difference when compared with obj_b.
    obj_b: object
        Object to find commonalities and difference when compared with obj_a.
    Returns
    -------
    tuple
        A tuple of two sets of strings where the first is the commonalities and second is the differences.
    """
    def _is_public(name: str) -> bool:
        # A single leading-underscore test also covers dunder names, so the
        # original's extra startswith('__') check was redundant. Also: a
        # named def instead of an assigned lambda (PEP 8 / E731).
        return not name.startswith('_')

    names_a = {name for name in dir(obj_a) if _is_public(name)}
    names_b = {name for name in dir(obj_b) if _is_public(name)}
    return (names_a & names_b, names_a ^ names_b)
def calculate_resistivity(T, L_wire, rho, alpha, **kwargs):
    """
    Temperature dependent resistivity model.

    Args:
        T: Temperature of wire (K)
        L_wire: Length of wire (cm) — one-element sequence
        rho: Resistivity (Ohms*cm) — one-element sequence
        alpha: Reference resistance coefficient — one-element sequence
        **kwargs: Catch unused arguments

    Returns:
        Resistance (Ohms)
    """
    # Inputs arrive wrapped in one-element sequences; unpack the scalars.
    length_cm = L_wire[0]
    rho_ref = rho[0]
    temp_coeff = alpha[0]
    T_ref = 293.15  # 20 deg C reference temperature for rho
    base_resistance = rho_ref * length_cm
    return base_resistance * (1 + temp_coeff * (T - T_ref))
def calc_num_opening_mismatches(matches):
    """(Internal) Count the number of -1 entries at the beginning of a list of numbers.
    These -1 correspond to mismatches in the sequence to sequence search.
    Returns 0 for an empty list; an all -1 list returns its length.
    """
    num = 0
    # Bounds-checked walk: the original indexed matches[num] unconditionally
    # and raised IndexError when the list was empty or entirely -1.
    while num < len(matches) and matches[num] == -1:
        num += 1
    return num
def get_longitude_direction(longitude_degrees):
    """
    Returns the compass direction for the given longitude degrees.

    :param longitude_degrees: The degrees (not minutes) as an integer.
    :return: "W" for negative, "E" for positive, "" for zero.
    :raises ValueError: if longitude_degrees is None.
    """
    if longitude_degrees is None:
        raise ValueError('No value provided for <longitude_degrees>')
    if longitude_degrees == 0:
        return ""
    return "W" if longitude_degrees < 0 else "E"
from typing import List
import sympy
def prime_factorize(n: int) -> List[int]:
    """
    Return the prime factors of n with multiplicity, via sympy.factorint.

    >>> prime_factorize(15485857 * 15485863)
    [15485857, 15485863]
    """
    factors: List[int] = []
    for prime, power in sympy.factorint(n).items():
        factors.extend([prime] * power)
    return factors
def format_currency(value, decimals=2):
    """
    Return a number suitably formatted for display as currency, with
    thousands separated by commas and up to two decimal points.
    >>> format_currency(1000)
    '1,000'
    >>> format_currency(100)
    '100'
    >>> format_currency(999.95)
    '999.95'
    >>> format_currency(99.95)
    '99.95'
    >>> format_currency(100000)
    '100,000'
    >>> format_currency(1000.00)
    '1,000'
    >>> format_currency(1000.41)
    '1,000.41'
    >>> format_currency(23.21, decimals=3)
    '23.210'
    >>> format_currency(1000, decimals=3)
    '1,000'
    >>> format_currency(123456789.123456789)
    '123,456,789.12'
    >>> format_currency(-100)
    '-100'
    """
    formatted = (u'%%.%df' % decimals) % value
    # partition (not split) tolerates decimals=0, where there is no '.'.
    number, _, decimal = formatted.partition(u'.')
    # Peel the sign off before grouping; otherwise a 3-digit negative like
    # -100 would be grouped as '-,100'.
    sign = u''
    if number.startswith(u'-'):
        sign, number = u'-', number[1:]
    parts = []
    while len(number) > 3:
        part, number = number[-3:], number[:-3]
        parts.append(part)
    parts.append(number)
    parts.reverse()
    result = sign + u','.join(parts)
    if decimal and int(decimal) != 0:
        result += u'.' + decimal
    return result
import torch


def checkDevice() -> str :
    """Pick the compute device: prefer the GPU, fall back to the CPU.

    Returns:
        str: "cuda" if a CUDA GPU is available, otherwise "cpu"
    """
    if torch.cuda.is_available():
        return "cuda"
    return "cpu"
import typing
def parse_arguments(command_line: str, **kwargs):
    """Build the keyword-argument dict for a subprocess.run() call.

    NOTE: despite the original docstring's claim, this function does NOT
    execute anything — it only assembles the arguments, defaulting to
    shell=True and storing the command under 'args'.

    Args:
        command_line: Command line to execute.
        **kwargs: Additional arguments for subprocess.run() (may override
            the shell=True default).

    Returns:
        dict suitable for subprocess.run(**result).
    """
    run_kwargs: typing.Dict[str, typing.Any] = {'shell': True}
    # update() on an empty mapping is a no-op, so no 'if kwargs' guard needed.
    run_kwargs.update(kwargs)
    run_kwargs['args'] = command_line
    return run_kwargs
import json
def read_json(file_name_with_path: str) -> dict:
    """read_json function
    Args:
        file_name_with_path (string): file name of json with path
    Returns:
        dict: json dict data
    Examples:
        >>> print(read_json("./name.json"))
        {name: "Masumi"}
    """
    # Context manager ensures the handle is closed even if parsing fails
    # (the original opened the file and never closed it).
    with open(file_name_with_path, 'r') as json_file:
        return json.load(json_file)
def subPt(ptA, ptB):
    """Return the component-wise difference ptA - ptB of two 2D points."""
    dx = ptA[0] - ptB[0]
    dy = ptA[1] - ptB[1]
    return dx, dy
def clean_TeX(string):
    """
    Replace TeX math-mode symbols that render poorly in plain text —
    currently just the underscore, which becomes a space.
    """
    cleaned = string.replace("_"," ")
    return cleaned
def digits_in_base_as_tuple(x, base):
    """
    Return the digits of non-negative int ``x`` written in ``base``,
    most significant digit first.

    e.g. digits_in_base_as_tuple(20, 2) == (1, 0, 1, 0, 0)

    Note: x == 0 yields the empty tuple ().
    """
    cur = x
    digs = []
    while cur:
        digs.append(cur % base)
        # BUG FIX: the original used '/=', which is FLOAT division in
        # Python 3 and corrupted the digits; floor division is required.
        cur //= base
    return tuple(reversed(digs))
def summary_title(tile_summary):
    """
    Obtain tile summary title.

    Args:
      tile_summary: TileSummary object; only its ``slide_num`` attribute is read.

    Returns:
      The tile summary title string.
    """
    slide_num = tile_summary.slide_num
    return "Slide %03d Tile Summary:" % slide_num
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.