content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import numpy
def basins_manually(cube, lat_array, lon_array, pacific_lon_bounds, indian_lon_bounds):
    """Define basins manually (i.e. without assistance from CMIP basin file)
    in the output array:
    north atlantic = 11
    south atlantic = 12
    north pacific = 13
    south pacific = 14
    indian = 15
    arctic = 16
    marginal seas = 17
    land = 18

    cube is a masked data array whose mask marks land points; lat_array and
    lon_array give the latitude/longitude of every grid cell.
    NOTE(review): longitudes appear to be degrees east in [0, 360) given the
    Hudson Bay / Mediterranean bounds below — confirm against the grid.
    """
    assert cube.shape == lat_array.shape == lon_array.shape
    # Split the world into three basins
    # Everything starts as Atlantic (11); Pacific (13) and Indian (15) are
    # carved out by longitude band. Order matters: later where() calls
    # refine the codes assigned by earlier ones.
    basin_array = numpy.ones(lat_array.shape) * 11
    basin_array = numpy.where((lon_array >= pacific_lon_bounds[0]) & (lon_array <= pacific_lon_bounds[1]), 13, basin_array)
    basin_array = numpy.where((lon_array >= indian_lon_bounds[0]) & (lon_array <= indian_lon_bounds[1]), 15, basin_array)
    # Reassign the Atlantic side of Central America (Caribbean / Gulf of
    # Mexico) which falls inside the Pacific longitude band.
    basin_array = numpy.where((basin_array == 13) & (lon_array >= 275) & (lat_array >= 9), 11, basin_array)
    basin_array = numpy.where((basin_array == 13) & (lon_array >= 260) & (lat_array >= 16), 11, basin_array)
    # South-east Asian seas east of ~105E and north of 8S belong to the Pacific.
    basin_array = numpy.where((basin_array == 15) & (lon_array >= 105) & (lat_array >= -8), 13, basin_array)
    # "Indian" points north of 25N are enclosed seas, not open ocean.
    basin_array = numpy.where((basin_array == 15) & (lat_array >= 25), 17, basin_array)
    # Break Pacific and Atlantic into north and south
    basin_array = numpy.where((basin_array == 11) & (lat_array < 0), 12, basin_array)
    basin_array = numpy.where((basin_array == 13) & (lat_array < 0), 14, basin_array)
    # Marginal seas
    hudson_lat_bounds = [51, 75]
    hudson_lon_bounds = [265, 296]
    basin_array = numpy.where((lon_array >= hudson_lon_bounds[0]) & (lon_array <= hudson_lon_bounds[1]) & (lat_array >= hudson_lat_bounds[0]) & (lat_array <= hudson_lat_bounds[1]), 17, basin_array)
    baltic_lat_bounds = [54, 65.5]
    baltic_lon_bounds = [8.5, 33]
    basin_array = numpy.where((lon_array >= baltic_lon_bounds[0]) & (lon_array <= baltic_lon_bounds[1]) & (lat_array >= baltic_lat_bounds[0]) & (lat_array <= baltic_lat_bounds[1]), 17, basin_array)
    # Mediterranean / Red / Black seas span the 0-degree meridian, hence two
    # longitude windows.
    med_red_black_lat_bounds = [12.5, 45.5]
    med_red_black_lon_bounds1 = [0, 43]
    med_red_black_lon_bounds2 = [354, 360]
    basin_array = numpy.where((lon_array >= med_red_black_lon_bounds1[0]) & (lon_array <= med_red_black_lon_bounds1[1]) & (lat_array >= med_red_black_lat_bounds[0]) & (lat_array <= med_red_black_lat_bounds[1]), 17, basin_array)
    basin_array = numpy.where((lon_array >= med_red_black_lon_bounds2[0]) & (lon_array <= med_red_black_lon_bounds2[1]) & (lat_array >= med_red_black_lat_bounds[0]) & (lat_array <= med_red_black_lat_bounds[1]), 17, basin_array)
    basin_array = numpy.where(lat_array >= 67, 16, basin_array)  # arctic ocean
    assert cube.data.mask.shape, "reference data needs land mask"
    basin_array = numpy.where(cube.data.mask == True, 18, basin_array)  # Land
    # Sanity check: every code from 11 (open ocean) to 18 (land) must occur.
    assert basin_array.min() == 11
    assert basin_array.max() == 18
    return basin_array
import re
def unstem_ngram(concept, sentence):
    """
    Convert each stemmed word of a space-separated concept back into its
    original (unstemmed) form using the sentence's token table.

    :param concept: space-separated string of (possibly stemmed) tokens
    :param sentence: object whose ``tokens_pos`` maps token -> "word::POS"
    :return: list of unstemmed words, falling back to the stemmed token
        itself when no mapping is found
    """
    words = []
    for token in concept.split(' '):
        try:
            word, _pos = sentence.tokens_pos[token].split('::')
        except (KeyError, ValueError):
            # Retry after dropping a trailing hyphen/period (e.g. "dog." -> "dog").
            token = re.sub(r'[-.](\s|$)', r'\1', token)
            try:
                word, _pos = sentence.tokens_pos[token].split('::')
            except (KeyError, ValueError):
                # No mapping available: keep the token as-is.
                word = token
        words.append(word)
    return words
def updateObjectFromXML(xml_doc, obj, mapping):
    """
    Update ``obj`` attributes from an XML document.

    ``mapping`` maps attribute names to xpath selectors (a string, or a list
    of path segments joined with '/'). The special key ``@namespaces`` may
    carry a prefix-to-namespace dict for the xpath queries. Missing elements
    are silently skipped, leaving the attribute untouched.
    """
    if not isinstance(mapping, dict):
        raise TypeError('Tag-to-property map must be a dict.')
    # Optional prefix -> namespace URI map for the xpath selectors,
    # e.g. {'x': 'http://example.com/namespace'}.
    nsmap = mapping.get('@namespaces')
    for prop, spec in mapping.items():
        if prop.startswith('@'):
            continue
        if isinstance(spec, str):
            xpath_expr = spec
        elif isinstance(spec, list):
            xpath_expr = '/'.join(spec)
        else:
            raise TypeError(
                'Invalid tag-to-property mapping dict: '
                'values must be strings or lists, not %s.' % (type(spec),)
            )
        try:
            found = xml_doc.xpath(xpath_expr, namespaces=nsmap)
            if isinstance(found, list):
                found = found[0]
            setattr(obj, prop, found.text)
        except IndexError:
            # Empty result set: treat the value as missing rather than wrong.
            continue
    return obj
def get_py_type(json_type):
    """
    Given a JSON type return a corresponding Python type.
    If no type can be determined, return an empty string.
    """
    # Table-driven lookup instead of an if/elif ladder.
    json_to_py = {
        "enum": "str",
        "string": "str",
        "File": "str",
        "boolean": "bool",
        "array": "list",
        "integer": "int",
        "int": "int",
        "number": "float",
        "void": "None",
    }
    return json_to_py.get(json_type, "")
import requests
def check_status():
    """Checks if a player is in a live game, returns False if not in a game.

    Queries the League of Legends Live Client Data API on localhost; the
    endpoint returns a JSON list of players only during a live game.
    """
    try:
        output = requests.get('https://127.0.0.1:2999/liveclientdata/playerlist', verify = False)
    except requests.exceptions.RequestException:
        # Connection refused / timeout: no live game client running.
        return False
    # During a live game the endpoint returns a JSON list; anything else
    # (e.g. an error object) means no game in progress.
    return isinstance(output.json(), list)
def link(keys, values):
    """Creates hash table
    :param numpy.array keys: key.
    :param numpy.array values: value.
    :return: (*dict*) -- hash table.
    """
    # dict(zip(...)) pairs keys with values directly.
    return dict(zip(keys, values))
def get_mature_sequence(precursor, mature, exact=False):
    """
    Extract the mature sequence from a precursor.

    The precursor is padded with 5 leading Ns so flanking slices near the
    start never run off the sequence.

    Args:
        *precursor(str)*: long sequence.
        *mature(list)*: [start, end] positions in the precursor.
        *exact(boolean)*: when True, return exactly the mature span.
    Returns:
        *(str)*: mature sequence, with 4 flanking nts on each side unless
        ``exact`` is set.
    """
    padded = "NNNNN%s" % precursor
    start = mature[0] + 5
    end = mature[1] + 5
    if exact:
        return padded[start:end + 1]
    # Include 4 nucleotides of flanking sequence on both sides.
    return padded[start - 4:end + 5]
def int16(s):
    """Validates an hexadecimal (0x...) value and returns it as an int."""
    return int(s, 16)
import os
def disintegrate(path_name):
    """
    Disintegrates the path name by splitting all of the components apart.
    """
    # Normalize first so redundant separators/'.' segments collapse.
    normalized = os.path.normpath(path_name)
    return normalized.split(os.sep)
import re
def stripNonAlphaNum(text):
    """ Delete non alphanumerical character into a string text and return a list """
    # Split on runs of non-word characters; equivalent to deleting them
    # and collecting the remaining chunks.
    word_splitter = re.compile(r"\W+", re.UNICODE)
    return word_splitter.split(text)
def get_buildings(expo, tax, ds):
    """
    Helper method to get the buildings value for a taxonomy/damage-state pair.
    Returns the Buildings value of the first matching row.
    """
    # Combine both filters into a single boolean mask.
    selection = expo[(expo.Taxonomy == tax) & (expo.Damage == ds)]
    return selection.Buildings.iloc[0]
def stat_ahead_behind(git, local_ref, remote_ref):
    """
    Returns a tuple (ahead, behind) describing how far
    from the remote ref the local ref is.
    """
    def _count_commits(base, tip):
        # Count commits reachable from tip but not base; 0 on git failure.
        (ret, out) = git.call("rev-list", "--left-right",
                              "%s..%s" % (base, tip), raises=False)
        return len(out.split()) if ret == 0 else 0

    ahead = _count_commits(remote_ref, local_ref)
    behind = _count_commits(local_ref, remote_ref)
    return ahead, behind
def bpas_log_snr_new(file_name: str, mode: str = "INIT"):
    """
    Retrieve ``new`` SNR values from a bandpass log file.

    Parameters
    ----------
    file_name : str
        Log file name.
    mode : str, optional
        Bandpass stage. The default is 'INIT' that is all stages.

    Returns
    -------
    res : dict
        Dictionary {obs: snr}; unparsable SNR values are recorded as 0.0.
    """
    snr_by_obs = {}
    stage_marker = f"PIMA_BPASS_{mode}"
    past_marker = False
    with open(file_name) as log:
        for line in log:
            if stage_marker in line:
                past_marker = True
            elif past_marker and line.startswith("Obs:"):
                fields = line.split()
                # The value immediately after the "new:" label is the SNR.
                snr_index = fields.index("new:") + 1
                try:
                    snr = float(fields[snr_index])
                except ValueError:
                    snr = 0.0
                snr_by_obs[int(fields[1])] = snr
    return snr_by_obs
import os
def parse_error():
    """
    Find undefined label errors.

    Scans ``final.error`` in the current directory for linker lines of the
    form ``... undefined reference to `S_0x...'`` and collects the quoted
    addresses.

    Returns
    -------
    set
        The set of undefined ``S_0x...`` labels. Previously the function
        implicitly returned ``None`` when the file was absent; it now
        returns an empty set (still falsy, so truthiness checks behave
        the same).
    """
    addrs = set()
    if os.path.isfile('final.error'):
        with open("final.error") as ferror:
            for line in ferror:
                if 'In function' in line:
                    continue
                if 'undefined reference' in line and 'S_0x' in line:
                    # Last token is `S_0x...' — strip the surrounding quotes.
                    addrs.add(line.split()[-1][1:-1])
    return addrs
import re
def _remove_phrasing(ipt_txt, phrase, replacement):
"""
removes the given phrase and replaces it with the replacement
:param ipt_txt: string to change
:param phrase: unwatned phrase to be removed
:param replacement: replacement phrase
:return: string with the phrase replaced with replacement
"""
a = re.search(
phrase,
ipt_txt)
it =0
while a and it<=50:
ipt_txt = re.sub(a.group(), replacement,
ipt_txt)
a = re.search(
phrase,
ipt_txt)
it+=1
if it>50:
raise Exception('While Loop fail. ' + str(phrase))
return ipt_txt | 9b36a46f28c222d44b7b5290e0c02dded81562f7 | 34,270 |
from typing import Tuple
from typing import Optional
def get_dbt_prefix_config(config) -> Tuple[Optional[str], Optional[str]]:
    """
    Return the (bucket, prefix) tuple from the dbt prefix config,
    or (None, None) when no prefix config is set.
    """
    prefix_config = config.dbtPrefixConfig
    if not prefix_config:
        return None, None
    return prefix_config.dbtBucketName, prefix_config.dbtObjectPrefix
def isUsdCrate(path):
    """ Check if a file is a USD crate file by reading in the magic bytes at
    the start of the file. Doesn't check the file extension.

    :Parameters:
        path : `str`
            USD file path
    :Returns:
        If the USD file is a crate (binary) file.
    :Rtype:
        `bool`
    """
    # Crate files are binary; the original opened in text mode, which can
    # raise UnicodeDecodeError on arbitrary binary content. Read the first
    # 8 bytes raw and compare against the crate magic.
    with open(path, "rb") as f:
        return f.read(8) == b"PXR-USDC"
def xml_escape(str):
    """Replaces chars ' " < > & with xml safe counterparts.

    Returns None when given None.
    """
    if str is None:
        return None
    # Restore the XML entity replacements — the previous version had been
    # HTML-unescaped somewhere along the way, turning every call into a
    # no-op like replace("&", "&"). '&' must be escaped first so the
    # ampersands introduced by later entities are not double-escaped.
    str = str.replace("&", "&amp;")
    str = str.replace("'", "&apos;")
    str = str.replace("\"", "&quot;")
    str = str.replace("<", "&lt;")
    str = str.replace(">", "&gt;")
    return str
def compare_bit_arrays(bit_array_1, bit_array_2):
    """
    Compare two bit arrays position by position.

    :param bit_array_1:
    :type bit_array_1: BitArray
    :param bit_array_2:
    :type bit_array_2: BitArray
    :return: (False, -1) on length mismatch; (False, n) when n > 0 positions
        hold equal bits; (True, 0) when no position holds equal bits.
    """
    if bit_array_1.length != bit_array_2.length:
        return False, -1
    matching_bits = sum(
        1 for a, b in zip(bit_array_1.bin, bit_array_2.bin) if a == b
    )
    if matching_bits:
        return False, matching_bits
    return True, 0
def makeSquare(x, y, w, h):
    """ Convert a rectangle ROI to a square with the same center.
    @param: left-most column
    @param: top-most row
    @param: width of region
    @param: height of region
    @return: x, y, w, h of the new ROI
    """
    center_x = x + w // 2
    center_y = y + h // 2
    # The square's side is the longer of the two dimensions.
    side = max(w, h)
    left = center_x - side // 2
    top = center_y - side // 2
    return int(left), int(top), int(side), int(side)
def make_relative(path: str, root_path: str) -> str:
    """Make path relative with respect to a
    root directory

    Arguments:
        path (str): current path.
        root_path (str): root directory path.
    Returns:
        str: relative path.
    """
    stripped = path.replace(root_path, '')
    # Drop a single leading separator left behind by the removal.
    if stripped and stripped[0] == '/':
        stripped = stripped[1:]
    return stripped
def count_missense_per_gene(lines):
    """ Count the number of missense variants in each gene.

    Each line is tab-separated with the gene symbol in column 0 and the
    consequence in column 3. Genes with no missense variants still appear
    with a count of 0.
    """
    counts = {}
    for line in lines:
        fields = line.split("\t")
        gene, consequence = fields[0], fields[3]
        # Ensure every seen gene has an entry, even at zero.
        counts.setdefault(gene, 0)
        if consequence == "missense_variant":
            counts[gene] += 1
    return counts
from typing import Mapping
def _get_full_name(artifact_id: int, name_from_id: Mapping[int, str]) -> str:
"""Converts the int-typed id to full string name."""
return name_from_id[artifact_id] | 4bfdee8c9123bef5c25225e8bea3d6488e9a24a7 | 34,280 |
import os
def model_path(*args):
    """Translate model path to file system path (under ../test/ relative to this module)."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, "../test/", *args))
def FourCharCode(fcc):
    """Create an integer from the provided 4-byte string, required for finding keychain items based on protocol type"""
    # Fold the four characters into a 32-bit big-endian integer.
    code = 0
    for ch in (fcc[0], fcc[1], fcc[2], fcc[3]):
        code = (code << 8) | ord(ch)
    return code
import datetime as dt
def make_dates_ordinal(df, dates_column):
    """
    Convert the dates of a DataFrame column to integers (in place) so the
    data can easily be fit to a regression model.

    datetime.toordinal() returns the proleptic Gregorian ordinal of a date,
    i.e. the day count from 01/01/01. Although the Gregorian calendar was
    not followed before October 1582, Python's date class (like many
    computer systems) extends it backwards.

    Args:
        df - Pandas DataFrame
        dates_column - column name (string); all values must be of type
            datetime64[ns].
    Output:
        The processed DataFrame is returned.
    """
    df[dates_column] = df[dates_column].map(dt.datetime.toordinal)
    return df
def generate_filename(in_path, out_dir, bitrate, encoder):
    """
    Create a new filename based on the original video file and test bitrate.

    Parameters
    ----------
    in_path : str
        Full path of input video.
    out_dir : str
        Directory of output video.
    bitrate : int
        Video bitrate in kbit/s.
    encoder : str
        Encoder for FFmpeg to use.

    Returns
    -------
    out_path : str
        Full path of new output video.
    """
    # A second dot would make the extension split ambiguous.
    if in_path.count('.') >= 2:
        raise Exception('Filename has multiple full stops')
    basename = in_path.split('/')[-1]
    tagged_name = basename.replace('.', f'_{encoder}_{int(bitrate)}.')
    return out_dir + tagged_name
def preconvert_bool(value, name):
    """
    Converts the given `value` to an acceptable boolean by the wrapper.

    Parameters
    ----------
    value : `bool` or `int`
        The value to convert (ints must be `0` or `1`).
    name : `str`
        The name of the value, used in error messages.

    Returns
    -------
    value : `bool`

    Raises
    ------
    TypeError
        If `value` was not passed as `bool` or `int` instance.
    ValueError
        If value was passed as an `int` instance, but not as `0` or `1`.
    """
    if type(value) is bool:
        return value
    if isinstance(value, int):
        if value in (0, 1):
            return bool(value)
        raise ValueError(f'`{name}` was given as `int` instance, but neither as `0` or `1`, got {value!r}.')
    raise TypeError(f'`{name}` can be `bool` or `int` instance as `0` or `1`, got {value.__class__.__name__}.')
def get_pressed_button(mx, my, buttons):
    """Return the button whose rectangle contains (mx, my), or None."""
    for candidate in buttons:
        inside_x = candidate.x <= mx <= candidate.x + candidate.length
        inside_y = candidate.y <= my <= candidate.y + candidate.height
        if inside_x and inside_y:
            return candidate
    return None
def _compare(data, k, threshold, sign=True):
"""
Obtain indicator vector for the samples with k-th feature > threshold
:param data: array-like of shape (n_sample, n_feat)
:param k: int
Index of feature in question
:param threshold: float
Threshold for the comparison
:param sign: bool
Flag, if False, return indicator of the complement
:return: array-like of shape (n_samples,)
"""
if sign:
return data[:, k] > threshold
else:
return data[:, k] <= threshold | 730582aa42dec9087de0d4869af42b3b47dd83e0 | 34,290 |
def get_cellId_from_hid(hid, rect_width):
    """
    Translate a grid index to a cell id by shifting it half a rectangle
    width in both axes.

    :param hid: object exposing ``transpose(dx, dy)`` and ``to_cell_id()``
        (presumably a GridCellId — confirm against the caller)
    :type rect_width: int
    :return: cell id of the shifted index
    """
    # NOTE(review): `rect_width / 2` is float division on Python 3; if integer
    # offsets are expected here this should probably be `rect_width // 2` —
    # confirm with the transpose() implementation.
    thid = hid.transpose(-(rect_width / 2), -(rect_width / 2))
    cellId = thid.to_cell_id()
    return cellId
def get_live_fs_dates(year, all_obs, fs_from):
    """Generate a forecast date at the time of each observation.

    When ``fs_from`` is None only the most recent observation date is
    returned (still as a list); otherwise all dates >= ``fs_from``.
    """
    observation_dates = sorted(obs['date'] for obs in all_obs)
    if fs_from is None:
        # Important: *must* be a list even for the single latest date.
        return [observation_dates[-1]]
    return [date for date in observation_dates if date >= fs_from]
def lowest_dropped(grades):
    """
    Drop the lowest grade from each row of grades.

    >>> g = ((30, 70, 80), (70, 30), (10, 80, 30))
    >>> lowest_dropped(g)
    [[70, 80], [70], [80, 30]]
    """
    # The original comprehension returned [None, ...] because list.remove()
    # returns None; build the reduced rows explicitly instead.
    reduced = []
    for row in grades:
        remaining = list(row)
        remaining.remove(min(remaining))
        reduced.append(remaining)
    return reduced
def get_common_subpeptides(list1, list2):
    """Get intersection of two sub-peptide chain lists (chain names),
    preserving the order of ``list1``."""
    return [chain for chain in list1 if chain in list2]
def simplefilter(lst, start=0, ss=1, num=50, **kw):
    """Given a set of parameters, filters the list.

    :param lst: list to filter
    :param start: index to start from (accepts ints or numeric strings)
    :param ss: step size between kept elements
    :param num: maximum number of elements to return
    :return: filtered list

    Fix: the parameters were coerced with int() lazily at each use, but
    ``start`` was used un-coerced in the modulus arithmetic, crashing for
    the string inputs the int() calls clearly anticipated. Coerce once up
    front; behavior for integer callers is unchanged (including the
    ``(i - start)`` phase offset, preserved deliberately).
    """
    start, ss, num = int(start), int(ss), int(num)
    ret = []
    for i, el in enumerate(lst[start:]):
        if len(ret) >= num:
            break
        if (i - start) % ss != 0:
            continue
        ret.append(el)
    return ret
def trace_driven_cache_hit_ratio(workload, cache, warmup_ratio=0.25):
    """Compute cache hit ratio of a cache under an arbitrary trace-driven
    workload.

    Parameters
    ----------
    workload : list or array
        List of URLs or content identifiers extracted from a trace. This list
        only needs to contain content identifiers, not timestamps.
    cache : Cache
        Instance of a cache object
    warmup_ratio : float, optional
        Ratio of requests of the workload used to warm up the cache (i.e.
        whose cache hit/miss results are discarded)

    Returns
    -------
    cache_hit_ratio : float
        The cache hit ratio over the post-warmup requests
    """
    if not 0 <= warmup_ratio <= 1:
        raise ValueError("warmup_ratio must be comprised between 0 and 1")
    total_requests = len(workload)
    warmup_requests = int(warmup_ratio * total_requests)
    hits = 0
    for request_index, content in enumerate(workload):
        if cache.get(content):
            # Only count hits once the warmup window has passed.
            if request_index >= warmup_requests:
                hits += 1
        else:
            cache.put(content)
    return hits / (total_requests - warmup_requests)
def _make_conn_info(self,dev1,port1,dev2,port2,dir='bi'):
""" Returns `switch_name` and ``conn_id`` if available.
and returns ``None`` if the connection stretches over multi optic
switches
"""
sw1 = self._intf_map[dev1.lower()][port1.lower()]['switch-name']
sw2 = self._intf_map[dev1.lower()][port1.lower()]['switch-name']
if sw1 != sw2:
return ()
else:
s1 = self._intf_map[dev1.lower()][port1.lower()]['switch-port']
s2 = self._intf_map[dev2.lower()][port2.lower()]['switch-port']
if dir == 'bi':
connect = '-'
else:
connect = '>'
return (sw1, s1+connect+s2, s1 ,s2, connect) | 828bc1dcf24d61c6e1e25b892256e75b168f4d11 | 34,300 |
def handle_special_cases(command):
    """
    Called by parse_commands.

    This is here to separate the special-case mangling from the rest of the
    code. ``command`` is a namedtuple-like object supporting ``_replace``.
    """
    # These require the 'cid' param to be given twice for some reason.
    if command.name in ('SearchRequest', 'NotFoundResponse'):
        command = command._replace(input_params=command.input_params[:-1])
    if command.name == 'RsrvIsUpResponse':
        return command._replace(name='Beacon')
    return command
import subprocess
import click
def check_tool(tool,local,not_required_message=None):
    """
    Check whether an executable is available on the PATH and print a report.

    :param str tool: name of executable, e.g. 'cmake'
    :param bool local: True when running locally; False on a VSC cluster,
        where system-provided tools under /usr/bin are discouraged in favor
        of cluster modules.
    :param str not_required_message: extra message printed when the tool is
        missing but not strictly required.
    :return: True if the tool is available (and acceptable for the
        environment), False otherwise.
    """
    # Locate the tool with `which`; a non-zero return code means not found.
    completed_which = subprocess.run(['which', tool], capture_output=True, text=True)
    which_string = completed_which.stdout.strip().replace('\n', ' ')
    if completed_which.returncode !=0:
        click.secho(f' {tool} is not available.', fg='bright_red')
        if not_required_message:
            print(f' {not_required_message}')
        return False
    else:
        # Tool found: capture and tidy its version banner for display.
        completed_version = subprocess.run([tool, '--version'], capture_output=True, text=True)
        version_string = completed_version.stdout.strip().replace('\n\n','\n').replace('\n','\n ')
        if local:
            click.secho(f' {tool} is available:', fg='green')
            print(f' {version_string}')
            print(f' {which_string}')
            return True
        else: # VSC cluster
            # On a cluster, a /usr/bin tool is the OS version, not a module.
            if completed_which.stdout.startswith('/usr/bin/'):
                click.secho(f' {tool} is available from the system. However, it is recommended to use a cluster module version.'
                           , fg='bright_red'
                           )
                print( f' {which_string}\n'
                       f' {version_string}\n')
                return False
            else:
                click.secho(f' {tool} is available:', fg='green')
                print(f' {version_string}')
                print(f' {which_string}')
                return True
def convert_to_bcd(decimal):
    """ Converts a decimal value to a bcd value.

    :param decimal: the non-negative integer to pack into BCD
    :returns: the number in BCD form (each decimal digit in its own nibble)
    """
    place, bcd = 0, 0
    while decimal > 0:
        nibble = decimal % 10
        bcd += nibble << place
        # BUG FIX: '/=' is float division on Python 3, so the loop degraded
        # into float arithmetic and produced wrong results; use floor division.
        decimal //= 10
        place += 4
    return bcd
def exclude_points_ranking(dataset):
    """
    When we do ranking, none of the existing jobs should be added again,
    so the full set of existing datapoints is the exclusion set.

    :param dataset: object exposing a ``datapoints`` attribute
    :return: the dataset's datapoints
    """
    return dataset.datapoints
def create(element, target, cns_type, compensate=True):
    """
    Creates a constraint on the given element, constraining it
    against the given target.

    :param element: The element to constrain
    :type element: Host Specific
    :param target: The element to constrain to
    :type target: Host Specific
    :param cns_type: Supported - general - constraint types are:

        >> position
        >> rotation
        >> transform

        Note: Specific host implementations may expose further
        constraint types.
    :type cns_type: str
    :param compensate: If True the current offset between the element
        and the target is retained during the constraint creation.
    :type compensate: bool
    :return: Constraint (Host Specific)
    """
    # Stub: host-specific integrations are expected to provide the actual
    # implementation; the generic fallback creates nothing.
    return None
import argparse
def setup_cli(args):
    """ Configure command-line arguments and parse the given argv list. """
    parser = argparse.ArgumentParser(
        description="outputs a list of domains between start and stop",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('-s', '--start', action='store', dest='start',
                        help='Start domain')
    parser.add_argument('-e', '--end', action='store', dest='end',
                        help='End domain')
    return parser.parse_args(args)
def post_process_weird(system_mentions):
    """ Removes all mentions which are "mm", "hmm", "ahem", "um", "US" or
    "U.S.".

    Args:
        system_mentions (list(Mention)): A list of system mentions.

    Returns:
        list(Mention): the filtered (sorted) list of mentions.
    """
    kept = []
    for mention in system_mentions:
        text = " ".join(mention.attributes["tokens"])
        # Filler noises are matched case-insensitively; "US"/"U.S." exactly.
        if text.lower() in ["mm", "hmm", "ahem", "um"]:
            continue
        if text == "US" or text == "U.S.":
            continue
        kept.append(mention)
    return sorted(kept)
def parse_line(record):
    """
    Parse one raw 'user,item,rating' line into a tuple of ints.
    """
    user_id, item_id, rating = (int(field) for field in record.split(','))
    return user_id, item_id, rating
def getCluster(self):
    """Get the HDInsight cluster object matching CLUSTER_DNS_NAME, or None."""
    target_name = self.params["CLUSTER_DNS_NAME"]
    for cluster in self.hdinsight_client.clusters.list():
        if cluster.name == target_name:
            return cluster
    return None
def diff_files(listA, listB, exclude_attrs=[], mapA={}, mapB={}):
    """
    Compute the diff of two lists of file dicts, treating them as set-like.

    :param listA: "before" list of file dicts
    :param listB: "after" list of file dicts
    :param exclude_attrs: attribute names to ignore when comparing; only
        entries prefixed with 'files.' apply to file dicts
    :param mapA: unused (kept for interface compatibility)
    :param mapB: unused (kept for interface compatibility)
    :return: {'added': [...], 'deleted': [...]} with excluded attrs stripped

    Improvements over the original: the two copy-and-pop loops are collapsed
    into one helper, the excluded-attribute names are computed once instead
    of per file, and a file missing an excluded attribute no longer raises
    KeyError (pop() without a default crashed on it).
    """
    # Attribute names under the 'files.' namespace, with the prefix removed.
    ignored = [attr.replace('files.', '')
               for attr in exclude_attrs if attr.startswith('files.')]

    def _strip(file_dict):
        # Copy of the dict without the ignored attributes.
        return {k: v for k, v in file_dict.items() if k not in ignored}

    clean_a = [_strip(f) for f in listA]
    clean_b = [_strip(f) for f in listB]
    return {
        'added': [f for f in clean_b if f not in clean_a],
        'deleted': [f for f in clean_a if f not in clean_b],
    }
import requests
def get_remaining_rate_limit(api_key: str) -> int:
    """
    Return your remaining hourly rate limit for the NASA APOD API.

    Makes one request to the Apod endpoint and reads the
    ``X-RateLimit-Remaining`` header that the API attaches to every
    response. With a registered key (not ``DEMO_KEY``) the default hourly
    limit is 1,000 requests according to https://api.nasa.gov/, so after 2
    requests this returns 998.

    **Example**

    .. code-block:: python3

        from nasawrapper.utils import get_remaining_rate_limit
        remaining = get_remaining_rate_limit("DEMO_KEY")
        print(remaining)
    """
    response = requests.get(f"https://api.nasa.gov/planetary/apod?api_key={api_key}")
    return int(response.headers["X-RateLimit-Remaining"])
import six
def is_iterable(x):
    """Determine whether ``x`` is a non-string iterable."""
    # `str` replaces six.string_types: on Python 3 they are identical, and
    # it removes the unnecessary six dependency.
    if isinstance(x, str):
        return False
    return hasattr(x, "__iter__")
def _is_fileobj(obj):
""" Is `obj` a file-like object?"""
return hasattr(obj, 'read') and hasattr(obj, 'write') | 8d09dce78239fe134116e641d4cf5934ffc173ec | 34,321 |
def ends_with_blank_line(block):
    """
    Returns true if block ends with a blank line, descending if needed
    into lists and sublists.
    """
    # Iterative descent instead of recursion: walk down the last child of
    # each List/ListItem until a blank line is found or descent stops.
    while True:
        if block.last_line_blank:
            return True
        if block.t not in ('List', 'ListItem') or not block.children:
            return False
        block = block.children[-1]
from typing import List
def person(word: str, parts: List[str]) -> str:
    """
    Format a person name from template parts.

    Entries containing '=' are options; the rest are name components
    (first name, then last name). The "'=oui" option joins the two parts
    without a space; a third name component (disambiguation) is ignored.

    >>> person("foo", ["Aldous", "Huxley"])
    'Aldous Huxley'
    >>> person("foo", ["Théodore Agrippa d’", "Aubigné", "'=oui"])
    'Théodore Agrippa d’Aubigné'
    >>> person("foo", ["L. L. Zamenhof"])
    'L. L. Zamenhof'
    >>> person("foo", ["", "Brantôme", "Brantôme (écrivain)"])
    'Brantôme'

    Source: https://fr.wiktionary.org/wiki/Mod%C3%A8le:nom_w_pc
    """
    options = [p for p in parts if "=" in p]
    names = [p for p in parts if "=" not in p]
    formatted = names[0]
    remaining = names[1:]
    # First name only.
    if not remaining:
        return formatted
    # Last name only, or first name + last name.
    joiner = "" if ("'=oui" in options or not formatted) else " "
    return formatted + joiner + remaining[0]
def clause_time(verb, api):
    """Look for an adjunctive Time element in the verb's clause.

    Returns {'has_time': 1} when any phrase of the clause atom has the
    function 'Time', else {'has_time': 0}.
    """
    L, F = api.L, api.F
    clause_atom = L.u(verb, 'clause_atom')[0]
    has_time = any(
        F.function.v(phrase) == 'Time'
        for phrase in L.d(clause_atom, 'phrase')
    )
    return {'has_time': int(has_time)}
def get_genus_count(genus_dict, blast_line, sci_name_column="15"):
    """Count the genus distribution of the top BLAST hit.

    :param genus_dict: running {genus: count} dict, updated in place
    :param blast_line: one BLAST result line, already split into columns
    :param sci_name_column: 1-based column holding the scientific name
        (kept as a string default for interface compatibility)
    :return: the updated genus_dict

    The bare except clauses of the original are replaced with explicit
    checks: an empty scientific-name field leaves the dict untouched, and
    dict.get() replaces the try/except counter initialization.
    """
    column_index = int(sci_name_column) - 1
    scientific_name = blast_line[column_index]
    name_parts = scientific_name.split()
    if not name_parts:
        # Empty name field: nothing to count.
        return genus_dict
    genus = name_parts[0]
    genus_dict[genus] = genus_dict.get(genus, 0) + 1
    return genus_dict
def people(count=500):
    """
    Build the NetLogo enumeratedValueSet XML snippet for the People variable.

    <enumeratedValueSet variable="People"> <value value="500"/> </enumeratedValueSet>

    :param count: number of people, integer between 1 and 500 (default 500,
        preserving the previous hard-coded behavior)
    :return: the XML snippet string
    :raises ValueError: if count is outside [1, 500]
    """
    if not 1 <= count <= 500:
        raise ValueError("People must be an integer between 1 and 500")
    return f'<enumeratedValueSet variable="People"> <value value="{count}"/> </enumeratedValueSet>'
import pip
import site
def install_package(package):
    """Install a pip package to the user's site-packages directory.

    Returns True when pip reports success (exit code 0), False otherwise.
    """
    # NOTE(review): pip.main is a private API that was removed in pip >= 10;
    # this relies on an older pip — confirm the pinned environment.
    exitval = pip.main(['install', '--user', package])
    if exitval == 0:
        # Reload sys.path to make sure the user's site-packages are in sys.path
        site.main()
    return exitval == 0
def get_hit_table(hit):
    """Create context for a single hit in the search.

    Args:
        hit(Dict): a dictionary representing a single hit in the search.
    Returns:
        (dict). The hit context.
        (list). The headers of the hit.
    """
    headers = ['_index', '_id', '_type', '_score']
    # Seed the context with the standard metadata fields.
    table_context = {field: hit.get(field) for field in headers}
    source = hit.get('_source')
    if source is not None:
        for source_field in source.keys():
            table_context[str(source_field)] = source.get(str(source_field))
            headers.append(source_field)
    return table_context, headers
def scaler(scale):
    """Create a function that scales its argument by a specific value."""
    # A closure over `scale`; equivalent to the nested-def version.
    return lambda value: value * scale
from typing import Union
import re
def mask_cnpj(texto: str) -> Union[str, None]:
    """
    Mask (anonymize) CPF numbers (###.###.###-##) found in the text.

    Note: despite the function name, the pattern matches the Brazilian CPF
    format, not CNPJ — the original docstring already said "CPF".

    :param texto: text to scan; non-string input yields None
    :return: the text with every CPF replaced by "XXX-###.###-XX",
        or None for non-string input
    """
    if not isinstance(texto, str):
        return None
    # Raw string avoids invalid-escape warnings; compile hoisted out of the
    # replacement loop.
    cpf_pattern = re.compile(r'[0-9]{3}\.[0-9]{3}\.[0-9]{3}-[0-9]{2}')
    masked_text = texto
    for cpf in cpf_pattern.findall(texto):
        masked_cpf = f"XXX-{cpf[4:-3]}-XX"
        masked_text = masked_text.replace(cpf, masked_cpf)
    return masked_text
def _remove_empty_events(sse):
"""
Given a sequence of synchronous events (SSE) `sse` consisting of a pool of
pixel positions and associated synchronous events (see below), returns a
copy of `sse` where all empty events have been removed.
`sse` must be provided as a dictionary of type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse : dict
A dictionary of pixel positions `(i, j)` as keys, and sets `S` of
synchronous events as values (see above).
Returns
-------
sse_new : dict
A copy of `sse` where all empty events have been removed.
"""
sse_new = sse.copy()
for pixel, link in sse.items():
if link == set([]):
del sse_new[pixel]
return sse_new | 0596d43cc75fdd040c5096e3ddb81277b48d7456 | 34,332 |
import calendar
def utc_datetime_to_timestamp(dt):
    """
    Converts datetime (UTC) to Unix timestamp.

    :type dt: datetime
    :rtype: int
    """
    utc_time_tuple = dt.utctimetuple()
    return calendar.timegm(utc_time_tuple)
def time_formatter(seconds: int, show: bool = True):
    """Convert a duration in seconds into days, hours, minutes and seconds.

    Args:
        seconds: the duration in seconds.
        show: if True (default), print the formatted message and return
            None; if False, return the formatted message string instead
            of printing it.

    Returns:
        The formatted message string when ``show`` is False, otherwise None.
        (Note: zero-valued components are omitted, so e.g. exactly 60
        seconds formats as "1 minutes, " with a trailing separator.)
    """
    d = int(seconds // 86400)
    h = int(seconds // 3600 % 24)
    m = int((seconds % 3600) // 60)
    # Keep fractional seconds to millisecond precision.
    s = round(seconds % 60, 3)
    msg = ""
    if d > 0:
        msg += f"{d} days, "
    if h > 0:
        msg += f"{h} hours, "
    if m > 0:
        msg += f"{m} minutes, "
    if s > 0:
        msg += f"{s} seconds."
    if show:
        print(msg)
    else:
        return msg
def remove_comments(s):
    """Removes the comments starting with # in the text."""
    if "#" not in s:
        return s
    # Keep everything before the first '#', trimmed of whitespace.
    return s.split("#", 1)[0].strip()
def mkcols(l, rows):
    """
    Compute the size of our columns by first padding the list to a multiple
    of the row height and then splitting it into column lists of that height.

    :param l: items to arrange into columns
    :param rows: column height
    :return: list of columns (lists), padded with "" as needed

    Fix: the original appended padding directly to the caller's list,
    mutating the argument; we now work on a copy.
    """
    padded = list(l)
    while len(padded) > rows and len(padded) % rows != 0:
        padded.append("")
    return [padded[i:i + rows] for i in range(0, len(padded), rows)]
def get_substrings(string, n):
    """Return the distinct substrings of length ``n`` in ``string``.

    The order of the result is arbitrary (it comes from a set).
    """
    # Set comprehension deduplicates in one pass; the original built a set
    # and then copied it element-by-element with an identity comprehension.
    return list({string[i:i + n] for i in range(len(string) - n + 1)})
import re
def rmspecialchars(value):
    """Strip every character except ASCII letters, digits, period and hyphen.

    Parameters
    ----------
    value : string
        Input text that may contain special characters.

    Returns
    -------
    string
        ``value`` with everything outside ``[A-Za-z0-9.-]`` removed, e.g.
        ``"*6.5_"`` -> ``"6.5"``, ``"%&!@#8.32&#*;"`` -> ``"8.32"``,
        ``"ICE"`` -> ``"ICE"``.
    """
    # ASCII alphanumerics plus '.' and '-' are exactly the characters the
    # original regex [^A-Za-z0-9.-]+ preserved.
    kept = (ch for ch in value
            if ch.isascii() and (ch.isalnum() or ch in ".-"))
    return "".join(kept)
import numpy
def Anscombe_Poisson_residual(model, data, mask=None):
    """
    Return the Anscombe Poisson residuals between model and data.

    Parameters
    ----------
    model, data : masked arrays exposing a `folded` attribute and a `fold()`
        method (frequency-spectrum objects; presumably dadi/moments Spectrum
        instances -- TODO confirm against callers).
    mask : float, optional
        Level in `model` below which the returned residual array is masked.
        This excludes very small values where the residuals are not normal.
        1e-2 seems to be a good default for the NIEHS human data.
        (model = 1e-2, data = 0, yields a residual of ~1.5.)

    Residuals defined in this manner are more normally distributed than the
    linear residuals when the mean is small. See this reference below for
    justification: Pierce DA and Schafer DW, "Residuals in generalized linear
    models" Journal of the American Statistical Association, 81(396)977-986
    (1986).

    Note that I tried implementing the "adjusted deviance" residuals, but they
    always looked very biased for the cases where the data was 0.
    """
    # Fold the model to match folded data so entries compare like-for-like.
    if data.folded and not model.folded:
        model = model.fold()
    # Because my data have often been projected downward or averaged over many
    # iterations, it appears better to apply the same transformation to the data
    # and the model.
    # For some reason data**(-1./3) results in entries in data that are zero
    # becoming masked. Not just the result, but the data array itself. We use
    # the power call to get around that.
    # This seems to be a common problem, that we want to use numpy.ma functions
    # on masked arrays, because otherwise the mask on the input itself can be
    # changed. Subtle and annoying. If we need to create our own functions, we
    # can use numpy.ma.core._MaskedUnaryOperation.
    # Anscombe-style transform A(x) = x**(2/3) - x**(-1/3)/9, applied to both.
    datatrans = data**(2./3) - numpy.ma.power(data,-1./3)/9
    modeltrans = model**(2./3) - numpy.ma.power(model,-1./3)/9
    resid = 1.5*(datatrans - modeltrans)/model**(1./6)
    if mask is not None:
        # Mask entries where both model and data are below the cutoff (the
        # non-normal regime), plus any remaining zero-data entries.
        tomask = numpy.logical_and(model <= mask, data <= mask)
        tomask = numpy.logical_or(tomask, data == 0)
        resid = numpy.ma.masked_where(tomask, resid)
    # It makes more sense to me to have a minus sign here... So when the
    # model is high, the residual is positive. This is opposite of the
    # Pierce and Schafner convention.
    return -resid
def braille_bin(char: str) -> str:
    """Inverse of get_braille(): decode one braille glyph to an 8-bit string.

    Bit k of the code-point offset is dot k in ISO/TR 11548-1 order; the
    result permutes those bits into the ordering the encoder expects.
    """
    offset = ord(char) - 0x2800
    bits = [(offset >> k) & 1 for k in range(8)]
    # Same rearrangement as the original: dots 1-3, then 7, dots 4-6, then 8.
    return "".join(str(bits[k]) for k in (0, 1, 2, 6, 3, 4, 5, 7))
import re
def remove_urls(text):
    """Remove http(s) and www URLs from *text*.

    A URL is any run of non-whitespace starting with "https:", "http:" or
    "www." (any trailing punctuation is removed with it).
    """
    # Raw string: in the original, '\S' and '\.' sat in a normal literal,
    # which is an invalid escape sequence (SyntaxWarning on modern Python).
    return re.sub(r'(https?:|www\.)\S*', '', text)
def get_config_rules_statuses(config):
    """Collect every compliance-by-config-rule record via pagination.

    Args:
        config: boto3 AWS Config client.

    Returns:
        List of "ComplianceByConfigRules" entries from all pages.
    """
    paginator = config.get_paginator("describe_compliance_by_config_rule")
    rules = []
    for page in paginator.paginate():
        rules.extend(page["ComplianceByConfigRules"])
    return rules
def check_params(params, field_list):
    """
    Validate that ``params`` satisfies the field specs in ``field_list``.

    Each spec is a dict with keys ``name`` and ``type`` plus the optional
    keys ``required``, ``values``, ``min`` and ``max``.

    :param params: structure that contains the fields
    :type params: ``dict``
    :param field_list: list of dict representing the fields
        [{'name': str, 'required': True/False', 'type': cls}]
    :type field_list: ``list`` of ``dict``
    :return True or raises ValueError
    :rtype: ``bool`` or `class:ValueError`
    """
    for spec in field_list:
        name = spec['name']
        if name not in params:
            # Absent fields only fail validation when explicitly required.
            if spec.get('required') is True:
                raise ValueError("%s is required and must be of type: %s" %
                                 (name, str(spec['type'])))
            continue
        value = params[name]
        if not isinstance(value, spec['type']):
            raise ValueError("%s must be of type: %s. %s (%s) provided." % (
                name, str(spec['type']), value, type(value)))
        if 'values' in spec and value not in spec['values']:
            raise ValueError("%s must be one of: %s" % (
                name, ','.join(spec['values'])))
        # bool is an int subclass, so min/max also apply to booleans,
        # matching the original behaviour.
        if isinstance(value, int):
            if 'min' in spec and value < spec['min']:
                raise ValueError("%s must be greater than or equal to: %s" % (
                    name, spec['min']))
            if 'max' in spec and value > spec['max']:
                raise ValueError("%s must be less than or equal to: %s" % (
                    name, spec['max']))
    return True
def resolve_attr(obj, path):
    """An iterative getattr for navigating dotted attribute paths.

    Args:
        obj: An object for which we want to retrieve a nested attribute.
        path: A dot separated string containing zero or more attribute names.

    Returns:
        The attribute referred to by obj.a1.a2.a3...

    Raises:
        AttributeError: If there is no such attribute.
    """
    target = obj
    remaining = path
    while remaining:
        name, _, remaining = remaining.partition('.')
        target = getattr(target, name)
    return target
def __red_blue_pixel_filter(pixel):
    """Map an RGBA pixel to a binary intensity using its red/blue ratio.

    Returns 255 (white) when the pixel has no blue component or when
    red/blue exceeds 0.95; returns 0 (black) for fully transparent pixels
    and everything else.
    """
    red, blue, alpha = pixel[0], pixel[2], pixel[3]
    if alpha == 0:
        # Fully transparent pixels carry no usable colour information.
        return 0
    if blue == 0 or red / blue > 0.95:
        return 255
    return 0
def column_to_list(data, index):
    """Collect a single column (feature) from a list of rows, in order.

    Args:
        data: Iterable of indexable rows (the samples).
        index: Position of the desired column within each row.

    Returns:
        A list holding the value at ``index`` from every row.
    """
    return [row[index] for row in data]
import sys
def rich_print(*values, **kwargs):
    """Print with optional bold/italic/colour formatting.

    Accepts the same arguments as the builtin print(), plus the keyword
    args 'color' (a named colour or hex string) and 'bold'/'italic'
    (booleans). Output aimed at sys.stderr defaults to red, everything
    else to white; explicit 'color'/'bold' keywords override those
    defaults. If the destination stream's write() does not advertise
    'supports_rich_write' (i.e. it is not connected to a
    qtutils.OutputBox), all formatting is dropped and the call falls back
    to a plain print().
    """
    stream = kwargs.get('file', sys.stdout)
    # Defaults inferred from the destination; stderr output reads as red.
    default_color = 'red' if stream is sys.stderr else 'white'
    bold = kwargs.pop('bold', False)
    color = kwargs.pop('color', default_color)
    italic = kwargs.pop('italic', False)
    if not getattr(stream, 'supports_rich_write', False):
        return print(*values, **kwargs)
    sep = kwargs.pop('sep', ' ')
    end = kwargs.pop('end', '\n')
    charformat = repr((color, bold, italic)).encode('utf8')
    text = sep.join(str(value) for value in values) + end
    stream.write(text, charformat=charformat)
from typing import Dict
from typing import Any
def get_mesh() -> Dict[str, Any]:
    """Front-end call; currently returns an empty mesh description."""
    return dict()
def tral_file_filter(file):
    """Drop duplicate tandem-repeat lines from a merged TRAL output file.

    Two TRs in the same protein with the same starting amino acid should
    never occur; later occurrences are treated as duplicates and removed.

    Parameters:
        file (str): Path to the merged TRAL output file.

    Returns:
        list[str]: The kept lines, header lines (starting with "ID")
        included, with duplicate TR lines removed.
    """
    starts_by_protein = {}
    kept = []
    duplicates = 0
    with open(file, "r") as handle:
        for row in handle:
            # NOTE(review): any data line whose protein ID happens to start
            # with "ID" is also treated as a header -- same as the original.
            if row.startswith("ID"):
                kept.append(row)
                continue
            fields = row.split("\t")
            protein, start = fields[0], fields[1]
            seen = starts_by_protein.setdefault(protein, [])
            if start in seen:
                duplicates += 1
                continue
            seen.append(start)
            kept.append(row)
    print("Filter found {} cases where TRs in the same protein had the same start position.".format(duplicates))
    return kept
def run(*args, **kwargs):
    """
    Model function used for score computation; echoes its inputs.

    :param args: positional arguments, returned unchanged
    :param kwargs: keyword arguments, returned unchanged
    :return: the ``(args, kwargs)`` pair
    """
    result = (args, kwargs)
    return result
from functools import reduce
def clean(puzzle):
    """Format a given puzzle into the form {name: claim}.

    The first sentence must list the inhabitants after a colon; every later
    sentence is "<Name> <verb phrase> <claim>".

    Raises:
        ValueError: If a sentence is attributed to an unknown name.
    """
    def clean_claim(claim):
        """Strip uninformative leading words and stray quotes from a claim."""
        # Same marker order as the original lambda pipeline.
        for marker in ("claims, ", "claims that ", "says, ", "says that ",
                       "tells you, ", "tells you that "):
            if marker in claim:
                claim = claim.split(marker)[1]
        if claim.endswith("'"):
            claim = claim[:-1]
        if claim.startswith("`"):
            claim = claim[1:]
        return claim

    lines = puzzle.split(".")
    name_line, lines = lines[0], lines[1:]
    # Materialize the names into a list: the original kept a `map` iterator,
    # and the `name not in names` membership tests below consumed it, so any
    # speaker after the first lookup was wrongly rejected.
    names = [part.strip()
             for part in name_line.replace("and", ",").split(":")[1].split(",")]
    claims = {}
    for line in lines:
        if len(line) > 1:
            if line.startswith("'"):
                line = line[1:]
            sentence = line.strip()
            space = sentence.find(" ")
            name, claim = sentence[:space], sentence[space + 1:]
            if name not in names:
                raise ValueError("Badly formatted puzzle")
            claims[name] = clean_claim(claim)
    return claims
def selection_sort(nums: list[float]) -> list[float]:
    """Sort a list in-place with Selection Sort and return it.

    Time complexity: O(n^2) best, worst and average.
    Space complexity: O(n) total, O(1) auxiliary.

    Args:
        nums: A list of numbers (mutated in place).

    Returns:
        The same list, sorted ascending.
    """
    n = len(nums)
    for boundary in range(n - 1):
        # Index of the smallest remaining value (first one on ties, as in a
        # strict '<' scan), swapped into the sorted prefix.
        best = min(range(boundary, n), key=nums.__getitem__)
        nums[boundary], nums[best] = nums[best], nums[boundary]
    return nums
def canConstruct(ransomNote, magazine):
    """Return True if `ransomNote` can be built from `magazine`'s letters.

    Each magazine character may be used at most once.

    :type ransomNote: str
    :type magazine: str
    :rtype: bool
    """
    from collections import Counter
    # Counter subtraction keeps only positive counts, so any remainder means
    # some character of the note is not covered by the magazine.  Single
    # pass over each string instead of the original's repeated .count scans.
    return not (Counter(ransomNote) - Counter(magazine))
import itertools
def get_pwdist_indices(sequences):
    """
    From a list of sequences get lower triangle index tuples (i, j), i <= j,
    for pairwise comparisons.

    Parameters
    ----------
    sequences : list of strings
        list of (likely amino acid) strings

    Returns
    -------
    ind_tuples : list
        list of tuples (i, j) with i <= j
    """
    # combinations_with_replacement yields exactly the i <= j pairs in the
    # same order as the original full-product-then-filter, without iterating
    # (and discarding) the upper-triangle half.
    return list(itertools.combinations_with_replacement(range(len(sequences)), 2))
def shot_direct_neighbors(graph, reconstruction, shot_id):
    """Reconstructed shots sharing reconstructed points with a given shot.

    Note: the given shot itself is included whenever it observes at least
    one reconstructed track (it sees its own tracks).
    """
    return {
        other
        for track in graph[shot_id] if track in reconstruction.points
        for other in graph[track] if other in reconstruction.shots
    }
def fix_iso(job):
    """
    Append "couple xyz" to the fix_ensemble command inside LAMMPS.

    Args:
        job (LAMMPS): Lammps job object

    Returns:
        LAMMPS: The same job object, updated in place.
    """
    key = "fix___ensemble"
    job.input.control[key] = job.input.control[key] + " couple xyz"
    return job
def max_rectangle_area(histogram):
    """Find the area of the largest rectangle that fits entirely under
    the histogram.

    Classic stack-based linear scan: the stack holds (start, height) pairs
    of rectangles still open; each lower bar closes every taller one.
    """
    open_rects = []
    best = 0
    for pos, height in enumerate(histogram):
        start = pos
        # Close rectangles strictly taller than the current bar.
        while open_rects and open_rects[-1][1] > height:
            left, h = open_rects.pop()
            best = max(best, h * (pos - left))
            start = left
        # An equal height extends the already-open rectangle; only a
        # strictly taller bar opens a new one.
        if not open_rects or open_rects[-1][1] < height:
            open_rects.append((start, height))
    # Flush rectangles still open at the histogram's right edge.
    end = len(histogram)
    for left, h in open_rects:
        best = max(best, h * (end - left))
    return best
def get_version_v2(uri):
    """
    Canned response nova v2 version.

    Cf: http://developer.openstack.org/api-ref-compute-v2.1.html
    #listVersionsv2.1
    """
    links = [
        {"href": uri, "rel": "self"},
        {"href": "http://docs.openstack.org/",
         "type": "text/html",
         "rel": "describedby"},
    ]
    media_types = [{
        "base": "application/json",
        "type": "application/vnd.openstack.compute+json;version=2",
    }]
    version = {
        "status": "SUPPORTED",
        "updated": "2011-01-21T11:33:21Z",
        "links": links,
        "min_version": "",
        "version": "",
        "media-types": media_types,
    }
    return {"version": version}
def number_of_peaks(ds):
    """
    Number of peaks found in the smoothed waveform, read directly from the
    GLAH14 dataset's `n_peaks` variable (passed through unchanged).
    """
    return ds.n_peaks
def read_iba_file(file_path):
    """
    Read a text file and return its rows.

    :param file_path: absolute file path
    :return: a list of strings, each item being a row in the text file
    :rtype: list
    """
    # Context manager: the original left the file handle open (leak).
    with open(file_path, 'r') as f:
        text = f.read()
    # Drop any stray carriage returns, then split on newlines (a trailing
    # newline therefore yields a final empty string, as before).
    return text.replace('\r', '').split('\n')
def format_messages(messages_dict):
    """ Formats input messages in Polymer format to native Android format.

    Keys have their hyphens replaced with underscores; values have their
    apostrophes escaped with a backslash.
    """
    return {key.replace("-", "_"): text.replace("'", "\\'")
            for key, text in messages_dict.items()}
from functools import reduce
def getAllTextWords(texts):
    """ Return the set of every word appearing in any of the given texts. """
    words = set()
    for word_list in texts.values():
        words |= set(word_list)
    return words
import sys
import os
def file_is_available(filename):
    """
    Check whether a file exists and is not locked by another process.

    Windows-only: renaming a file onto itself fails with OSError while any
    other process holds it open.
    https://stackoverflow.com/a/37256114/2302759
    The SO thread also includes solutions for Linux.

    Returns False for missing or in-use files, True otherwise.
    """
    if sys.platform != "win32":
        raise Exception("`check_if_file_is_available` is only implemented for Windows, the program will not work correctly on other operating systems.")
    if not os.path.exists(filename):
        return False
    try:
        os.rename(filename, filename)
    except OSError:
        return False
    return True
def get_tensor_dependencies(tensor):
    """
    Utility method to get all dependencies (including placeholders) of a
    tensor, walking backwards through the graph.

    Args:
        tensor (tf.Tensor): The input tensor.

    Returns: Set of all dependencies (including needed placeholders) for the
        input tensor.
    """
    deps = set(tensor.op.inputs)
    for parent in tensor.op.inputs:
        deps |= get_tensor_dependencies(parent)
    return deps
def _get_vals_wo_None(iter_of_vals):
    """ Return the given values as a list with every None dropped. """
    return list(filter(lambda v: v is not None, iter_of_vals))
import pandas as pd
import os
def load_plot_data(attack_name):
    """
    Read back data saved with log_plot_data.

    :param attack_name: string of attack name (the folder to read from)
    :return: a pandas DataFrame containing plot data.
    """
    plot_path = os.path.join('Attack Logs', attack_name, 'plot_data')
    return pd.read_csv(plot_path)
def ao_ordering(l):
    """Return the list of Cartesian basis functions with angular momentum l.

    Each basis function is a tuple (nx, ny, nz) of Cartesian powers.

    Raises:
        NotImplementedError: for l > 2 (the ordering beyond d functions is
            undefined here).
    """
    if l == 0:
        return [(0,0,0)]
    elif l == 1:
        return [(1,0,0), (0,1,0), (0,0,1)]
    elif l == 2:
        # ordering of d-functions in TeraChem: dxy,dxz,dyz,dxx,dyy,dzz
        return [(1,1,0), (1,0,1), (0,1,1), (2,0,0), (0,2,0), (0,0,2)]
    else:
        # The integral routines work for any angular momentum, but what ordering should
        # be used for f,g,etc. functions?
        # Bug fix: the original raised NotImplemented, which is a sentinel
        # value, not an exception -- calling it produced a TypeError.
        raise NotImplementedError("Angular momenta higher than s,p and d functions are not implemented!")
def read_rescue(spark):
    """
    Load the animal rescue CSV from HDFS into a Spark DataFrame.

    Args:
        spark (SparkSession): active Spark session

    Returns:
        spark DataFrame: animal rescue data (header row used for column
        names, column types inferred).
    """
    source_path = "/training/animal_rescue.csv"
    return spark.read.csv(source_path, header=True, inferSchema=True)
import torch
def process_ckp(ckp_path):
    """Hack that enables checkpointing from mid-epoch.

    A checkpoint taken mid-epoch still records in-flight work under the
    'current' progress counters of the fit loop's validation loop. This
    folds each 'current' counter into its 'total' and zeroes it -- for both
    the val dataloader progress and the val batch progress -- then rewrites
    the checkpoint file in place.

    NOTE(review): assumes the PyTorch Lightning fit-loop checkpoint schema
    with the 'loops'/'fit_loop' keys used below -- confirm against the
    Lightning version in use.

    :param ckp_path: path to the checkpoint; an empty path or Lightning's
        auto-save file are passed through untouched (returns '').
    :return: the (possibly rewritten) checkpoint path, or '' when skipped.
    """
    if not ckp_path:
        return ''
    if ckp_path == '.pl_auto_save.ckpt':
        # Leave Lightning's own auto-save checkpoint alone.
        return ''
    ckp = torch.load(ckp_path, map_location='cpu')
    # Fold the in-flight ('current') val dataloader counters into 'total'.
    for key in ckp['loops']['fit_loop'][
        'epoch_loop.val_loop.dataloader_progress']['current'].keys():
        ckp['loops']['fit_loop']['epoch_loop.val_loop.dataloader_progress'][
            'total'][key] += ckp['loops']['fit_loop'][
            'epoch_loop.val_loop.dataloader_progress']['current'][key]
        ckp['loops']['fit_loop']['epoch_loop.val_loop.dataloader_progress'][
            'current'][key] = 0
    # Same folding for the val epoch-loop batch progress counters.
    for key in ckp['loops']['fit_loop'][
        'epoch_loop.val_loop.epoch_loop.batch_progress']['current'].keys():
        ckp['loops']['fit_loop'][
            'epoch_loop.val_loop.epoch_loop.batch_progress']['total'][
            key] += ckp['loops']['fit_loop'][
                'epoch_loop.val_loop.epoch_loop.batch_progress'][
                'current'][key]
        ckp['loops']['fit_loop'][
            'epoch_loop.val_loop.epoch_loop.batch_progress']['current'][
            key] = 0
    # Persist the normalized counters back to the same file.
    torch.save(ckp, ckp_path)
    return ckp_path
def to_bool(val):
    """Conservatively cast to boolean.

    Only None, existing booleans and the strings 'true'/'false' (any case)
    are accepted; lists, numbers and other objects raise ValueError instead
    of being truthiness-cast.
    """
    if val is None:
        return None
    if isinstance(val, bool):
        return val
    if isinstance(val, str):
        lowered = val.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
        raise ValueError("Could not convert string {val} to boolean. Expecting string to either say 'true' or 'false' (not case-sensitive).".format(val=val))
    raise ValueError("Could not convert {val} to boolean. Expect either boolean or string.".format(val=val))
def get_shm_dir():
    """Return the shared-memory directory used for temporary scratch files."""
    return "/dev/shm"
def node_value(node, input_values, neuron_outputs):  # PROVIDED BY THE STAFF
    """Look up (never compute) the output value of a node.

    A string node is resolved first against `input_values`, then against
    `neuron_outputs`; a numeric node (such as the constant -1) is its own
    value.

    Raises:
        KeyError: if a string node appears in neither dictionary.
        TypeError: if node is neither a string nor a number.
    """
    if isinstance(node, (int, float)):
        # Constant inputs evaluate to themselves.
        return node
    if isinstance(node, str):
        for table in (input_values, neuron_outputs):
            if node in table:
                return table[node]
        raise KeyError("Node '{}' not found in either the input values or neuron outputs dictionary.".format(node))
    raise TypeError("Node argument is {}; should be either a string or a number.".format(node))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.