content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def scan_album_folder(folder, file_list):
    """
    Renames all files in a folder. If all the files in the folder have the
    same Year and Album metadata, the folder itself will be renamed to the
    format "[YEAR] ALBUM".

    Args:
        folder: path of the folder being scanned.
        file_list: names of the files inside `folder`.

    Returns:
        dict with counters 'found', 'renamed', 'unchanged', 'missing' and
        'folder_rename' (a (src, dst) tuple when the folder should be
        renamed, otherwise '').
    """
    folder_data = []
    folder_counts = {'found': 0, 'renamed': 0,
                     'unchanged': 0, 'missing': 0, 'folder_rename': ''}
    # Bug fix: the original had a dead duplicate of the consistency check
    # here, evaluated while folder_data was still empty (always False, and
    # its body was `pass`); removed.
    for file in file_list:
        folder_d = rename_file(folder, file)
        if folder_d is not None:
            folder_counts[folder_d[2]] += 1
            folder_counts['found'] += 1
            folder_data.append(folder_d)
    # Rename the folder only when every file reported the same (year, album).
    if folder_data\
            and len(folder_data[0]) == 3\
            and folder_data[0][0]\
            and folder_data[0][1]\
            and all((x[0] == folder_data[0][0] and x[1] == folder_data[0][1]) for x in folder_data):
        folder_name = YEAR_ENCLOSER[0] + \
            folder_data[0][0] + YEAR_ENCLOSER[1] + ' ' + folder_data[0][1]
        parent_path = re.sub(r'[^\\/]+[\\/]?$', '', folder)
        if folder != '.' and folder != parent_path + folder_name:
            counter = 2
            base_dir = parent_path + folder_name
            # strip any trailing dots (illegal at the end of Windows dir names)
            match = re.search(r'(.*?)\.+$', base_dir)
            if match is not None:
                base_dir = match.group(1)
            base_dir = base_dir.strip()
            try_dir = base_dir
            # probe "name", "name (2)", ... until an unused directory is found
            while os.path.isdir(try_dir) and counter < 100:
                if try_dir == folder:
                    break
                try_dir = base_dir + ' (' + str(counter) + ')'
                counter += 1
            if try_dir != folder:
                folder_counts['folder_rename'] = (folder, try_dir)
    return folder_counts
def log(s):
    """
    Log a single line at a time to avoid cluttering the output.
    In verbose mode, print everything and never clear.
    """
    message = f"[outsource]: {s}"
    if not VERBOSE:
        # erase the current line, then overwrite it (carriage return)
        sys.stdout.write("\033[K")
        print(message, end="\r")
    else:
        print(message)
def predict_with_inferer(
    images: Tensor, network, keys: List[str], inferer: Optional[SlidingWindowInferer] = None
) -> Dict[str, List[Tensor]]:
    """
    Predict network dict output with an inferer. Compared with directly output network(images),
    it enables a sliding window inferer that can be used to handle large inputs.
    Args:
        images: input of the network, Tensor sized (B, C, H, W) or (B, C, H, W, D)
        network: a network that takes an image Tensor sized (B, C, H, W) or (B, C, H, W, D) as input
            and outputs a dictionary Dict[str, List[Tensor]] or Dict[str, Tensor].
        keys: the keys in the output dict, should be network output keys or a subset of them.
        inferer: a SlidingWindowInferer to handle large inputs.
    Return:
        The predicted head_output from network, a Dict[str, List[Tensor]]
    Example:
        .. code-block:: python
            # define a naive network
            import torch
            import monai
            class NaiveNet(torch.nn.Module):
                def __init__(self, ):
                    super().__init__()
                def forward(self, images: torch.Tensor):
                    return {"cls": torch.randn(images.shape), "box_reg": [torch.randn(images.shape)]}
            # create a predictor
            network = NaiveNet()
            inferer = monai.inferers.SlidingWindowInferer(
                roi_size = (128, 128, 128),
                overlap = 0.25,
                cache_roi_weight_map = True,
            )
            network_output_keys=["cls", "box_reg"]
            images = torch.randn((2, 3, 512, 512, 512))  # a large input
            head_outputs = predict_with_inferer(images, network, network_output_keys, inferer)
    """
    if inferer is None:
        raise ValueError("Please set inferer as a monai.inferers.inferer.SlidingWindowInferer(*)")
    # The inferer flattens the per-key feature-map lists into one sequence;
    # slice it back into equal-length runs, one run per requested key.
    flat_outputs = inferer(images, _network_sequence_output, network, keys=keys)
    levels_per_key: int = len(flat_outputs) // len(keys)
    return {
        key: list(flat_outputs[idx * levels_per_key: (idx + 1) * levels_per_key])
        for idx, key in enumerate(keys)
    }
def split_data(line):
    """
    Split the variables found on a line.

    Extracts every ``key: number`` value from a comma-separated line.

    Args:
        line: string of the form ``"x: 1.5, y: 2, ..."``.

    Returns:
        list of floats when more than one value was found, otherwise [].
    """
    # Compile once, outside the loop (the original recompiled per item).
    # Matches a number preceded by ':' and one more character, e.g. ": -1.5".
    word_parse = re.compile(r''' ((?<=:.)-*[0-9]+\.*[0-9]*)''', re.X)
    data = []
    for item in line.split(", "):
        parts = word_parse.findall(item)
        if parts:
            data.append(float(parts[0]))
    # Preserve original contract: a single value is treated as "no data".
    return data if len(data) > 1 else []
def show_outcome_group_global(request_ctx, id, **request_kwargs):
    """
    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param id: (required) ID
    :type id: string
    :return: Show an outcome group
    :rtype: requests.Response (with OutcomeGroup data)
    """
    # Build the endpoint URL from the context's base API URL.
    url = request_ctx.base_api_url + '/v1/global/outcome_groups/{id}'.format(id=id)
    return client.get(request_ctx, url, **request_kwargs)
def md5sum(file: str) -> str:
    """
    Create a string with the md5 of a given file.

    :param file: filename of the file whose md5 is computed
    :return: md5 hex digest string
    """
    md5_hash = hashlib.md5()
    # Read in fixed-size chunks so arbitrarily large files never have to be
    # loaded fully into memory (the original read the whole file at once and
    # also shadowed the `file` parameter with the file handle).
    with open(file, "rb") as fh:
        for chunk in iter(lambda: fh.read(65536), b""):
            md5_hash.update(chunk)
    return md5_hash.hexdigest()
def read_data(filetype, filename, prn):
    """Calls the appropriate position reader function based on the filetype.

    Looks up a function named ``<filetype>_data`` in the module/local scope
    and calls it as ``func(filename, prn)``.

    Raises:
        NotImplementedError: if no ``<filetype>_data`` function exists.
    """
    func_name = filetype + '_data'
    possibles = globals().copy()
    possibles.update(locals())
    func = possibles.get(func_name)
    if func is None:
        # Bug fix: report the looked-up *name*; `func` is None here, and the
        # original's `func + '...'` raised TypeError instead of this error.
        raise NotImplementedError(func_name + ' is not an implemented function.')
    return func(filename, prn)
def test_successful_binary_file_input_output_to_file(
    identifier_name: str,
    use_header_guard: bool,
    binary_mode_flag: str,
    output_to_file_flag: str,
    output_filename: str,
    tmp_path: Path,
):
    """Test that a binary file can be read successfully and the correct header
    is generated and written to a file.

    Also verifies the return code is 0 and that nothing lands on standard
    output or standard error.
    """
    target = tmp_path / output_filename
    completed = run_cpp11_embed(
        TEST_FILES_DIR / "one_line.txt",
        identifier_name,
        use_header_guard,
        other_arguments=(binary_mode_flag, output_to_file_flag, target),
    )
    # "abcdef" as byte values
    expected_bytes = "{97, 98, 99, 100, 101, 102}"
    expected_header = _get_expected_binary_data_header(
        identifier_name, use_header_guard, expected_bytes, 6
    )
    assert completed.stdout == "", "Nothing written to standard output"
    assert target.read_text() == expected_header, "Correct header written to file"
    assert completed.stderr == "", "No errors reported"
    assert completed.returncode == 0, "No errors reported"
def processContours(contours: List[float], contourpoints: List[List[float]], frame: pims.frame.Frame, debug=False) -> Tuple[List[List[float]], pims.frame.Frame]:
    """Get the centre point of the bounding box of each contour.

    Parameters
    ----------
    contours : List[float]
        List of contours to find bounding boxes for.
    contourpoints : List[List[float]]
        List that the bounding-box centres are appended to (mutated in place).
    frame : pims.frame.Frame
        Frame from which the contours are from.
    debug : bool, optional
        If true then draw the bounding boxes onto the frame.

    Returns
    -------
    Tuple[List[List[float]], pims.frame.Frame]
        List of bounding-box centres, and the frame.
    """
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        # record the centre of the bounding box
        contourpoints.append([x + (w / 2), y + (h / 2)])
        if debug:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return contourpoints, frame
def simulate_games(num_games, switch, num_doors=3):
    """
    Simulate multiple games of the Monty Hall problem.
    Parameters:
        - num_games: Integer, the number of games you want to simulate.
        - switch: Boolean, whether or not your strategy is to switch doors
          after the reveal.
        - num_doors: Integer, the number of doors in the game. Default is 3
          for the classic game with 2 goats and 1 car.
    Returns:
        A string of the form 'winning percentage: XX.XX%' summarising the
        fraction of simulated games that were won.
    Raises:
        ValueError: if `num_games` is not an integer >= 1.
    """
    if not isinstance(num_games, int) or num_games < 1:
        raise ValueError('`num_games` must be an integer greater than or equal to 1.')
    wins = 0
    # `play` returns 1 for a win and 0 for a loss, so summing counts wins.
    for _ in range(num_games):
        wins += play(switch, num_doors)
    return f'winning percentage: {wins / num_games:.2%}'
def prod(iterable: Iterable):
    """math.prod support for Python versions < v3.8.

    Returns the product of all elements of `iterable` (1 for an empty
    iterable).  The original's `-> Iterable` return annotation was wrong:
    the result is a single value, not an iterable, so it has been removed.
    """
    return functools.reduce(operator.mul, iterable, 1)
def compress_video(video_path):
    """
    Compress video in place by running ``gzip`` on it.
    :param video_path: Path to the video.
    :return: True if gzip exited with status 0 (success), False otherwise.
    """
    return subprocess.call(["gzip", video_path]) == 0
def trim_resource(resource):
    """Strip surrounding whitespace and slashes from a resource string."""
    surrounding_junk = " \t\n\r/"
    return resource.strip(surrounding_junk)
def main():
    """
    Show the original image, then show the same image reflected vertically.
    """
    original_mt = SimpleImage('images/mt-rainier.jpg')
    original_mt.show()
    reflected = reflect('images/mt-rainier.jpg')
    reflected.show()
def wikipedia_request_page_from_geocoding(flatitude, flongitude):
    """ Get list of wikipedia page identifiers related to the specified geocode """
    places_list = []
    loc = "{}|{}".format(flatitude, flongitude)
    print(loc)
    query = {
        "action": "query",
        "list": "geosearch",
        "gscoord": loc,
        "gsradius": __RADIUS_DEFAULT__,
        "gslimit": __GS_LIMIT_DEFAULT__,
        "format": "json",
    }
    # API Request
    response = requests.get(url=__WIKIPEDiA_URL__, params=query)
    if response.status_code != 200:
        print('mediawiki reply error')
        lg.warning('mediawiki reply error')
    else:
        places_list = response.json()['query']['geosearch']
        if not places_list:
            print('address not found')
            lg.warning('address not found')
        else:
            # report every match: index, page id, title and distance in metres
            for idx, place in enumerate(places_list):
                print(idx, "W#{}".format(place['pageid']), place['title'], place['dist'], "m")
    del response
    return places_list
def globalBinarise(logger, img, thresh, maxval):
    """
    Build a global binary mask from a numpy array image: every element that
    is greater than or equal to `thresh` is set to `maxval`, everything else
    stays zero.

    Parameters
    ----------
    img : {numpy.ndarray}
        The image to perform binarisation on.
    thresh : {int or float}
        The global threshold for binarisation.
    maxval : {np.uint8}
        The value assigned to an element that is greater
        than or equals to `thresh`.

    Returns
    -------
    mask : {numpy.ndarray, dtype=np.uint8}
        A binarised image of {0, maxval}.
    """
    try:
        mask = np.zeros(img.shape, np.uint8)
        mask[img >= thresh] = maxval
    except Exception as e:
        # logger.error(f'Unable to globalBinarise!\n{e}')
        print((f"Unable to globalBinarise!\n{e}"))
    return mask
def connect_to_db(schema='sys', database='', return_df=True):
    """Connect to the MySQL database and optionally fetch table data.

    Args:
        schema (str): MySQL table schema. Default to 'sys'.
        database (str): MySQL table name. Default to ''.
        return_df (bool): Condition to return the dataframe.

    Returns:
        (db_tb, engine, session) when return_df is True,
        otherwise (engine, session).

    Raises:
        Exception: re-raised when the database connection fails.
    """
    load_dotenv()
    db_user = os.getenv("db_user")
    db_password = os.getenv("db_password")
    db_host = 'traffic-accidents.c1npf904zyic.sa-east-1.rds.amazonaws.com'
    db_port = '3306'
    params = f'{db_user}:{db_password}@{db_host}:{db_port}/{schema}'
    try:
        engine = create_engine("mysql+mysqlconnector://%s" % params, max_identifier_length=128, pool_size=1)
        engine.connect()
        Session = sessionmaker(bind=engine)
        session = Session()
    except Exception:
        # Bug fix: the original only logged and fell through, then crashed
        # with NameError on the undefined `engine`/`session`; re-raise so the
        # caller sees the real connection error.
        logging.error("%s - Could not connect to database", database)
        raise
    if return_df:
        db_tb = pd.read_sql(f"SELECT * FROM {schema}.{database}", session.bind)
        return db_tb, engine, session
    return engine, session
def times_by_stencil(results):
    """Collects times of multiple results by stencils.

    Args:
        results: List of `Result` objects.

    Returns:
        A tuple of lists (stencils, times).

    Raises:
        ValueError: if the results do not all cover the same stencils.
    """
    reference = results[0].stencils
    if any(r.stencils != reference for r in results):
        raise ValueError('All results must include the same stencils')
    collected = by_stencils(r.times_by_stencil() for r in results)
    return reference, collected
def get_bulk_and_slab(bulk, miller=[1,1,1], layers=4, vacuum=16):
    """Create a slab and conventional bulk cell from a bulk cell input
    Parameters
    ----------
    bulk : pymatgen structure
        pymatgen structure of the bulk material
    miller : list
        list of miller indices
    layers : int
        number of atomic layers
    vacuum : float, optional
        thickness of vacuum
    Returns
    -------
    oriented_primitive_bulk_o : pymatgen structure
        pymatgen structure of the bulk
    primitive_slab : pymatgen structure
        pymatgen structure of the slab
    """
    # NOTE(review): `miller=[1,1,1]` is a mutable default argument; it is only
    # read here, but confirm no caller mutates it.
    #vaccum is now also in unit planes!!!! we adjust vacuum anyways in the end
    # to do. set absolute thickness and then calculate how many layers these are, making it
    # an even number in total, so no atom is exactlty at 0.5 so we have always one central
    # layer that is unrelaxed when doubling the cell!!!
    # Achtung: reorient lattice has Problems: orthogonal cell is the wrong!!!
    # so do it by hand via newstructure lattice
    sl = SlabGenerator(bulk, miller, layers, vacuum, lll_reduce=True,
                       center_slab=True, in_unit_planes=True, primitive=True,
                       max_normal_search=None, reorient_lattice=False)
    slab = sl.get_slab()
    primitive_slab = slab.get_orthogonal_c_slab()
    # shift the topmost atom to fractional (x, y) = (0, 0) in-plane
    inplaneshift = primitive_slab.frac_coords[np.argmax(primitive_slab.frac_coords[:,2])]
    inplaneshift[2] = 0
    primitive_slab = Structure(
        Lattice.from_lengths_and_angles(
            primitive_slab.lattice.lengths, primitive_slab.lattice.angles),
        primitive_slab.species, primitive_slab.frac_coords-inplaneshift,
        validate_proximity=True, to_unit_cell=True,
        coords_are_cartesian=False,)
    slab_bulkref = slab.oriented_unit_cell
    #The bulkref is not primitive and not oriented like slab!!!
    zgen = ZSLGenerator_mod()
    atoms = AseAtomsAdaptor.get_atoms(slab_bulkref)
    # match the bulk-reference surface lattice to the slab surface lattice
    res = list(zgen(slab_bulkref.lattice.matrix[:2,:],
                    slab.lattice.matrix[:2,:], lowest=True))
    #print(res)
    #Attention: ZSLgen uses reduced_vectors (Zur) which randomly interchanges a and b vectors.
    #This is totally shit to get to the same supercell. As we cannot in this way get the real transformation
    # try all +-1/0 unimodular 2x2 sign/swap matrices to undo the Zur reduction
    tests = [np.array(i) for i in list(combinations(list(product([1, 0, -1] , repeat = 2)), 2))
             if np.isclose(np.abs(np.linalg.det(np.array(i))), 1.)]
    for t in tests:
        tt = np.dot(t, np.dot(res[0]['substrate_transformation'], slab.lattice.matrix[:2,:]))
        if np.isclose(slab_bulkref.lattice.matrix[:2,:]-tt, 0).all():
            break
        # NOTE(review): this unconditional `break` means only the FIRST
        # candidate `t` is ever tried and `inv` is computed from it even when
        # the isclose test above failed — looks like a latent bug; confirm.
        inv = np.linalg.inv(np.dot(t, res[0]['substrate_transformation']))
        break
    backtrafomatrix = np.linalg.inv(
        np.array([t[0].tolist() + [0], t[1].tolist() + [0], [0,0,1]])).astype(int)
    sst = SupercellTransformation(backtrafomatrix)
    newbulkcell = sst.apply_transformation(slab_bulkref)
    t = res[0]['substrate_transformation']
    bstrafo = np.array([t[0].tolist() + [0], t[1].tolist() + [0], [0,0,1]])
    prim_bulk_cell = np.dot( np.linalg.inv(bstrafo), newbulkcell.lattice.matrix)
    # Here we find the in-plane primitive lattice vectors for the bulk cell
    # it seems the lattice is still in directions xyz as the bulk.
    # this is nice because then we can get the exact rotation matrix w.r.t. the bulk conventional cell
    # one could implement the strain contributions here
    # Now we could take over the lattice directly from the slab structure and put e.g. also all slab atoms in the bulk cell
    #they are still not aligned in xyz, which we want to do now!!!
    # seed a one-atom structure in the primitive bulk cell, then re-insert
    # the remaining atoms below (insert failures from proximity are skipped)
    tests = Structure(Lattice(prim_bulk_cell), [list(newbulkcell.species)[0]],
                      [newbulkcell.cart_coords[0]], validate_proximity=True,
                      to_unit_cell=True, coords_are_cartesian=True)
    species = newbulkcell.species
    coords = newbulkcell.cart_coords
    s = tests.copy()
    # we add the other atoms
    for i, sp in enumerate(species):
        try:
            s.insert(i, sp, coords[i],\
                     coords_are_cartesian=True,\
                     validate_proximity=True)
        except:
            pass
    oriented_primitive_bulk = s.get_sorted_structure()
    #put into cell
    oriented_primitive_bulk = Structure(oriented_primitive_bulk.lattice,
                                        oriented_primitive_bulk.species,
                                        oriented_primitive_bulk.cart_coords,
                                        validate_proximity=True,to_unit_cell=True,
                                        coords_are_cartesian=True)
    def test(matrix1, matrix2):
        # matrix1/matrix2 compatible when the a and c vector lengths agree
        # and matrix1 forms a right-handed set
        vecs = (np.isclose(np.linalg.norm(matrix1[0]), np.linalg.norm(matrix2[0]))
                and np.isclose(np.linalg.norm(matrix1[2]), np.linalg.norm(matrix2[2])))
        r = np.cross(matrix1[0], matrix1[1])
        right = (np.dot(r, matrix1[2]) > 0)
        return vecs, right
    # enumerate in-plane axis swaps/flips (x c-axis flip) to match the slab
    combinationslist = [[[1,0],[0,1]], [[-1,0],[0,1]], [[-1,0],[0,-1]], [[1,0],[0,-1]],\
                        [[0,1],[1,0]], [[0,-1],[1,0]], [[0,-1],[-1,0]], [[0,1],[-1,0]], ]
    for c in combinationslist:
        for c3 in [1,-1]:
            m = np.zeros((3,3))
            m[:2,:2] = np.array(c)
            m[2,2] = c3
            newm = np.dot(m, oriented_primitive_bulk.lattice.matrix)
            vecs, right = test(newm, primitive_slab.lattice.matrix)
            if vecs and right:
                break
    sst = SupercellTransformation(m.astype(int))
    oriented_primitive_bulk = sst.apply_transformation(oriented_primitive_bulk)
    #this is the primitive bulk, with surface spanned by 0 and 1 component but not oriented!
    #slab is already orthogonalized an orthognonalized slab
    primitive_slab_L = primitive_slab.lattice.matrix
    primitive_slab_LTM2 = np.cross(primitive_slab_L[0], primitive_slab_L[1])
    primitive_slab_LTM2 /= np.linalg.norm(primitive_slab_LTM2)
    primitive_slab_LT = [primitive_slab_L[0], primitive_slab_L[1], primitive_slab_LTM2]
    # this is prim slab lattice matrix with 1 length in zdir
    # z-component does not matter
    # this is a fake lattice to find rotation matrix in 3D
    #oriented prim bulk is oriented as slab and not as the orthogonalized prim slab lattice
    oriented_primitive_bulk_L = oriented_primitive_bulk.lattice.matrix
    oriented_primitive_bulk_LTM2 = np.cross(oriented_primitive_bulk_L[0],
                                            oriented_primitive_bulk_L[1])
    oriented_primitive_bulk_LTM2 /= np.linalg.norm(oriented_primitive_bulk_LTM2)
    oriented_primitive_bulk_LT = [oriented_primitive_bulk_L[0],
                                  oriented_primitive_bulk_L[1], oriented_primitive_bulk_LTM2]
    # this is a fake lattice to find rotation matrix in 3D
    #it should be tested if this is really a rot (LH and RH lattice is enforced by cross)
    #Note there could be still lattice vector 1 be lattice vector 2
    rot = np.dot(np.linalg.inv(oriented_primitive_bulk_LT), primitive_slab_LT)
    print("THIS VALUE SHOULD BE 1 ALWAYS", np.linalg.det(rot))
    oriented_primitive_bulk_lattice = np.dot( oriented_primitive_bulk_L, rot )
    oriented_primitive_bulk_o = Structure(Lattice(oriented_primitive_bulk_lattice),
                                          oriented_primitive_bulk.species,
                                          oriented_primitive_bulk.frac_coords,
                                          validate_proximity=True,
                                          to_unit_cell=True,
                                          coords_are_cartesian=False)
    return oriented_primitive_bulk_o, primitive_slab
def test_build_ad_multiple_extensions(tmp_path):
    """Build an AD object with multiple extensions and check that we retrieve
    everything in the correct order after writing.
    """
    shape = (4, 5)
    testfile = tmp_path / 'test.fits'
    ad = astrodata.create({})
    # three extensions whose data/tables are keyed by their index (1..3)
    for value in (1, 2, 3):
        ndd = NDData(np.zeros(shape) + value,
                     uncertainty=VarianceUncertainty(np.ones(shape)),
                     mask=np.zeros(shape, dtype='uint16'))
        ad.append(ndd)
        ad[-1].OBJCAT = Table([[value]])
        ad[-1].MYARR = np.zeros(10) + value
    ad.REFCAT = Table([['ref']])
    ad.write(testfile)
    # round-trip: everything must come back in the same order
    reread = astrodata.open(testfile)
    for written_ext, reread_ext in zip(ad, reread):
        assert_array_equal(written_ext.data, reread_ext.data)
        assert_array_equal(written_ext.MYARR, reread_ext.MYARR)
        assert_array_equal(written_ext.OBJCAT['col0'], reread_ext.OBJCAT['col0'])
def gogogo_figure(ipympl, figsize, ax=None):
    """
    gogogo the greatest function name of all

    Returns a (figure, axes) pair, creating a new figure when no axes are
    supplied; with ipympl, creation happens with interactive output off.
    """
    if ax is not None:
        return ax.get_figure(), ax
    if ipympl:
        with ioff:
            fig = figure(figsize=figsize)
            ax = fig.gca()
    else:
        fig = figure(figsize=figsize)
        ax = fig.gca()
    return fig, ax
def unzip6(xs):
    """
    unzip6 :: [(a, b, c, d, e, f)] -> ([a], [b], [c], [d], [e], [f])
    The unzip6 function takes a list of six-tuples and returns six lists,
    analogous to unzip.
    """
    # one lazy list per tuple position
    firsts = L[(row[0] for row in xs)]
    seconds = L[(row[1] for row in xs)]
    thirds = L[(row[2] for row in xs)]
    fourths = L[(row[3] for row in xs)]
    fifths = L[(row[4] for row in xs)]
    sixths = L[(row[5] for row in xs)]
    return firsts, seconds, thirds, fourths, fifths, sixths
def grep(lines=None, expr=None, index=False):
    """
    Similar to the standard unix "grep" but run on a list of strings.
    Returns a list of the matching lines unless index=True is set,
    then it returns the indices.

    Parameters
    ----------
    lines : list
        The list of string lines to check.
    expr : str
        Scalar string expression to search for.
    index : bool, optional
        If this is ``True`` then the indices of matching lines will be
        returned instead of the actual lines. index is ``False`` by default.

    Returns
    -------
    out : list
        The list of matching lines or indices.

    Example
    -------
    Search for a string and return the matching lines:
    .. code-block:: python
        mlines = grep(lines,"hello")
    Search for a string and return the indices of the matching lines:
    .. code-block:: python
        index = grep(lines,"hello",index=True)
    """
    if lines is None: raise ValueError("lines must be input")
    if expr is None: raise ValueError("expr must be input")
    out = []
    # enumerate replaces the original's hand-rolled counter
    for cnt, l in enumerate(np.array(lines, ndmin=1)):
        if re.search(expr, l) is not None:
            # `index is False` (not `not index`) preserved from the original:
            # any non-False value of `index` selects indices
            if index is False:
                out.append(l)
            else:
                out.append(cnt)
    return out
def get_table_map_from_text(sp: BeautifulSoup, keep_table_contents=True) -> Dict:
    """
    Generate table dict only.

    Scans the soup for table floats (`<float name="table">`) and standalone
    `<table>` elements (skipping inline tables), and builds a map keyed by a
    normalized "TABREF..." id. Row elements are decomposed (removed from the
    soup) after extraction.

    :param sp: parsed document soup
    :param keep_table_contents: when True, store the extracted table content;
        when False, store None in the "content" field
    :return: dict mapping normalized ref id -> {num, text, content, ref_id}
    """
    table_map = dict()
    # pass 1: tables wrapped in <float name="table"> elements
    for flt in sp.find_all('float'):
        try:
            if flt.name and flt.get('name') == 'table':
                if flt.get('id'):
                    # normalize table id
                    ref_id = flt.get('id').replace('uid', 'TABREF')
                    # form tabmap entry
                    table_map[ref_id] = {
                        "num": flt.get('id-text', None),
                        "text": None,  # placeholder
                        "content": extract_table(flt) if keep_table_contents else None,
                        "ref_id": ref_id
                    }
                    # drop the raw rows from the soup once captured
                    for row in flt.find_all('row'):
                        row.decompose()
        except AttributeError:
            print('Attribute error with table float: ', flt.name)
            continue
    # pass 2: standalone <table> elements
    for tab in sp.find_all('table'):
        try:
            # skip inline tables
            if tab.get('rend') == 'inline':
                continue
            # process them
            if tab.name and tab.get('id'):
                # normalize table id
                ref_id = tab.get('id').replace('uid', 'TABREF')
                # form tabmap entry (overwrites a float entry with the same id)
                table_map[ref_id] = {
                    "num": tab.get('id-text', None),
                    "text": None,  # placeholder
                    "content": extract_table(tab) if keep_table_contents else None,
                    "ref_id": ref_id
                }
                for row in tab.find_all('row'):
                    row.decompose()
        except AttributeError:
            print('Attribute error with table: ', tab.name)
            continue
    return table_map
def tag_copier(path, cliargs):
    """This is the tag copier worker function.

    It gets a path from the Queue and searches index for the
    same path and copies any existing tags (from index2).
    Updates index's doc's tag and tag_custom fields.

    Args:
        path: tuple of (fullpath, tag, tag_custom, doc_type).
        cliargs: CLI arguments dict; 'index' names the target index.

    Returns:
        True when no matching doc exists in the index (nothing to copy),
        otherwise None after queueing the bulk update.
    """
    doclist = []
    # doc search (matching path) in index for existing tags from index2
    # filename
    f = os.path.basename(path[0])
    # parent path
    p = os.path.abspath(os.path.join(path[0], os.pardir))
    data = {
        "size": 1,
        "_source": ['tag', 'tag_custom'],
        "query": {
            "query_string": {
                "query": "filename: \"" + f + "\" AND path_parent: \"" + p + "\""
            }
        }
    }
    # search ES with the doc_type matching the entry kind; the original
    # duplicated this call in two identical-but-for-doc_type branches
    doc_type = 'directory' if path[3] == 'directory' else 'file'
    res = es.search(index=cliargs['index'], doc_type=doc_type, body=data,
                    request_timeout=config['es_timeout'])
    # mark task done if no matching path in index and continue
    if len(res['hits']['hits']) == 0:
        return True
    # existing tag in index2
    docid = res['hits']['hits'][0]['_id']
    # update tag and tag_custom fields in index
    d = {
        '_op_type': 'update',
        '_index': cliargs['index'],
        '_type': path[3],
        '_id': docid,
        'doc': {'tag': path[1], 'tag_custom': path[2]}
    }
    # Bug fix: the original branched on `path[3] is 'directory'` (unreliable
    # identity comparison against a str literal) yet both branches performed
    # the same append, so the conditional was dead code.
    doclist.append(d)
    index_bulk_add(es, doclist, config, cliargs)
def turn(speed, secs=None, radius=0, reverse=False):
    """
    Makes the robot turn at the specified speed for the specified number of seconds.
    If seconds is a number > 0 the function will block (wait) until that time has elapsed and stop the motors
    If seconds is None then the speeds will be set and the function will return
    Negative speed is a left turn. Speeds are cm/s must be between -30 and 30.
    By default it turns on the spot, but a radius in cm can be specified.
    Args:
        speed (float): Motor speed to base the turn off. If turning on the spot this will be actual speed.
            Will be scaled if radius > 0. Must be between -30 and 30 (cm/s)
        secs (float): Optional number of seconds to leave the motors running at the desired speed before stopping
        radius (float): Optional radius (in cm) to make the turn in
        reverse (boolean): If True then the turn will be done in reverse
    Raises:
        Exception on invalid arguments
    Returns:
        None
    """
    speed = restrictSpeed(speed)
    radius = restrictRadius(radius)
    params = _calcMotorSpeedsAndTime(speed, radius, None, reverse)
    speeds = params["speeds"]
    # `secs is None` means "set speeds and return immediately" (duration 0);
    # fixed the non-idiomatic `secs != None` comparison.
    if secs is not None:
        secs = restrictTime(secs)
    else:
        secs = 0
    write(speeds[0], speeds[1], secs)
def grainfromVertices(R=None,fname='shape.txt',mixed=False,eqv_rad=10.,rot=0.,radians=True,min_res=4):
    """
    This function generates a mesh0 from a text file containing a list of its vertices
    in normalised coordinates over a square grid of dimensions 1 x 1. Centre = (0,0)
    coordinates must be of the form:
    j   i
    x   x
    x   x
    x   x
    .   .
    .   .
    .   .
    and the last coordinate MUST be identical to the first. Additionally function will take
    an array R instead, of the same form.
    Args:
        R: optional array/list of (j, i) vertex pairs; when None, vertices
           are read from `fname` instead
        fname: path of the comma-separated vertex file (used only if R is None)
        mixed: logical; partially filled cells on or off
        eqv_rad: float; equivalent radius (in cells) the grain is scaled to
        rot: float; rotation of the grain (radians, or degrees when
             radians is not True)
        radians: logical; whether `rot` is given in radians
        min_res: int; Minimum resolution allowed for a grain
    Returns:
        mesh_: square array with filled cells, with value 1
               (fractions in [0, 1] per cell when mixed is True)
    """
    if radians is not True: rot = rot*np.pi/180.
    assert eqv_rad > 0, "ERROR: Equivalent radius must be greater than 0!"
    # If no coords provided use filepath
    if R is None:
        J_ = np.genfromtxt(fname,comments='#',usecols=0,delimiter=',')
        I_ = np.genfromtxt(fname,comments='#',usecols=1,delimiter=',')
    # else use provided coords
    elif type(R) == list:
        R = np.array(R)
    if type(R) == np.ndarray:
        J_ = R[:,0]
        I_ = R[:,1]
    # if coords not yet normalised; normalise them onto the range -1. to 1.
    if np.amax(abs(I_)>1.) or np.amax(abs(J_))>1.:
        MAXI = np.amax(I_)
        MINI = np.amin(I_)
        MAXJ = np.amax(J_)
        MINJ = np.amin(J_)
        diffI = MAXI - MINI
        diffJ = MAXJ - MINJ
        # scale coords onto whichever coordinates have the largest difference
        if diffI>diffJ:
            # NOTE(review): J_ is shifted by MINI (not MINJ) here; this may be
            # deliberate (common scale keeps the aspect ratio) but the shift
            # looks suspicious — confirm against the original author's intent.
            I_ = 2.*(I_-MINI)/(MAXI-MINI) - 1.
            J_ = 2.*(J_-MINI)/(MAXI-MINI) - 1.
        else:
            I_ = 2.*(I_-MINJ)/(MAXJ-MINJ) - 1.
            J_ = 2.*(J_-MINJ)/(MAXJ-MINJ) - 1.
    # last point MUST be identical to first; append to end if necessary
    if J_[0] != J_[-1]:
        J_ = np.append(J_,J_[0])
        I_ = np.append(I_,I_[0])
    # equivalent radius is known and polygon area is known
    # scale shape as appropriate
    radius = np.sqrt(polygon_area(I_,J_)/np.pi)
    lengthscale = eqv_rad/radius
    J_ *= lengthscale
    I_ *= lengthscale
    # rotate points according by angle rot
    theta = rot
    ct = np.cos(theta)
    st = np.sin(theta)
    J = J_*ct - I_*st
    I = J_*st + I_*ct
    # find max radii from centre and double it for max width
    radii = np.sqrt(I**2+J**2)
    maxwidth = int(2*np.amax(radii)+2)
    maxwidth = max(maxwidth,min_res)
    # keep the grid an even number of cells across
    if maxwidth%2!=0: maxwidth+=1
    # Add double max rad + 1 for mini mesh dims
    mesh_ = np.zeros((maxwidth,maxwidth))
    # define ref coord as 0,0 and centre to mesh_ centre
    # NOTE(review): qx and qy are assigned but never used below.
    qx = 0.
    qy = 0.
    y0 = float(maxwidth/2.)
    x0 = y0
    I += x0
    J += y0
    # rasterise the polygon: a cell is filled when its centre lies inside;
    # in mixed mode each cell accumulates a 10x10 sub-sample fill fraction
    path = mpath.Path(np.column_stack((I,J)))
    for i in range(maxwidth):
        for j in range(maxwidth):
            in_shape = path.contains_point([i+.5,j+.5])
            if in_shape and mixed == False: mesh_[i,j] = 1.
            elif in_shape and mixed == True:
                for ii in np.arange(i,i+1,.1):
                    for jj in np.arange(j,j+1,.1):
                        in_shape2 = path.contains_point([ii+.05,jj+.05])
                        if in_shape2: mesh_[i,j] += .01
    return mesh_
def snoop_task_log_handler(level=logging.DEBUG):
    """Context manager for a text log handler.
    This captures in memory the entire log of running its context.
    It's used to capture Task logs in the database.
    Args:
        level: log level, by default logging.DEBUG
    Yields:
        The attached logging.StreamHandler; its stream is an in-memory
        StringIO containing everything logged inside the context.
    """
    # NOTE(review): this generator is documented as a context manager, but no
    # @contextlib.contextmanager decorator is visible in this chunk — confirm
    # it is applied at the definition site.
    stream = StringIO()
    handler = logging.StreamHandler(stream)
    handler.setLevel(level)
    # attach to the root logger so every logger's records are captured
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    try:
        yield handler
    finally:
        # always detach, even if the context body raised
        handler.flush()
        root_logger.removeHandler(handler)
def get_block(blockidx, blocksz, obj):
    """
    Given obj, a list, return the intersection of
    obj[blockidx*blocksz:(blockidx+1)*blocksz] and obj.

    Ex: get_block(2, 100, range(250)) returns [200, 201, ..., 249]
    """
    # Python slicing already clamps out-of-range bounds (returning [] or a
    # shortened tail), so the original's explicit length checks were redundant.
    return obj[blockidx * blocksz:(blockidx + 1) * blocksz]
def AddTensors(workspace, init_net, blob_names):
    """Fetch each named blob from the workspace and add it to init_net."""
    for name in blob_names:
        AddTensor(init_net, name, workspace.FetchBlob(str(name)))
def RDS(net,waves,coupons,p,size,seeds,posseed,poswave):
    """Conducts respondent-driven sampling
    Input:
        net: network, networkx graph
        waves: maximum number of waves, integer (use 0 with poswave=True for contract tracing)
        coupons: number of coupons per respondent, integer
        p: probability of participation, float
        size: target sample size
        seeds: number of seeds
        posseed: whether the seed should be HIV-positive, boolean, requires node attribute 'hiv_status' with values of 0 and 1 (positive) for net
        poswave: whether recruitment continues past wave limit for positive agents, boolean, requires node attribute 'hiv_status' with values of 0 and 1 (positive) for net
    Output:
        sampled: list of sampled nodes
    """
    #Check if HIV status is needed
    if posseed or poswave:
        #Check for missing HIV status node attribute
        if nx.get_node_attributes(net,"hiv_status")=={}:
            #Warning
            warnings.warn('Warning Message: no node attribute "hiv_status", posseed and poswave set to False')
            #Set posseed to False
            posseed=False
            #Set poswave to False
            poswave=False
    #Count number of nodes
    n=np.shape(net)[0]
    #Initialize sample (dict: wave number -> list of nodes sampled that wave)
    sample={}
    #Initialize list of already sampled agents
    sampled=[]
    #Check for HIV positive seed
    if posseed:
        #Choose seeds from HIV positive nodes
        seed=rand.choices([x for x,y in net.nodes(data=True) if y['hiv_status']==1],k=seeds)
    #Random seed
    else:
        #Choose seeds from all nodes
        seed=rand.choices(list(range(n)),k=seeds)
    #Store seeds as 0th wave
    sample[0]=seed
    #Add seed to list of sampled agents
    sampled=sampled+seed
    #Initialize wave counter
    wave=0
    #Initialize count of nodes sampled
    nodes=1
    #Check for waves still to be completed, unsampled nodes, nodes sampled in previous wave, and under target sample size
    while wave<waves and nodes<n and sample[wave]!=[] and nodes<size:
        #Increase wave counter
        wave=wave+1
        #Initialize list of nodes sampled in current wave
        sample[wave]=[]
        #loop through nodes sampled in previous wave
        for i in sample[wave-1]:
            #Identify neighbors of node i
            nbrs=list(net[i])
            #Remove already sampled nodes
            nbrs=list(set(nbrs)-set(sampled))
            #Initialize count of used coupons
            used=0
            #Check for unsampled nodes and remaining coupons
            while used<coupons and nbrs!=[]:
                #Sample one node from list of neighbors
                node=rand.choice(nbrs)
                #Probability check on node participation
                if np.random.uniform(0,1)<p:
                    #Add sampled node to list of nodes sampled during current wave
                    sample[wave]=sample[wave]+[node]
                    #Add sampled node to list of sampled nodes
                    sampled=sampled+[node]
                    #Increase counter for sampled nodes
                    nodes=nodes+1
                    #Increase count of used coupons
                    used=used+1
                    #Remove node from list of neighbors
                    nbrs.remove(node)
                else:
                    #Remove node from list of neighbors (refusal consumes no coupon)
                    nbrs.remove(node)
    #Check for continuing past final wave for HIV-positive agents
    if poswave:
        #Create network from last wave
        last=nx.subgraph(net,sample[wave])
        #Generate list of HIV-positive nodes in last wave
        positive=[x for x,y in last.nodes(data=True) if y['hiv_status']==1]
        #Check for HIV-positive nodes in last wave, unsampled nodes, and nodes sampled in previous wave
        while positive!=[] and nodes<n and sample[wave]!=[]:
            wave=wave+1
            #Initialize list of nodes sampled in current wave
            sample[wave]=[]
            #loop through nodes sampled in previous wave
            for i in positive:
                #Identify neighbors of node i
                nbrs=list(net[i])
                #Remove already sampled nodes
                nbrs=list(set(nbrs)-set(sampled))
                #Initialize count of used coupons
                used=0
                #Check for unsampled nodes and remaining coupons
                while used<coupons and nbrs!=[]:
                    #Sample one node from list of neighbors
                    node=rand.choice(nbrs)
                    #Probability check on node participation
                    if np.random.uniform(0,1)<p:
                        #Add sampled node to list of nodes sampled during current wave
                        sample[wave]=sample[wave]+[node]
                        #Add sampled node to list of sampled nodes
                        sampled=sampled+[node]
                        #Increase counter for sampled nodes
                        nodes=nodes+1
                        #Increase count of used coupons
                        used=used+1
                        #Remove node from list of neighbors
                        nbrs.remove(node)
                    else:
                        #Remove node from list of neighbors (refusal consumes no coupon)
                        nbrs.remove(node)
            #Create network from last wave
            last=nx.subgraph(net,sample[wave])
            #Generate list of HIV-positive nodes in last wave
            positive=[x for x,y in last.nodes(data=True) if y['hiv_status']==1]
    return sampled
def get_dataset_mrnet_args(parser, args=[]):
    """
    Get all relevant parameters to handle the dataset
    -> here: MRNET

    Args:
        parser: argparse.ArgumentParser to extend with MRNet arguments.
        args: unused; kept for backward compatibility with callers.

    Returns:
        The same parser with the MRNet dataset arguments registered.
    """
    # determine path depending on the host OS
    if platform.system() == "Linux":
        path = "/home/biomech/Documents/OsteoData/MRNet-v1.0/"
    else:
        path = "C:/Users/Niko/Documents/data/MRNet-v1.0/MRNet-v1.0"
        # path = "C:/Users/ga46yeg/data/MRNet-v1.0"
    # Dataset MRNet:
    # ------------------------------------------------------------------------
    parser.add_argument(
        "--root_dir_mrnet", type=str, default=path, help="Directory of the dataset"
    )
    # NOTE: the original used type=list, which splits a user-supplied string
    # into single characters (e.g. "axial" -> ['a','x','i','a','l']).
    # nargs="+" collects whitespace-separated values into a proper list.
    parser.add_argument(
        "--perspectives",
        nargs="+",
        type=str,
        default=["axial", "coronal", "sagittal"],
        help="Perspectives of the Mr Scans",
    )
    parser.add_argument(
        "--classes",
        nargs="+",
        type=str,
        default=["abn", "acl", "men"],
        help="Classify for these classes",
    )
    # ------------------------------------------------------------------------
    return parser
def update_fields(obj, **kwargs):
    """Set the given attributes on *obj*, then persist exactly those fields.

    Each keyword becomes an attribute assignment; ``obj.save`` is called
    with ``update_fields`` limited to the keys that were changed.
    """
    for field_name, field_value in kwargs.items():
        setattr(obj, field_name, field_value)
    obj.save(update_fields=kwargs.keys())
obj.save(update_fields=kwargs.keys()) | 35,632 |
def convertImageToBase64(image):
    """ Convert image to base64 for transmission
    Args:
        image (obj): opencv image object
    Returns:
        (str): image encoded as base64
    """
    # Encode to JPEG (numpy one-dim array), then to raw bytes, then base64.
    _, jpeg_array = cv2.imencode('.jpg', image)
    raw = jpeg_array.tobytes()
    return base64.b64encode(raw).decode('utf-8')
def anchor_inside_flags(flat_anchors, valid_flags, tsize, allowed_border=0):
    """Check whether the anchors are inside the border.
    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 2).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        tsize (int): Temporal size of current video.
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0. A negative value disables the border check.
    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a
            valid range.
    """
    # Negative border means: accept the pre-existing flags unchanged.
    if allowed_border < 0:
        return valid_flags
    starts_ok = flat_anchors[:, 0] >= -allowed_border
    ends_ok = flat_anchors[:, 1] < tsize + allowed_border
    return valid_flags & starts_ok & ends_ok
def print_step_s____(info: str) -> None:
    """Log *info* once inside a framed debug box; no-op unless Debug is set."""
    if not Debug:
        return
    framed_body = ProgressMarkPrefix + info.replace('\n', '\n┃')
    _log("┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
    _log(framed_body)
    _log("┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
def verify_callback_handlers(clazz):
    """Verifies that methods adding listener/callback have overload
    for specifying delivery thread.

    Heuristic API-lint check: any register-style method that takes a
    *Listener/*Callback/*Callbacks argument should have a same-named
    overload accepting an android.os.Handler so callers can choose the
    delivery thread.  UI-oriented packages and classes are skipped
    because they assume the main thread.
    """
    # Ignore UI packages which assume main thread
    skip = [
        "animation",
        "view",
        "graphics",
        "transition",
        "widget",
        "webkit",
    ]
    for s in skip:
        if s in clazz.pkg.name_path: return
        if s in clazz.extends_path: return
    # Ignore UI classes which assume main thread
    if "app" in clazz.pkg.name_path or "app" in clazz.extends_path:
        for s in ["ActionBar","Dialog","Application","Activity","Fragment","Loader"]:
            if s in clazz.fullname: return
    if "content" in clazz.pkg.name_path or "content" in clazz.extends_path:
        for s in ["Loader"]:
            if s in clazz.fullname: return
    # found: last seen method (per name) taking a listener/callback argument.
    # by_name: all candidate methods keyed by name, used to find overloads.
    found = {}
    by_name = collections.defaultdict(list)
    for m in clazz.methods:
        # Skip removal methods and the on* event callbacks themselves.
        if m.name.startswith("unregister"): continue
        if m.name.startswith("remove"): continue
        if re.match("on[A-Z]+", m.name): continue
        by_name[m.name].append(m)
        for a in m.args:
            if a.endswith("Listener") or a.endswith("Callback") or a.endswith("Callbacks"):
                found[m.name] = m
    # Warn on each registration method lacking a Handler-taking overload.
    for f in found.values():
        takes_handler = False
        for m in by_name[f.name]:
            if "android.os.Handler" in m.args:
                takes_handler = True
        if not takes_handler:
            warn(clazz, f, "L1", "Registration methods should have overload that accepts delivery Handler")
def args2command(*args):
    """Convert positional arguments to a list of stripped strings.

    Args:
        *args: values to stringify; None and empty strings are rejected.

    Raises:
        ValueError: if any argument is None or an empty string.
    """
    # Explicit membership check instead of assert + bare except: asserts are
    # stripped under -O and the bare except swallowed unrelated errors.
    if None in args or "" in args:
        print("args:", args)
        raise ValueError("None values not allowed in args!")
    return [str(_).strip() for _ in args]
def parse_one(line: str) -> Tuple[Optional[str], List[str]]:
    """
    Scan *line* for bracket corruption.

    Returns (first corrupting character or None, the remaining open stack).
    """
    open_stack: List[str] = []
    for ch in line:
        if ch in BRACKET_MAP:
            # Opening bracket: remember it and keep scanning.
            open_stack.append(ch)
        else:
            # Closing bracket: must match the most recent opener.
            if ch != BRACKET_MAP[open_stack[-1]]:
                return ch, open_stack
            open_stack.pop()
    return None, open_stack
def load_callable_dotted_path(dotted_path, raise_=True, reload=False):
    """
    Like load_dotted_path but verifies the loaded object is a callable.

    Raises:
        TypeError: if the object at *dotted_path* is not callable.
    """
    obj = load_dotted_path(dotted_path=dotted_path,
                           raise_=raise_,
                           reload=reload)
    if callable(obj):
        return obj
    raise TypeError(f'Error loading dotted path {dotted_path!r}. '
                    'Expected a callable object (i.e., some kind '
                    f'of function). Got {obj!r} '
                    f'(an object of type: {type(obj).__name__})')
def get_engine():
    """Helper method to grab engine."""
    # The facade is created lazily; grab the engine straight off it.
    return _create_facade_lazily().get_engine()
def generate_diagonals():
    """Build the table of diagonals a knight can reach.

    Returns a dict mapping each diagonal number (as a string) to the list
    of path counts for the cells on that diagonal.  Every new row is
    derived from pairwise sums of the previous row, Pascal-triangle style,
    with a 1 appended at both ends.
    """
    table: dict[str, list[int]] = {'2': [1]}
    for diag in range(5, 50, 3):
        prev_row: list[int] = table[str(diag - 3)]
        middle = [prev_row[j] + prev_row[j + 1] for j in range(len(prev_row) - 1)]
        table[str(diag)] = [1] + middle + [1]
    return table
def physical_rad_to_pix(im_prod: Union[Image, RateMap, ExpMap], physical_rad: Quantity,
                        coord: Quantity, z: Union[float, int] = None, cosmo=None) -> Quantity:
    """
    Another convenience function, this time to convert physical radii to pixels. It can deal with both angular and
    proper radii, so long as redshift and cosmology information is provided for the conversion from proper radii
    to pixels.
    :param Image/RateMap/ExpMap im_prod: The image product whose WCS is used for the pixel scale.
    :param Quantity physical_rad: The physical radius to be converted to pixels.
    :param Quantity coord: The position of the object being analysed.
    :param float/int z: The redshift of the object (only required for input proper distance units like kpc).
    :param cosmo: The chosen cosmology for the analysis (only required for input proper distance units like kpc).
    :return: The converted radii, in an astropy Quantity with pix units.
    :rtype: Quantity
    """
    # Proper distance (kpc-like): needs z + cosmology to become an on-sky angle.
    if physical_rad.unit.is_equivalent("kpc") and z is not None and cosmo is not None:
        conv_rads = rad_to_ang(physical_rad, z, cosmo).to('deg')
    elif physical_rad.unit.is_equivalent("kpc") and (z is None or cosmo is None):
        raise ValueError("If you wish to convert to convert from proper distance units such as kpc, you must supply "
                         "a redshift and cosmology")
    elif physical_rad.unit.is_equivalent("deg"):
        # Already angular: just normalise to degrees.
        conv_rads = physical_rad.to('deg')
    elif physical_rad.unit == pix:
        raise UnitConversionError("You are trying to convert from pixel units to pixel units.")
    else:
        # Assignment is moot (the next line raises); kept for readability.
        conv_rads = None
        raise UnitConversionError("cen_rad_units doesn't appear to be a distance or angular unit.")
    # Degrees-per-pixel at this sky position, inverted to pixels-per-degree.
    phys_to_pix = 1 / pix_deg_scale(coord, im_prod.radec_wcs).value
    conv_rads = Quantity(conv_rads.value * phys_to_pix, 'pix')
    return conv_rads
def conversation_type_frequency_distribution(convo):
    """
    Returns the type frequency (unigram) distribution for the convo.
    Parameters
    ----------
    convo : Conversation
    Returns
    -------
    collections.Counter
    """
    # Sum the per-post frequency counters across every post in the convo.
    per_post_counts = (post_freq(p) for p in convo.posts.values())
    return reduce(lambda acc, cur: acc + cur, per_post_counts)
def create_file(filename: str):
    """ Create the file filename and create any directories needed
    Args:
        filename: Path to the file to be created
    Raises:
        PermissionError: if the parent directory is not read/writable
    """
    filename = os.path.expanduser(filename)
    parent = os.path.dirname(filename)
    if not path_is_read_writable(parent):
        # Name the offending path so the failure is actionable (the original
        # message contained the literal placeholder "(unknown)").
        raise PermissionError(f"Insufficient permissions to create {filename}")
    os.makedirs(parent, exist_ok=True)
    # with NamedLock(filename): # TODO: Implement combo_lock with file lock support or add lock utils to neon_utils DM
    with open(filename, 'w') as f:
        f.write('')
def _cleanup_legacy_namespace(input_string):
    """
    At some point in time, the ttml namespace was TTML_NAMESPACE_URI_LEGACY,
    then it got changed to TTML_NAMESPACE_URI. There are tons of those floating
    around, including our pre-dmr dfxps and ttmls files. The backend (this lib)
    can deal with both namespaces, but the amara front end cannot. We therefore
    convert all namespaces to the correct one (else a lot of namespace xml magic
    has to be done on the front end, and trust me, you don't want to do it).
    This function 'converts' all ...ttfa... to ...ttml... with a regex. To be a
    bit less reckless, we're checking that it's quoted, as in an attribute. (that
    of course doesn't guarantee the safety of this, just makes it a bit less
    likely that the legacy url is being used inside a text node. All of this
    because lxml cannot change namespace attribute values:
    https://bugs.launchpad.net/lxml/+bug/555602
    """
    # Two passes: first the no-anchor variant of the legacy URI, then the
    # anchored one; both substitute the quoted modern namespace URI.
    input_string = TTML_NAMESPACE_URI_LEGACY_NO_ANCHOR_RE.sub(r'"%s\3' % TTML_NAMESPACE_URI, input_string)
    return TTML_NAMESPACE_URI_LEGACY_RE.sub(r'"%s\3\4' % TTML_NAMESPACE_URI, input_string)
def get_config_filename(args):
    """Tries to automatically set the config to use.

    Mutates ``args.config`` in place based on ``args.varn`` when the user
    did not supply a config explicitly; leaves it untouched otherwise.
    """
    # The original wrapped these comparisons in try/except IOError, but plain
    # string comparisons cannot raise IOError, so the handler was dead code.
    if args.config is None:
        if args.varn == 'Blocking':
            args.config = 'plot_map_config_blocking'
        elif args.varn == 'GeopotentialHeight':
            args.config = 'plot_map_config_gph'
        # add config names here
def _err_to_json(key, *args):
    """Translate an error key to the full JSON error response"""
    assert (key in errors)
    # errors[key] is (code, title, detail-template).
    code, title, detail_template = errors[key][0], errors[key][1], errors[key][2]
    detail = detail_template.format(*args)
    payload = {
        'message': title,
        'errors': [{'title': title, 'detail': detail, 'code': code}],
    }
    return json.dumps(payload)
def multiref_represent(opts, tablename, represent_string = "%(name)s"):
    """
    Represent a list of references
    @param opt: the current value or list of values
    @param tablename: the referenced table
    @param represent_string: format string to represent the records
    """
    # No value(s) given: render the standard "none" message.
    if not opts:
        return current.messages.NONE
    s3db = current.s3db
    table = s3db.table(tablename, None)
    if table is None:
        return current.messages.NONE
    # Normalise a scalar value to a one-element list.
    if not isinstance(opts, (list, tuple)):
        opts = [opts]
    rows = current.db(table.id.belongs(opts)).select()
    # Map record id (as string) -> row for fast lookup below.
    rstr = Storage([(str(row.id), row) for row in rows])
    keys = rstr.keys()
    # NOTE(review): py2-era "cond and a or b" idiom — yields UNKNOWN_OPT if the
    # formatted string is falsy (e.g. empty); kept as-is for compatibility.
    represent = lambda o: str(o) in keys and \
                          represent_string % rstr[str(o)] or \
                          current.messages.UNKNOWN_OPT
    vals = [represent(o) for o in opts]
    if len(opts) > 1:
        vals = ", ".join(vals)
    else:
        # Single value: unwrap the list, or NONE when nothing was found.
        vals = len(vals) and vals[0] or current.messages.NONE
    return vals
def selected_cells(self):
    """Get the selected cells. Synchronous, so returns a list.
    Returns:
        A list of Cells.
    """
    # Drain the async generator chunk by chunk, flattening into one list.
    collected = []
    for chunk in self.selected_cells_async():
        collected.extend(chunk.cells)
    return collected
def calc_annual_capital_addts_ferc1(steam_df, window=3):
    """
    Calculate annual capital additions for FERC1 steam records.
    Convert the capex_total column into annual capital additons the
    `capex_total` column is the cumulative capital poured into the plant over
    time. This function takes the annual difference should generate the annual
    capial additions. It also want generates a rolling average, to smooth out
    the big annual fluxuations.
    Args:
        steam_df (pandas.DataFrame): result of `prep_plants_ferc()`
        window (int): width, in years, of the rolling average. Defaults to 3.
    Returns:
        pandas.DataFrame: augemented version of steam_df with two additional
        columns: `capex_annual_addt` and `capex_annual_addt_rolling`.
    """
    # we need to sort the df so it lines up w/ the groupby
    steam_df = steam_df.sort_values(IDX_STEAM)
    # we group on everything but the year so the groups are multi-year unique
    # plants the shift happens within these multi-year plant groups
    steam_df['capex_total_shifted'] = steam_df.groupby(
        [x for x in IDX_STEAM if x != 'report_year'])[['capex_total']].shift()
    # annual addition = this year's cumulative total minus last year's
    steam_df = steam_df.assign(
        capex_annual_addt=lambda x: x.capex_total - x.capex_total_shifted
    )
    # smooth the annual additions with a rolling average per plant group
    addts = pudl.helpers.generate_rolling_avg(
        steam_df,
        group_cols=[x for x in IDX_STEAM if x != 'report_year'],
        data_col='capex_annual_addt',
        window=window
    )
    steam_df_w_addts = (
        pd.merge(
            steam_df,
            addts[IDX_STEAM + ['capex_total', 'capex_annual_addt_rolling']],
            on=IDX_STEAM + ['capex_total'],
            how='left',
        )
        # normalise additions per MWh / MW / kW, raw and rolling
        .assign(
            capex_annual_per_mwh=lambda x:
                x.capex_annual_addt / x.net_generation_mwh,
            capex_annual_per_mw=lambda x:
                x.capex_annual_addt / x.capacity_mw,
            capex_annual_per_kw=lambda x:
                x.capex_annual_addt / x.capacity_mw / 1000,
            capex_annual_per_mwh_rolling=lambda x:
                x.capex_annual_addt_rolling / x.net_generation_mwh,
            capex_annual_per_mw_rolling=lambda x:
                x.capex_annual_addt_rolling / x.capacity_mw,
        )
    )
    steam_df_w_addts = add_mean_cap_addts(steam_df_w_addts)
    # bb tests for volumne of negative annual capex
    neg_cap_addts = len(
        steam_df_w_addts[steam_df_w_addts.capex_annual_addt_rolling < 0]) \
        / len(steam_df_w_addts)
    neg_cap_addts_mw = (
        steam_df_w_addts[
            steam_df_w_addts.capex_annual_addt_rolling < 0]
        .net_generation_mwh.sum()
        / steam_df_w_addts.net_generation_mwh.sum())
    message = (f'{neg_cap_addts:.02%} records have negative capitial additions'
               f': {neg_cap_addts_mw:.02%} of capacity')
    # warn loudly if more than 10% of records show negative additions
    if neg_cap_addts > .1:
        warnings.warn(message)
    else:
        logger.info(message)
    return steam_df_w_addts
def status():
    """Display status information for every available device."""
async def async_upload_file(serialUID, filepath, upload_blockinfo):
    """Asynchronously upload one file block to the AI upload server.

    :param serialUID: identifier appended to the upload URI path.
    :param filepath: local path of the file whose block is uploaded.
    :param upload_blockinfo: dict with at least startOffset/endOffset;
        dataCRC32 is filled in here.
    :return: the tornado HTTP response (errors are returned, not raised,
        because raise_error=False).
    """
    # millisecond timestamp used for signing
    ts = int(time.time() * 1000)
    # Compute the CRC32 of this block's byte range
    data, crc32 = get_block_crc32(filepath, upload_blockinfo["startOffset"], upload_blockinfo["endOffset"])
    upload_blockinfo['dataCRC32'] = crc32
    # Encrypt the block info and produce its signature
    request_data, sign_sha256 = encry_and_sign(upload_blockinfo, ts)
    uri_path = settings.get("APICONF").get("API_FILE_UPLOAD") + serialUID
    url = parse.urljoin(settings.get("URL").get("AI_UPLOAD_SERVER_URL"), uri_path)
    # build auth
    authinfo = build_authinfo(uri=uri_path, verb='POST', sign=sign_sha256, timestamp=ts)
    headers = {"Authorization": authinfo, "uploadInfo": json.dumps(request_data),
               "Content-Type": settings.Content_Type}
    client = tornado.httpclient.AsyncHTTPClient()
    request = tornado.httpclient.HTTPRequest(url, method="POST", body=data, headers=headers, validate_cert=False)
    res = await client.fetch(request, raise_error=False)
    return res
def parse(parser_name=None, file_key=None, **kwargs):
    """Call the given parser and return parsed data

    Either *parser_name* or *file_key* identifies the parser; with a file
    key, the parser name is looked up in the Where file list.
    TODO: This is the old style of running parsers, can be deleted when all parsers are new style.
    Args:
        parser_name (String): Name of parser
        file_key (String): Used to look up parser in the Where file list.
        kwargs: Input arguments to the parser
    Returns:
        Parser: The parsed data
    """
    parser = setup_parser(parser_name=parser_name, file_key=file_key, **kwargs)
    return parser.parse()
def grafico_barre_qualitative_risposta(X, y, qualitative, n_columns):
    """Bar charts of the mean response for each qualitative variable.

    One subplot per variable in *qualitative*, laid out in *n_columns*
    columns; each chart shows the mean of *y* per level of the variable.
    """
    n_var = len(qualitative)
    for i, var in enumerate(qualitative):
        # Grid of ceil(n_var / n_columns) rows by n_columns columns.
        ax = plt.subplot(math.ceil(n_var / n_columns), n_columns, i + 1)
        # Mean response per level of the qualitative variable.
        pd.concat([X[var], y], axis=1).groupby(var).mean().plot(ax=ax,
                                                                kind='bar')
    plt.tight_layout()
    plt.grid()
async def remove(message: discord.Message, trigger: Annotate.Content):
    """ Remove user alias with the specified trigger. Use `*` to delete all. """
    # "*" wipes every alias this user has registered.
    if trigger == "*":
        aliases.data[str(message.author.id)] = {}
        await aliases.asyncsave()
        await client.say(message, "**Removed all aliases.**")
    # Check if the trigger is in the would be list (basically checks if trigger is in [] if user is not registered)
    assert trigger in aliases.data.get(str(message.author.id), []), \
        f"**Alias `{trigger}` has never been set. Check `{list_aliases.cmd.name_prefix(message.guild)}`.**"
    # Trigger is an assigned alias, remove it
    aliases.data[str(message.author.id)].pop(trigger)
    await aliases.asyncsave()
    await client.say(message, f"**Alias `{trigger}` removed.**")
def create_cv_split(file_train, file_test, col_label='label', col_group=None, n_folds=5, splitter='skf', random_state=33):
    """
    Load train/test CSVs and assign a cross-validation fold id to each
    training row, plus compatibility columns on the test set.

    Parameters:
    splitter : str
        "kf", "skf", "gkf"
    Example:
    train_df, test_df = create_cv_split(os.path.join(args.data_dir, 'Train.csv'),
                                        os.path.join(args.data_dir, 'Test.csv'),
                                        col_label='Label',
                                        col_group=None,
                                        n_folds=5,
                                        splitter='skf',
                                        random_state=33)
    """
    #
    # In KFold and StratifiedKFold "groups" are always ignored
    # so we just make substitute to unify split call
    if col_group is None:
        col_group = col_label
    train_df = pd.read_csv(file_train)
    test_df = pd.read_csv(file_test)
    #
    # Label encoded label
    le = LabelEncoder()
    train_df[col_label + '_le'] = le.fit_transform(train_df[col_label])
    # Fake label for test (just for compatibility)
    test_df[col_label] = 0
    test_df[col_label + '_le'] = 0
    # Template column for fold_id
    train_df['fold_id'] = 0
    test_df['fold_id'] = 0 # (just for compatibility)
    # Check train/test columns
    assert list(train_df.columns) == list(test_df.columns), 'Different set or order of columns in train/test'
    # Choose the splitter implementation
    if splitter == 'kf':
        kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    elif splitter == 'skf':
        kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    elif splitter == 'gkf':
        kf = GroupKFold(n_splits=n_folds)
    else:
        raise ValueError('Posible values for splitter are: "kf", "skf", and "gkf"')
    # Tag each validation index with its fold number
    for fold_id, (train_index, val_index) in enumerate(kf.split(X=train_df, y=train_df[col_label].values, groups=train_df[col_group].values)):
        train_df.loc[train_df.index.isin(val_index), 'fold_id'] = fold_id
    # Check fold_id: must have corresponding number of folds
    assert len(train_df['fold_id'].unique()) == n_folds, 'Inconsistent number of folds'
    # Check fold_id: must be consequtive and start from 0
    lst = list(train_df['fold_id'])
    assert list(np.sort(np.unique(lst))) == list(range(0, max(lst)+1)), 'Non-consequtive, or starts not from 0'
    # Check groups: must not intersect
    if splitter == 'gkf':
        for i in range(n_folds):
            assert train_df[train_df['fold_id'] == i][col_group].isin(train_df[train_df['fold_id'] != i][col_group]).sum() == 0, 'Groups are intersected'
    # Shuffle
    # We use random_state+1 because 'df.sample' with the same seed after 'KFold.split' will re-create initial order
    train_df = train_df.sample(frac=1.0, random_state=random_state+1)
    #
    return train_df, test_df
def get_scanner(hass, config):
    """Validate the configuration and return a Bbox scanner."""
    bbox_scanner = BboxDeviceScanner(config[DOMAIN])
    # Only hand back the scanner if it initialised successfully.
    if bbox_scanner.success_init:
        return bbox_scanner
    return None
def _trim_name(image):
"""Remove the slash at the end of the filename."""
return image[:-1] if image[-1] == '/' else image | 35,658 |
def check_for_pyd_so(file_path):
    """Return True if a compiled extension (.pyd or .so) exists for *file_path*.

    ``os.path.isfile`` already returns a bool, so the redundant
    ``True if ... else False`` wrapper was dropped.
    """
    return os.path.isfile(file_path + '.pyd') or os.path.isfile(file_path + '.so')
def load_data(data_file):
    """Loads data CSV into input and target ndarrays of shape (n_samples,
    features).
    Args:
        data_file (str): Local or remote CSV file containing data to load.
    Returns:
        ndarray: Input data of shape (n_samples, in_features).
        ndarray: Target data of shape (n_samples, out_features).
    """
    filename = path.basename(data_file)
    # Decide remoteness once instead of re-checking the prefix later.
    is_remote = data_file.startswith('gs://')
    if is_remote:
        # Copy remote file from GCS into a temporary directory.
        dirname = mkdtemp()
        copy_from_gcs(data_file, path.join(dirname, filename))
    else:
        dirname = path.dirname(data_file)
    try:
        # Two header rows: level 0 is 'Input'/'Target', level 1 the feature names.
        df = pd.read_csv(path.join(dirname, filename), header=[0, 1])
        X = df['Input'].values
        Y = df['Target'].values
    finally:
        # Remove the temporary directory even if parsing fails (the original
        # leaked it on error).
        if is_remote:
            rmtree(dirname)
    return X, Y
def sines_sum(parameters: ndarray) -> Callable:
    """
    Construct a sum of sines for given parameters.
    Parameters
    ----------
    parameters : ndarray
        y0, amplitude1, frequency1, phase1, amplitude2, frequency2, phase2, ...
    Returns
    -------
    function
        f(x) = amplitude1*sin(2*pi*frequency1*x + phase1) +
               amplitude2*sin(2*pi*frequency2*x + phase2) + ... + y0
    """
    coeffs = parameters

    def _evaluate(x):
        # Accumulate one sine term per (amplitude, frequency, phase) triple.
        total = 0
        for k in range(len(coeffs) // 3):
            base = 3 * k
            total += coeffs[base + 1] * np.sin(
                2 * np.pi * coeffs[base + 2] * x + coeffs[base + 3])
        return total + coeffs[0]

    return _evaluate
def _divide_and_conquer_convex_hull(points):
    """
    Recursively split *points* in half, hull each half, then merge.
    Notes:
        O(n * log(n))
    Args:
        points:
    Returns:
    """
    n = len(points)
    # Small clouds are handled directly by the Jarvis march.
    if n < 6:
        return Hull(_jarvis_convex_hull(points))
    half = n // 2
    left_hull = _divide_and_conquer_convex_hull(points[:half])
    right_hull = _divide_and_conquer_convex_hull(points[half:])
    return __merge_convex_hulls(left_hull, right_hull)
def _parsed_method_to_method(
    parsed: Union[parse.UnderstoodMethod, parse.ImplementationSpecificMethod]
) -> Union[UnderstoodMethod, ImplementationSpecificMethod]:
    """Translate the parsed method into an intermediate representation.

    Dispatches on the concrete parse type; the two branches build the same
    fields except that UnderstoodMethod additionally carries the body.
    """
    if isinstance(parsed, parse.ImplementationSpecificMethod):
        return ImplementationSpecificMethod(
            name=parsed.name,
            arguments=_parsed_arguments_to_arguments(parsed=parsed.arguments),
            returns=(
                None
                if parsed.returns is None
                else _parsed_type_annotation_to_type_annotation(parsed.returns)
            ),
            description=(
                _parsed_description_to_description(parsed.description)
                if parsed.description is not None
                else None
            ),
            contracts=_parsed_contracts_to_contracts(parsed.contracts),
            parsed=parsed,
        )
    elif isinstance(parsed, parse.UnderstoodMethod):
        return UnderstoodMethod(
            name=parsed.name,
            arguments=_parsed_arguments_to_arguments(parsed=parsed.arguments),
            returns=(
                None
                if parsed.returns is None
                else _parsed_type_annotation_to_type_annotation(parsed.returns)
            ),
            description=(
                _parsed_description_to_description(parsed.description)
                if parsed.description is not None
                else None
            ),
            contracts=_parsed_contracts_to_contracts(parsed.contracts),
            body=parsed.body,
            parsed=parsed,
        )
    else:
        # Exhaustiveness check; the raise is unreachable but satisfies linters.
        assert_never(parsed)
        raise AssertionError("Should have never gotten here")
def stop_docker(name=container_name, cid=None, let_fail=False):
    """Stop docker container with given name tag
    Parameters
    ----------
    name: str
        name field which has been attached to the container we wish to remove
    cid: str
        container ID, if known
    let_fail: bool
        whether to raise an exception if the underlying commands return an
        error.
    """
    try:
        # Resolve the container id by name if it was not supplied.
        if cid is None:
            print('Finding %s ...' % name)
            lookup_cmd = shlex.split('docker ps -q --filter "name=%s"' % name)
            cid = subprocess.check_output(lookup_cmd).strip().decode()
        # Empty id means no matching container is running.
        if cid:
            print('Stopping %s ...' % cid)
            subprocess.call(['docker', 'kill', cid])
            subprocess.call(['docker', 'rm', cid])
    except subprocess.CalledProcessError as err:
        print(err)
        if not let_fail:
            raise
def NS(namespace, tag):
    """
    Generate a namespaced tag for use in creation of an XML file
    """
    uri = XML_NS[namespace]
    return f'{{{uri}}}{tag}'
def dump_requirements(nodes, strict=False):
    """Dump packages and their versions to a string.
    Format of the string is like a "requirements.txt"::
        # created with python-X.X
        package-1==1.2.3
        package-2==2.3.4
    :param nodes: List of ast nodes in a module.
    :param strict: If *True* throw an exception if a package is not found
    :returns: String containing requirements.
    """
    # Header records the interpreter version used to build the file.
    result = f'# created with python-{".".join([str(x) for x in sys.version_info[:3]])}\n'
    for package in get_packages(nodes):
        # Standard-library modules need no pinned requirement.
        if package in STDLIBNAMES:
            continue
        try:
            dist = get_distribution_name(package)
        except RequirementNotFound as exc:
            # In strict mode, unknown packages abort unless explicitly ignored.
            if strict and package not in _ignore_requirements:
                raise exc
            warn(f'The "{package}" requirement was not found.')
            continue
        result += f'{dist}=={version(dist)}\n'
    return result
def non_contradiction_instance_2(person_list,
                                 place_list,
                                 n,
                                 vi_function=vi,
                                 not_vi_function=not_vi,
                                 Everyone_str="Everyone",
                                 every_place_str="every place"):
    """
    T = {every x every P v(x,P)}
    new = not v(xi, xj) ----------- 0

    Builds the universal premise plus a negated pair statement for two
    distinct people; the final 0 marks a non-contradiction label.
    """
    persons = get_n_different_items(person_list, 2)
    universal = vi_function(Everyone_str, every_place_str)
    negated = not_vi_function(persons[0], persons[1])
    return universal, negated, 0
def init_args(parser):
    """
    Add argument required to configure clocks.
    """
    # "append" lets --freq be repeated, one entry per oscillator.
    parser.add_argument(
        '--freq',
        action='append',
        metavar="NAME=VALUE",
        help='Set the frequency of oscillator',
    )
def load_w2v_model(w2v_path):
    """
    Load a pretrained gensim Word2Vec model.
    :param w2v_path: path of the saved model file
    :return: the loaded Word2Vec model
    """
    model = gensim.models.Word2Vec.load(w2v_path)
    return model
def derive_keys(token, secret, strategy):
    """Derives keys for MAC and ENCRYPTION from the user-provided
    secret. The resulting keys should be passed to the protect and
    unprotect functions.
    As suggested by NIST Special Publication 800-108, this uses the
    first 128 bits from the sha384 KDF for the obscured cache key
    value, the second 128 bits for the message authentication key and
    the remaining 128 bits for the encryption key.
    This approach is faster than computing a separate hmac as the KDF
    for each desired key.
    """
    # One HMAC digest, sliced into three equal key segments.
    digest = hmac.new(secret, token + strategy, HASH_FUNCTION).digest()
    cache_key = digest[:DIGEST_SPLIT]
    mac_key = digest[DIGEST_SPLIT: 2 * DIGEST_SPLIT]
    enc_key = digest[2 * DIGEST_SPLIT:]
    return {'CACHE_KEY': cache_key,
            'MAC': mac_key,
            'ENCRYPTION': enc_key,
            'strategy': strategy}
def bitinfo_holding_ts(
    track_addr: Optional[str] = None,
    track_coin: Optional[str] = None,
    timeframe: Optional[str] = "4h",
    sma: Optional[int] = 20,
):
    """Scrap the data from bitinfo and calculate the balance based on the resample frequency.
    track_addr (str): The address to track.
    track_coin (str): The coin to track.
    timeframe (str): The resample frequency.
    sma (int): The moving average window.
    For example, if the website url is
    https://bitinfocharts.com/dogecoin/address/DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k-full/
    track_coin value would be `dogecoin` and track_addr would be `DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k`.
    For timeframe, we support frequency that listed on pandas doc, common value would be '4h', '1h', '1d'
    Full list of timeframe available: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
    """
    # Apply fallbacks BEFORE logging so the log line reports the effective
    # values (the original logged the raw arguments, printing None when the
    # caller relied on the defaults).
    track_addr = TRACK_ADDRESS if track_addr is None else track_addr
    track_coin = TRACK_COIN if track_coin is None else track_coin
    LOGGER.info(f"Scrapping data for {track_coin}, wallet address: {track_addr}")
    df_holding_ts = get_wallet_holding_data(
        coin=track_coin,
        address=track_addr,
    )
    balance_ts = produce_time_series(df_holding_ts, timeframe, sma)
    return balance_ts
def sharpe(p):
    """Sharpe ratio of the returns"""
    try:
        # Annualise the mean/volatility ratio assuming 252 trading days.
        annualisation = np.sqrt(252)
        return p.mean() / p.std() * annualisation
    except ZeroDivisionError:
        logging.error("Zero volatility, divide by zero in Sharpe ratio.")
        return np.inf
def create_manual_slab_ase(lattice='fcc', miller=None, host_symbol='Fe',
                           latticeconstant=4.0, size=(1, 1, 5), replacements=None, decimals=10,
                           pop_last_layers=0):
    """
    Wraps ase.lattice lattices generators to create a slab having given lattice vectors directions.
    :param lattice: 'fcc' and 'bcc' are supported. Set the host lattice of a slab.
    :param miller: a list of directions of lattice vectors
    :param host_symbol: a string specifying the atom type
    :param latticeconstant: the lattice constant of a structure
    :param size: a 3-element tuple that sets supercell size. For instance, use (1,1,5) to set
                 5 layers of a slab.
    :param replacements: dict mapping a layer number (int or str) to the atom type that
                         should replace the host atoms in that layer.
    :param decimals: sets the rounding of atom positions. See numpy.around.
    :param pop_last_layers: specifies how many bottom layers to remove. Sometimes one does not want
                            to use the integer number of unit cells along z, extra layers can be
                            removed.
    :return structure: an ase-lattice representing a slab with replaced atoms
    """
    # Default Miller indices: a plain cubic orientation.
    if miller is None:
        miller = [[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]]
    if lattice == 'fcc':
        from ase.lattice.cubic import FaceCenteredCubic
        structure_factory = FaceCenteredCubic
    elif lattice == 'bcc':
        from ase.lattice.cubic import BodyCenteredCubic
        structure_factory = BodyCenteredCubic
    else:
        raise ValueError(
            'The given lattice {} is not supported'.format(lattice))
    # pbc=(1, 1, 0): periodic in-plane, open along z (slab geometry).
    structure = structure_factory(miller=miller, symbol=host_symbol, pbc=(1, 1, 0),
                                  latticeconstant=latticeconstant, size=size)
    * _, layer_occupancies = get_layer_by_number(structure, 0)
    if replacements is not None:
        keys = six.viewkeys(replacements)
        # Requested layer indices must exist within the slab.
        if max((abs(int(x)) for x in keys)) >= len(layer_occupancies):
            raise ValueError('"replacements" has to contain numbers less than number of layers')
    else:
        replacements = {}
    layer_occupancies.append(0) # technical append
    # Remove atoms belonging to the requested number of bottom layers.
    atoms_to_pop = np.cumsum(np.array(layer_occupancies[-1::-1]))
    for i in range(atoms_to_pop[pop_last_layers]):
        structure.pop()
    current_symbols = structure.get_chemical_symbols()
    # Substitute atom types layer by layer according to `replacements`.
    for i, at_type in six.iteritems(replacements):
        if isinstance(i, str):
            i = int(i)
        layer, layer_z, layer_occupancies = get_layer_by_number(structure, i)
        layer_occupancies.insert(0, 0)
        # Negative indices count from the top; shift to skip the right layers.
        if i < 0:
            i = i - 1
        atoms_to_skip = np.cumsum(np.array(layer_occupancies))[i]
        for k in range(layer_occupancies[i + 1]):
            current_symbols[k+atoms_to_skip] = at_type
    structure.set_chemical_symbols(current_symbols)
    # Round positions to avoid floating-point noise in comparisons.
    structure.positions = np.around(structure.positions, decimals=decimals)
    return structure
def telephone():
    """Generates random 10 digit phone numbers and returns them as a dictionary entry"""
    pieces = []
    # Build ddd-ddd-dddd: a dash follows digits 3 and 6.
    for position in range(1, 11):
        pieces.append(str(rand.randint(0, 9)))
        if position < 7 and position % 3 == 0:
            pieces.append("-")
    return {"telephone": "".join(pieces)}
def get_kline(symbol: str, end_date: [datetime, str], freq: str,
              start_date: [datetime, str] = None, count=None, fq: bool = False) -> List[RawBar]:
    """Fetch candlestick (K-line) data.
    :param symbol: Binance futures trading pair, e.g. BTCUSDT/ETHUSDT
    :param start_date: start date
    :param end_date: end date
    :param freq: K-line interval, one of ['1min', '5min', '30min', '60min', 'D', 'W', 'M']
    :param count: number of candles, capped at 5000
    :param fq: whether to apply price adjustment
    :return: pd.DataFrame
    >>> start_date = datetime.strptime("20200101", "%Y%m%d")
    >>> end_date = datetime.strptime("20210701", "%Y%m%d")
    >>> df1 = get_kline(symbol="BTCUSDT", start_date=start_date, end_date=end_date, freq="1min")
    >>> df2 = get_kline(symbol="000001.XSHG", end_date=end_date, freq="1min", count=1000)
    >>> df3 = get_kline(symbol="000001.XSHG", start_date='20200701', end_date='20200719', freq="1min", fq=True)
    >>> df4 = get_kline(symbol="000001.XSHG", end_date='20200719', freq="1min", count=1000)
    """
    # Fetch K-line data from Binance.
    if count and count > 1300:
        warnings.warn(f"count={count}, 超过5000的最大值限制,仅返回最后5000条记录")
    # NOTE(review): the end_date parameter is unconditionally overwritten
    # here with "now" — looks unintentional; confirm against callers.
    end_date = datetime.now()
    result = []
    if start_date:
        start_date = pd.to_datetime(start_date)
        # Retry until the API returns a non-empty result.
        while len(result) == 0:
            try:
                result = request_client.get_candlestick_data(symbol=symbol,
                                                             interval=freq_convert[freq],
                                                             startTime=start_date.timestamp() * 1000,
                                                             endTime=end_date.timestamp() * 1000)
            except:
                print("重连了")
                time.sleep(2)
    elif count:
        while len(result) == 0:
            try:
                result = request_client.get_candlestick_data(symbol=symbol,
                                                             interval=freq_convert[freq],
                                                             endTime=end_date.timestamp() * 1000,
                                                             limit=count)
            except:
                print("重连了")
                time.sleep(2)
    else:
        raise ValueError("start_date 和 count 不能同时为空")
    # Convert API candles (ms timestamps, string prices) into RawBar objects.
    bars = []
    for kline in result:
        bars.append(RawBar(symbol=symbol, dt=datetime.fromtimestamp(kline.openTime / 1000),
                           open=round(float(kline.open), 2),
                           close=round(float(kline.close), 2),
                           high=round(float(kline.high), 2),
                           low=round(float(kline.low), 2),
                           vol=int(float(kline.volume))))
    return bars
def test_get_formatted_as_type_bool_true_with_1_input():
    """On get_formatted_as_type returns bool True with int 1 input."""
    context = Context({'k1': True})
    formatted = context.get_formatted_as_type(1, out_type=bool)
    assert isinstance(formatted, bool)
    assert formatted is True
def get_terms(properties, out_log, classname):
    """ Gets energy terms

    Args:
        properties (dict): must contain a non-empty list under 'terms'.
        out_log: log handle passed through to fu.log.
        classname (str): caller name used to prefix error messages.

    Raises:
        SystemExit: if 'terms' is missing, malformed, or invalid.
    """
    terms = properties.get('terms', dict())
    if not terms or not isinstance(terms, list):
        fu.log(classname + ': No terms provided or incorrect format, exiting', out_log)
        raise SystemExit(classname + ': No terms provided or incorrect format')
    if not is_valid_term(terms):
        fu.log(classname + ': Incorrect terms provided, exiting', out_log)
        raise SystemExit(classname + ': Incorrect terms provided')
    # Return the already-validated list; the original re-read the dict with a
    # mismatched default ('' instead of dict()).
    return terms
def get_prefix(allow_base=False):
    """Return $CONDA_PREFIX of the active conda env as a pathlib.Path.

    Args:
        allow_base: when False, refuse to operate on the base environment.

    Raises:
        ImportError: if the active env is base and allow_base is False.
    """
    confirm_active()
    env_prefix = Path(os.environ.get("CONDA_PREFIX"))
    # Guard against accidentally mutating the base environment.
    if not allow_base and is_base_env(env_prefix):
        raise ImportError(
            "Base conda env detected, activate an environment before running this command..."
        )
    return env_prefix
def handle_validate(bot, ievent):
    """ validate provided url or last url in log """
    # Python 2 IRC-bot command: validate an explicitly given URL, or fall
    # back to the most recent URL captured by the "url" plugin.
    url = None
    if ievent.rest: url = ievent.rest
    else:
        if plugins.url: url = plugins.fetch("url").latest(bot, ievent)
    if not url: ievent.missing('<url>') ; return
    # valid_url() may hit the network; report HTTP failures to the channel.
    try: url = valid_url(url)
    except urllib2.HTTPError, e: ievent.reply('error: %s' % e) ; return
    if not url: ievent.reply('invalid or bad URL') ; return
    result = geturl_validate(url)
    if result:
        # Truncate long hostnames so the reply line stays readable in chat.
        host = urlparse.urlparse(url)[1]
        if len(host) > 20: host = host[0:20] + '...'
        # Summarize the validator's findings in a single reply line.
        ievent.reply('%s: %s | modified: %s | server: %s | size: %s | content-type: %s | encoding: %s | doctype: %s' % \
            tuple([host] + [result[x] for x in ['result', 'modified', 'server', 'size', 'content', 'encoding', 'doctype']]))
def _decomp_MAMFile(srcfile, destfile=''):
    """Decompress the MAM-format payload of a SuperFetch or Prefetch file.

    Args:
        srcfile: path to the MAM-compressed input file.
        destfile: output path; when empty, the decompressed bytes are
            returned instead of being written to disk.

    Returns:
        bytearray of decompressed data when destfile == '', otherwise True
        after the output file has been written.
    """
    f = open(srcfile, 'rb')
    data = f.read()
    f.close()
    # Check that this really is a compressed MAM file.
    """
    MAX\x84 : Windows 8 이상 수퍼패치 파일
    MAX\x04 : Windows 10 프리패치 파일
    """
    # Signature layout (per the note above): 'MAM' followed by 0x84 for
    # SuperFetch files (Windows 8+) or 0x04 for Prefetch files (Windows 10).
    id = data[0:3].decode('utf8') # expected 'MAM'
    b1 = ord(data[3:4]) # b'\x84' , b'\x04'
    if (id != 'MAM') or (not b1 in [0x84, 0x04]):
        print('[Error] Unknown format.')
        exit()
    decomp_size = struct.unpack('<i', data[4:8])[0] # size of the data once decompressed
    compdata_stpos = 8 # Signature + Total uncompressed data size
    if b1 == 0x84: # SuperFetch format has one extra header field
        compdata_stpos += 4 # Unknown (checksum?)
    data = data[compdata_stpos:] # the compressed payload
    dest_data = bytearray(decomp_size) # pre-allocate the output buffer
    dest_data = comp.XpressHuffman['OpenSrc'].Decompress(data, dest_data)
    if destfile == '':
        return dest_data
    else:
        o = open(destfile, 'wb')
        o.write(dest_data)
        o.close()
        return True
def cg(A, b, x=None, tol=1e-10, verbose=0, f=10, max_steps=None):
    """Solve A x = b by the conjugate-gradient method.

    Parameters
    ----------
    A: A matrix, or a function capable of carrying out matrix-vector products.
    b: right-hand-side vector.
    x: optional starting point (defaults to the zero vector); updated in place.
    tol: stop once the direction's energy norm d.T A d falls below this.
    verbose: if truthy, print per-step diagnostics.
    f: recompute the residual exactly every f steps (0 disables refreshes).
    max_steps: iteration cap; defaults to the problem dimension.
    """
    dim = b.size
    b = b.reshape(dim)
    x = np.zeros(dim) if x is None else x.reshape(dim)
    if isinstance(A, np.ndarray):
        A = MatrixVectorProduct(A)
    max_steps = max_steps or dim
    step_size = None
    residual = b - A(x)
    direction = residual.copy()
    A_dot_direction = A(direction)
    res_sq = residual.dot(residual)
    for step in range(min(dim, max_steps)):
        if step != 0:
            # Periodically recompute the residual from scratch to limit
            # floating-point drift; otherwise update it incrementally.
            if f > 0 and step % f == 0:
                residual = b - A(x)
            else:
                residual -= step_size * A_dot_direction
            prev_res_sq = res_sq
            res_sq = residual.dot(residual)
            beta = res_sq / prev_res_sq
            direction = residual + beta * direction
            A_dot_direction = A(direction)
        if verbose:
            print("Step {}".format(step))
            print("Drift: {}.".format(np.linalg.norm(residual - b + A(x))))
            print("R norm: {}.".format(np.linalg.norm(residual)))
        energy = direction.dot(A_dot_direction)
        if energy < tol:
            break
        step_size = res_sq / energy
        x += step_size * direction
    if verbose:
        residual = b - A(x)
        print("Final residual norm: {}.".format(np.linalg.norm(residual)))
    return x
def test_run_ingest(db, mocker, clients, prep_file):
    """
    Test the run_ingest function.

    Covers three scenarios: a successful run over GWO files, a failure on
    an IngestRun containing a non-GWO file type, and a failure raised from
    inside the mocked genomic-workflow ingest helper.
    """
    client = clients.get("Administrator")
    # Mock out the expensive genomic-workflow ingest so the test only
    # asserts that it gets invoked, not what it does.
    mock_genomic_workflow = mocker.patch(
        "creator.ingest_runs.tasks.ingest_run"
        ".ingest_genomic_workflow_output_manifests"
    )
    user = User.objects.first()
    # Create data. The last Version will have a non-GWO FileType
    for _ in range(3):
        prep_file(authed=True)
    files = list(File.objects.all())
    for file_ in files[:-1]:
        file_.file_type = FileType.GWO.value
        file_.save()
    file_versions = [f.versions.first() for f in files]
    """
    1) Happy Case
    Call run_ingest. Check that genomic_file_workflow got
    called. Check that ir.state == 'completed'.
    """
    happy_versions = file_versions[:-1]
    # An initial IngestRun with no issues
    happy_ir = setup_ingest_run(happy_versions, user)
    run_ingest(happy_ir.id)
    assert IngestRun.objects.all().count() == 1
    mock_genomic_workflow.assert_called_once()
    mock_genomic_workflow.reset_mock()
    # Re-fetch from the DB to observe the state written by run_ingest.
    happy_ir = IngestRun.objects.get(pk=happy_ir.id)
    assert happy_ir.state == "completed"
    assert not happy_ir.error_msg
    """
    2) Non-GWO Case
    Call run_ingest on an IngestRun with a version that doesn't have a GWO
    root_file. Exception should be raised, and state should become failed.
    """
    bad_ir = setup_ingest_run(file_versions, user)
    with pytest.raises(Exception):
        assert run_ingest(bad_ir.id)
    bad_ir = IngestRun.objects.get(pk=bad_ir.id)
    assert bad_ir.state == "failed"
    assert "Unknown file type detected" in bad_ir.error_msg
    """
    3) Exception Case
    Call run_ingest on an IngestRun with all GWO versions. Mock out
    _ingest_genomic_workflow_manifest_ and give it an exception side effect
    and check that the IngestRun goes to a failed state.
    """
    ER = "ERROR"
    except_ir = setup_ingest_run(happy_versions[:1], user)
    mock_genomic_workflow.side_effect = Exception(ER)
    with pytest.raises(Exception):
        run_ingest(except_ir.id)
    mock_genomic_workflow.assert_called_once()
    except_ir = IngestRun.objects.get(pk=except_ir.id)
    assert except_ir.state == "failed"
    assert ER == except_ir.error_msg
def get_ase(f_file, p_insert_red, s_insert_red):
    """Combine framework and inserted atoms into one ASE structure and view it.

    f_file : framework file name readable by ``ase.io.read``
    p_insert_red: positions of reduced inserted species
    s_insert_red: symbols of reduced inserted species
    """
    framework = read(f_file)
    inserted = Atoms(s_insert_red, p_insert_red)
    # Concatenating two Atoms objects yields the combined structure.
    view(framework + inserted)
def from_string(spec):
    """Construct a Device from a string.

    Args:
        spec: a string of the form
            /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
        or
            /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
        as cpu and gpu are mutually exclusive.
        All entries are optional.

    Returns:
        A Device.
    """
    device = Device()
    return device.parse_from_string(spec)
def load_HDFS_data_timestamp_approach(input_path, time_delta_sec, timestamp_format, cached_workflow_path='data_df.csv', sep=',', encoding ='utf-8', cache_workflow=True):
    """
    Loads structured HDFS logs and groups event ids into fixed time windows.

    Args:
        input_path: path to the structured log csv file; expected to have
            'Timestamp', 'LineId', 'Pid' and 'EventId' columns
        time_delta_sec: analyzed period of time in seconds (window size)
        timestamp_format: timestamp format in logs (strftime-style)
        cached_workflow_path: path to cached workflow csv file
        sep: field separator of the input csv
        encoding: text encoding of the input csv
        cache_workflow: cache workflow or not
    Returns:
        x_data: array of lists of event id's np.array(['E21', 'E22', ...], [...],...)
    """
    print('====== Input data summary ======')
    struct_log = pd.read_csv(input_path, sep=sep,encoding=encoding,header=0)
    # Resample frequency string for pd.Grouper, e.g. '60S' for one minute.
    freq_val = str(time_delta_sec) + 'S'
    # NOTE(review): errors='ignore' leaves unparseable timestamps as raw
    # strings, which would then break the time-based grouping — confirm
    # inputs always match timestamp_format.
    struct_log['Timestamp'] = pd.to_datetime(struct_log['Timestamp'], format=timestamp_format, errors='ignore')
    struct_log = struct_log.drop(['LineId', 'Pid'], axis=1)
    struct_log.set_index('Timestamp', inplace=True)
    # Within each time window, concatenate the EventId strings into a single
    # comma-separated value: (x + ',').sum() string-appends a ',' to each
    # cell and then concatenates the column.
    struct_log = struct_log.groupby(pd.Grouper(freq=freq_val)).apply(lambda x:(x + ',').sum())
    struct_log = pd.DataFrame(struct_log['EventId'])
    # drop rows of NaT values in struct_log.index
    struct_log = struct_log[pd.notnull(struct_log.index)]
    data_dict = OrderedDict()
    # Key each window by its timestamp string; split the concatenated
    # EventId cell back into a list, dropping empty trailing entries.
    for idx, row in struct_log.iterrows():
        group_id_list = str(idx)
        if not group_id_list in data_dict:
            data_dict[group_id_list] = None
        data_dict[group_id_list] = list(filter(None, str(row['EventId']).split(',')))
    data_df = pd.DataFrame(list(data_dict.items()), columns=['group_id', 'event_sequence'])
    data_df['number_of_events'] = data_df['event_sequence'].apply(lambda x: len(x))
    cols = ['group_id', 'number_of_events', 'event_sequence']
    data_df = data_df[cols]
    if cache_workflow:
        data_df.to_csv(cached_workflow_path, index=False)
    x_data = data_df['event_sequence'].values
    print('Total: {} instances'.format(x_data.shape[0]))
    return x_data
def test_method(task_name, method_name, tempdir, image):
    """Run a named method on its task's sample dataset and validate the output."""
    import anndata

    # Resolve the task and method objects from their names.
    task = getattr(openproblems.tasks, task_name)
    method = getattr(task.methods, method_name)
    dataset = task.api.sample_dataset()
    openproblems.log.debug(
        "Testing {} method from {} task".format(method.__name__, task.__name__)
    )
    dataset = method(dataset, test=True)
    # The method must return a valid AnnData accepted by the task API.
    assert isinstance(dataset, anndata.AnnData)
    assert task.api.check_method(dataset)
def pairwise_to_multiple(pwise, ref_seq, moltype, info=None):
    """
    turns pairwise alignments to a reference into a multiple alignment
    Parameters
    ----------
    pwise
        Series of pairwise alignments to ref_seq as
        [(non-refseq name, aligned pair), ...]
    ref_seq
        The sequence common in all pairwise alignments
    moltype
        molecular type for the returned alignment
    info
        info object
    Returns
    -------
    ArrayAlign
    """
    if not hasattr(ref_seq, "name"):
        raise TypeError(f"ref_seq must be a cogent3 sequence, not {type(ref_seq)}")
    # Collect the reference row from every pairwise alignment; the union of
    # their gap positions defines the coordinate system of the final
    # multiple alignment.
    refseqs = [s for _, aln in pwise for s in aln.seqs if s.name == ref_seq.name]
    ref_gaps = _gap_union(refseqs)
    m = gap_coords_to_map(ref_gaps, len(ref_seq))
    aligned = [Aligned(m, ref_seq)]
    for other_name, aln in pwise:
        curr_ref = aln.named_seqs[ref_seq.name]
        # Gaps present in this pairwise alignment's reference row.
        curr_ref_gaps = dict(curr_ref.map.get_gap_coordinates())
        other_seq = aln.named_seqs[other_name]
        other_gaps = dict(other_seq.map.get_gap_coordinates())
        # Gaps in the union that this pairwise alignment lacks must be
        # injected into the other sequence so all rows share coordinates.
        diff_gaps = _combined_refseq_gaps(curr_ref_gaps, ref_gaps)
        inject = _gaps_for_injection(other_gaps, diff_gaps, len(other_seq.data))
        if inject:
            m = gap_coords_to_map(inject, len(other_seq.data))
            other_seq = Aligned(m, other_seq.data)
        aligned.append(other_seq)
    # default to ArrayAlign
    return Alignment(aligned, moltype=moltype, info=info).to_type(
        array_align=True, moltype=moltype
    )
def get_dashboard(title: str):
    """Get a dashboard by title"""
    matches = sdk.search_dashboards(title=title)
    if matches:
        return matches[0]
    print(f"dashboard {title} was not found")
    return None
def set_juju_model(model_name):
    """Point environment at the given model.

    Records the model both in the module-level CURRENT_MODEL cache and in
    the JUJU_MODEL environment variable read by the juju client.

    :param model_name: Model to point environment at
    :type model_name: str
    """
    global CURRENT_MODEL
    CURRENT_MODEL = model_name
    os.environ["JUJU_MODEL"] = model_name
async def get_party(sid, ulist):
    """
    Request party info to users in ulist

    Emits a 'get_party' event to every user in *ulist* on behalf of the
    session's user, skipping the requester itself.

    Args:
        sid: socket session id of the requesting client.
        ulist: list of user names whose party info is requested
            (mutated in place if it contains the requester).
    """
    async with sio.session(sid) as session:
        requester = session['user']
        if requester in ulist:
            # A user never needs to ask for its own party; drop it.
            # (Fixed typo in the log message: "Igoring" -> "Ignoring".)
            print(f"{requester} wants its own party. Ignoring.")
            ulist.remove(requester)
        for user in ulist:
            await sio.emit('get_party', requester, sid=login_users[user])
            print(f"{requester} requests party update to {user}")
def upload_openmrs(sink: fhir_client.FhirClient, patient_bundle: bundle.Bundle,
                   locations: Dict[str, str]):
    """Upload Patient history bundles to OpenMRS.

    The OpenMRS FHIR Module does not yet accept whole Bundles, so the
    individual Patient, Encounter and Observation resources are extracted
    first and uploaded one by one. Hospital and practitioner files are not
    supported by OpenMRS and are therefore not handled here.

    Args:
        sink: OpenMRS FHIR endpoint to talk with
        patient_bundle: the Bundle whose resources need to be uploaded
        locations: dictionary of location_id/location_name
    """
    handler = uploader.Uploader(sink)
    patient_bundle.extract_resources()
    handler.upload_openmrs_bundle(patient_bundle, locations)
    # Persist the id mapping produced during the upload.
    patient_bundle.save_mapping()
def monitor(game_id: int,
            population: Population,
            debug: bool = False,
            duration: int = 0,
            genome: Genome = None,
            ):
    """Monitor a single run of the given genome that contains a single GRU-node."""
    print("\n===> MONITORING GENOME <===\n")
    if genome is None:
        genome = population.best_genome
    game_config = deepcopy(population.config)
    if duration > 0:
        game_config.game.duration = duration

    # Find the first hidden node that is neither an output nor a simple node;
    # its type decides which monitoring implementation to dispatch to.
    node_type = None
    for node in genome.get_used_nodes().values():
        candidate = type(node)
        if candidate not in (OutputNodeGene, SimpleNodeGene):
            node_type = candidate
            break
    if node_type is None:
        raise Exception(f"No hidden node to monitor in genome {genome}")

    # Arguments shared by every monitor implementation below.
    monitor_kwargs = dict(
        population=population,
        game_id=game_id,
        genome=genome,
        game_cfg=game_config,
        debug=debug,
    )
    # Imports are kept local to each branch to avoid loading unused monitors.
    if node_type == GruNodeGene:
        from population.utils.visualizing.monitor_genome_single_gru import main as gru_monitor
        gru_monitor(**monitor_kwargs)
    elif node_type == GruNoResetNodeGene:
        from population.utils.visualizing.monitor_genome_single_gru_nr import main as gru_nr_monitor
        gru_nr_monitor(**monitor_kwargs)
    elif node_type in (SimpleRnnNodeGene, FixedRnnNodeGene):
        from population.utils.visualizing.monitor_genome_single_sru import main as sru_monitor
        sru_monitor(average=2, **monitor_kwargs)
    elif node_type == LstmNodeGene:
        from population.utils.visualizing.monitor_genome_single_lstm import main as lstm_monitor
        lstm_monitor(**monitor_kwargs)
    else:
        raise Exception(f"Not able to monitor the genome of config:\n{genome}")
def translate_tensor(tensor, input_size=32, nt=2):
    """
    Data augmentation that enforces periodic boundary conditions by
    randomly translating each sample along every spatial dimension.

    Args:
        tensor: batch of shape (batch, 1, *spatial) with 2 or 3 spatial
            dimensions.
        input_size: spatial extent of each sample along one axis.
        nt: for 2-D inputs, the number of allowed translation offsets per
            axis (offsets are multiples of input_size // nt).

    Returns:
        Tensor of the same shape with each sample independently translated.

    Raises:
        ValueError: if the samples have neither 2 nor 3 spatial dimensions.
            (The original used a bare `raise` with no active exception,
            which produced an unhelpful RuntimeError.)
    """
    ndim = len(tensor[0, 0, :].shape)
    t = input_size // nt
    # Allowed shift offsets for the 2-D case: 0, t, 2t, ..., (nt-1)*t.
    t_vec = np.linspace(0, (nt - 1) * t, nt).astype(int)
    shifted = []
    for i in range(len(tensor)):
        if ndim == 2:
            # Translate by a random multiple of t along each axis.
            sample = torch.roll(tensor[i, 0, :],
                                (np.random.choice(t_vec), np.random.choice(t_vec)),
                                (0, 1))
        elif ndim == 3:
            # Translate by a random number of units (0..input_size-1) per axis.
            sample = torch.roll(tensor[i, 0, :],
                                (np.random.choice(input_size), np.random.choice(input_size),
                                 np.random.choice(input_size)),
                                (0, 1, 2))
        else:
            raise ValueError(f"expected 2 or 3 spatial dimensions, got {ndim}")
        # Restore the channel and batch dimensions.
        shifted.append(sample.unsqueeze(0).unsqueeze(0))
    # Concatenate once instead of growing the tensor inside the loop
    # (the original repeated torch.cat per sample, which is quadratic).
    return torch.cat(shifted, dim=0)
async def get_icon(ctx: commands.Context, *, member: discord.Member = None) -> None:
    """
    Send the author's icon url to the channel
    :param ctx: invocation context supplied by the command framework
    :param member: (Optional) you can pass a member if you want to view this member's icon
    """
    # This command is not allowed in #general
    if not client.helpers.can_execute(ctx, unallowed_channels=[const.GENERAL_ID]):
        await ctx.send(f"{ctx.author.mention} You can't use this command in <#{const.GENERAL_ID}>")
        return
    # Without an explicit member, report the invoking author's own avatar.
    if not member:
        url = ctx.author.avatar_url
        await ctx.send(f"{ctx.author.mention} your icon is located at: {url}")
    else:
        url = member.avatar_url
        await ctx.send(f"{ctx.author.mention}. This member's icon is located at: {url}")
def _get_job_resources(args):
    """Extract job-global resources requirements from input args.
    Args:
        args: parsed command-line arguments
    Returns:
        Resources object containing the requested resources for the job
    """
    # Only build a logging param when the user asked for logging at all.
    logging = param_util.build_logging_param(
        args.logging) if args.logging else None
    # Normalize human-readable durations into seconds.
    timeout = param_util.timeout_in_seconds(args.timeout)
    log_interval = param_util.log_interval_in_seconds(args.log_interval)
    # logging_path and nvidia_driver_version are intentionally left unset
    # here; presumably they are filled in later in the pipeline (confirm
    # against Resources' consumers).
    return job_model.Resources(
        min_cores=args.min_cores,
        min_ram=args.min_ram,
        machine_type=args.machine_type,
        disk_size=args.disk_size,
        disk_type=args.disk_type,
        boot_disk_size=args.boot_disk_size,
        image=args.image,
        regions=args.regions,
        zones=args.zones,
        logging=logging,
        logging_path=None,
        service_account=args.service_account,
        scopes=args.scopes,
        cpu_platform=args.cpu_platform,
        network=args.network,
        subnetwork=args.subnetwork,
        use_private_address=args.use_private_address,
        accelerator_type=args.accelerator_type,
        accelerator_count=args.accelerator_count,
        nvidia_driver_version=None,
        timeout=timeout,
        log_interval=log_interval,
        ssh=args.ssh,
        enable_stackdriver_monitoring=args.enable_stackdriver_monitoring,
        max_retries=args.retries,
        max_preemptible_attempts=args.preemptible,
        block_external_network=args.block_external_network)
def am_api_post_json(api_path, data):
    """
    POST json to the Archivematica API
    :param api_path: URL path to request (without hostname, e.g. /api/v2/location/)
    :param data: Dict of data to post
    :returns: dict of json data returned by request
    """
    # Connection settings come from the environment (fail loudly if unset).
    base_url = os.environ["ARCHIVEMATICA_URL"]
    username = os.environ["ARCHIVEMATICA_USERNAME"]
    api_key = os.environ["ARCHIVEMATICA_API_KEY"]
    headers = {"Authorization": f"ApiKey {username}:{api_key}"}
    url = f"{base_url}{api_path}"
    print(f"URL: {url}; Data: {data}")
    response = requests.post(url, json=data, headers=headers)
    print(f"Response: {response}")
    response_json = response.json()
    print(f"Response JSON: {response_json}")
    return response_json
def runtest_setup(item: pytest.Item) -> None:
    """Turn off the cyclic garbage collector before each test runs."""
    # GC pauses could perturb timing-sensitive tests, so disable collection.
    gc.disable()
def collect_checkpoint_paths(checkpoint_dir):
    """
    Generates a list of paths to each checkpoint file found in a folder.
    Note:
        - This function assumes, that checkpoint paths were written in relative.
    Arguments:
        checkpoint_dir (string):
            Path to the models checkpoint directory from which to collect checkpoints.
    Returns:
        paths (:obj:`list` of :obj:`string`):
            List of paths to each checkpoint file.
    """
    listing_file = os.path.join(checkpoint_dir, 'checkpoint')
    # Read the listing with a context manager; the original iterated a bare
    # open() and never closed the file handle.
    with open(listing_file, 'r') as listing:
        lines = [line.strip() for line in listing]
    # Discard the first line since it only points to the latest checkpoint.
    lines = lines[1:]
    # Each remaining line has the form:
    #   all_model_checkpoint_paths: "model.ckpt-<global-step>"
    # Strip the key prefix and the surrounding quotation marks to recover
    # the relative checkpoint path.
    lines = [
        line.replace('all_model_checkpoint_paths: ', '').replace('"', '')
        for line in lines
    ]
    # Build absolute paths to each checkpoint file.
    return [os.path.join(checkpoint_dir, line) for line in lines]
def cosine_distance(input1, input2):
    """Computes cosine distance.
    Args:
        input1 (torch.Tensor): 2-D feature matrix.
        input2 (torch.Tensor): 2-D feature matrix.
    Returns:
        torch.Tensor: distance matrix of shape (input1.rows, input2.rows).
    """
    # L2-normalize the rows so the inner product equals cosine similarity.
    normed1 = F.normalize(input1, p=2, dim=1)
    normed2 = F.normalize(input2, p=2, dim=1)
    # Distance = 1 - similarity.
    return 1 - normed1.mm(normed2.t())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.