content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def general_string_parser(content_string: str, location: str) -> tuple:
    """
    Parse the given string of endpoint/method/header/body content
    * search for all parameters in this string
    ** all params are replaced with a starting and ending symbol of non priority tag
    * evaluate what type of parameter it is:
    ** enumerate type
    ** global variable type
    ** local variable type
    * add these parameters to a 'globe.all_parameters' (to indicate which global variable names were used within this application run)
    Return a tuple:
    * modified string -> every parameter is replaced with a non priority start and end tag
    ** before: https://mydomain/addUser/<:user:>/<>/<1,2,3,4,5>/<:used:>
    ** after: https://mydomain/addUser/<>/<>/<>/<>
    # before: {'Content-type': '<123,456>', '<>': '<:var:>'}
    # after: {'Content-type': '<>', '<>': '<>'}
    * list of parameters found:
    [
    {'location': $location, 'type': 'global_variable', 'name': 'user', 'id': 0}
    {'location': $location, 'type': 'global_variable', 'name': 'used', 'id': 1}
    {'location': $location, 'type': 'local_variable', 'id': 2}
    {'location': $location, 'type': 'enumerate', 'content': 'ABC', 'id': 3}
    {'location': $location, 'type': 'enumerate', 'content': '1,2,3,4,5', 'id': 4}
    ]
    """
    logging.debug('Calling the general_string_parser function with a following parameters: [{}, {}]'.format(content_string, location))
    """
    Get the format of tags
    * 2 types of tags in each part
    ** enumerate and local_variable (default <>)
    ** global_variable (default <::>)
    * By default, the enumerate tag is substring of global variable tag
    ** the priority tag is the one which should be searched in string first
    ** by default, priority tag is global_variable
    """
    # enumerate tags (the config section is selected by the `location` name,
    # e.g. globe.config.endpoint / globe.config.body)
    enum_start_tag = getattr(globe.config, location).enum.start
    enum_end_tag = getattr(globe.config, location).enum.end
    # global variable tags
    variable_start_tag = getattr(globe.config, location).variable.start
    variable_end_tag = getattr(globe.config, location).variable.end
    # priority tags
    prio_start_tag = getattr(globe.config, location).priority_start
    prio_end_tag = getattr(globe.config, location).priority_end
    # non priority tags
    non_prio_start = getattr(globe.config, location).non_priority_start
    non_prio_end = getattr(globe.config, location).non_priority_end
    # list of all parameters in given string
    parameters = []
    """
    Search for all parameters in endpoint/method/header/body
    variables:
    * e_idx = index of location, where the enumerate start tag was found
    * v_idx = index of location, where the global variable start tag was found
    * position = indicates the location of pointer in string (to avoid searching tags which were already been found)
    * globe.param_id_counter = current parameter id (each parameter has a different ID - in context of whole app run)
    used functions:
    * find_between() = to get the content between the starting and ending tag
    """
    position = 0  # positional index in the string
    # the first search of enumerate and variable starting tags in string
    e_idx = content_string.find(enum_start_tag)
    v_idx = content_string.find(variable_start_tag)
    while e_idx != -1 or v_idx != -1: # end this loop when no more starting tags were found
        """
        If the ENUM and VARIABLE indexes were found in the same index
        -> it means one of them is substring of another one
        The one which is not a substring has always the priority
        * default example: https://<:variable:>
        -> both '<' and '<:' starts at the same position
        -> the '<:' is more important
        """
        if e_idx == v_idx:
            if prio_start_tag == enum_start_tag:
                """
                ENUM tag is the priority one
                * Get the content of current parameter
                * Modify a tagged string
                * Count the position for next search
                * find_between(string, start_tag, end_tag, start_replacement, end_replacement) returns tuple:
                * string cut
                * before: https://<:variable:>/the/rest/of/a/url
                * after: <>/the/rest/of/a/url
                * content between the tags
                * 'variable'
                * position in url
                * 10
                """
                resulted_tuple = find_between(content_string[position:], enum_start_tag, enum_end_tag, non_prio_start, non_prio_end)
                original_string = content_string
                content_string = content_string[:position] + resulted_tuple[0]
                content = resulted_tuple[1]
                position = len(original_string[:position]) + resulted_tuple[2]
                """
                Add the information about this parameter to a resulted array of parameters
                """
                # check if the content is empty string
                if len(content) == 0:
                    # -> it is a local variable
                    p = {"location": location, "type": "local_variable", "id": globe.param_id_counter}
                else:
                    # -> it is a enumerated type
                    p = {"location": location, "type": "enumerate", "content": content, "id": globe.param_id_counter}
                parameters.append(p)
                """
                Evaluate which tag was already evaluated
                If e_idx or v_idx was evaluated, the next occurence of it has to be searched
                In this case the indexes are the same -> have to search new indexes for both
                """
                e_change = e_idx
                v_change = v_idx
            elif prio_start_tag == variable_start_tag:
                """
                VARIABLE tag is the priority one
                * Get the content of current parameter
                * Modify a tagged string
                * Count the position for next search
                """
                resulted_tuple = find_between(content_string[position:], variable_start_tag, variable_end_tag, non_prio_start, non_prio_end)
                original_string = content_string
                content_string = content_string[:position] + resulted_tuple[0]
                variable = resulted_tuple[1]
                position = len(original_string[:position]) + resulted_tuple[2]
                """
                Add the information about this parameter to a resulted array of parameters
                """
                p = {"location": location, "type": "global_variable", "name": variable, "id": globe.param_id_counter}
                parameters.append(p)
                """
                Evaluate which tag was already evaluated
                If e_idx or v_idx was evaluated, the next occurence have to be searched
                In this case the indexes are the same -> have to search new idx for both
                """
                v_change = v_idx
                e_change = e_idx
            else:
                # neither tag is configured as the priority one -> broken config
                message = "Should have never gotten here"
                raise EndpointSemanticError(__name__, "general_string_parser", message)
        else:
            if e_idx < v_idx:
                """
                ENUM is found before VARIABLE
                * unless the e_idx is not -1 (no more tag was found)
                -> e_idx should be evaluated before v_idx
                -> v_idx will stay the same, only the new e_idx will be search at the end
                """
                if e_idx == -1:
                    """
                    No more ENUM tags were found in string
                    * Get the content of current parameter
                    * Modify a tagged string
                    * Count the position for next search
                    """
                    resulted_tuple = find_between(content_string[position:], variable_start_tag, variable_end_tag, non_prio_start, non_prio_end)
                    original_string = content_string
                    content_string = content_string[:position] + resulted_tuple[0]
                    variable = resulted_tuple[1]
                    position = len(original_string[:position]) + resulted_tuple[2]
                    """
                    Add the information about this parameter to a resulted array of parameters
                    """
                    p = {"location": location, "type": "global_variable", "name": variable, "id": globe.param_id_counter}
                    parameters.append(p)
                    """
                    Evaluate which tag was already evaluated
                    If e_idx or v_idx was evaluated, the next occurence have to be searched
                    In this case only the new v_idx should be searched
                    (e_idx will be search as well, but from a v_idx starting point) -> algorithm will find the same one as before
                    """
                    v_change = v_idx
                    e_change = v_idx
                else:
                    """
                    ENUM is found before VARIABLE
                    * Get the content of current parameter
                    * Modify a tagged string
                    * Count the position for next search
                    """
                    resulted_tuple = find_between(content_string[position:], enum_start_tag, enum_end_tag, non_prio_start, non_prio_end)
                    original_string = content_string
                    content_string = content_string[:position] + resulted_tuple[0]
                    content = resulted_tuple[1]
                    position = len(original_string[:position]) + resulted_tuple[2]
                    """
                    Add the information about this parameter to a resulted array of parameters
                    """
                    # check if the content is empty string
                    if len(content) == 0:
                        # -> it is a local variable
                        p = {"location": location, "type": "local_variable", "id": globe.param_id_counter}
                    else:
                        # -> it is a enumerated type
                        p = {"location": location, "type": "enumerate", "content": content, "id": globe.param_id_counter}
                    parameters.append(p)
                    """
                    Evaluate which tag was already evaluated
                    If e_idx or v_idx was evaluated, the next occurence have to be searched
                    In this case only the new e_idx should be searched
                    (v_idx will be search as well, but from a e_idx starting point) -> algorithm will find the same one as before
                    """
                    e_change = e_idx
                    v_change = e_idx
            elif v_idx < e_idx:
                """
                VARIABLE is found before ENUM
                * unless the v_idx is not -1 (no more tag was found)
                -> v_idx should be evaluated before e_idx
                -> e_idx will stay the same, only the new v_idx will be search at the end
                """
                if v_idx == -1:
                    """
                    No more VARIABLE tags were found in string
                    * Get the content of current parameter
                    * Modify a tagged string
                    * Count the position for next search
                    """
                    resulted_tuple = find_between(content_string[position:], enum_start_tag, enum_end_tag, non_prio_start, non_prio_end)
                    original_string = content_string
                    content_string = content_string[:position] + resulted_tuple[0]
                    content = resulted_tuple[1]
                    position = len(original_string[:position]) + resulted_tuple[2]
                    """
                    Add the information about this parameter to a resulted array of parameters
                    """
                    # check if the content is empty string
                    if len(content) == 0:
                        # -> it is a local variable
                        p = {"location": location, "type": "local_variable", "id": globe.param_id_counter}
                    else:
                        # -> it is an enumerated type
                        p = {"location": location, "type": "enumerate", "content": content, "id": globe.param_id_counter}
                    parameters.append(p)
                    """
                    Evaluate which tag was already evaluated
                    If e_idx or v_idx was evaluated, the next occurence have to be searched
                    In this case only the new e_idx should be searched
                    (v_idx will be search as well, but from a e_idx starting point) -> algorithm will find the same one as before
                    """
                    e_change = e_idx
                    v_change = e_idx
                else:
                    """
                    VARIABLE is found before ENUM
                    * Get the content of current parameter
                    * Modify a tagged string
                    * Count the position for next search
                    """
                    resulted_tuple = find_between(content_string[position:], variable_start_tag, variable_end_tag, non_prio_start, non_prio_end)
                    original_string = content_string
                    content_string = content_string[:position] + resulted_tuple[0]
                    variable = resulted_tuple[1]
                    position = len(original_string[:position]) + resulted_tuple[2]
                    """
                    Add the information about this parameter to a resulted array of parameters
                    """
                    p = {"location": location, "type": "global_variable", "name": variable, "id": globe.param_id_counter}
                    parameters.append(p)
                    """
                    Evaluate which tag was already evaluated
                    If e_idx or v_idx was evaluated, the next occurence have to be searched
                    In this case only the new v_idx should be searched
                    (e_idx will be search as well, but from a v_idx starting point) -> algorithm will find the same one as before
                    """
                    v_change = v_idx
                    e_change = v_idx
            else:
                # unreachable: the outer else guarantees e_idx != v_idx
                message = "Should have never gotten here"
                raise EndpointSemanticError(__name__, "general_string_parser", message)
        """
        WHILE EVALUATION
        Find the first occurence of ENUM start tag or VARIABLE start tag
        in the next iteration of while, the already evaluated part of content_string is ignored
        (e_change+1 and v_change+1 means it starts to search from this index)
        if nothing is found -> -1 is returned
        """
        e_idx = content_string.find(enum_start_tag, e_change+1)
        v_idx = content_string.find(variable_start_tag, v_change+1)
        globe.param_id_counter += 1
    return content_string,parameters
def get_compare_collection(name, csv_line):
    """Fetch comparison data for the collection with the given name.

    Returns {'isExist': False} when no DB session is available or the
    query fails; otherwise the response dict filled in by
    get_collection_data_dirs, plus 'hasNext' and 'isExist' flags.
    """
    session = tables.get_session()
    if session is None:
        return {'isExist': False}
    response = {}
    try:
        table = CollectionTable()
        # Resolve the collection's id and ip from its name.
        collection_id = table.get_field_by_key(CollectionTable.collection_id,
                                               CollectionTable.collection_name, name, session)
        collection_ip = table.get_field_by_key(CollectionTable.collection_ip,
                                               CollectionTable.collection_name, name, session)
        get_collection_data_dirs(collection_ip, collection_id, csv_line, response, session)
        # More data remains when the caller's csv cursor is behind 'nextCsv'.
        response['hasNext'] = csv_line < response['nextCsv']
    except SQLAlchemyError as err:
        LOGGER.error('Get compare collection data failed: %s', err)
        return {'isExist': False}
    finally:
        session.close()
    response['isExist'] = True
    return response
def _getSmartIndenter(indenterName, qpart, indenter):
"""Get indenter by name.
Available indenters are none, normal, cstyle, haskell, lilypond, lisp, python, ruby, xml
Indenter name is not case sensitive
Raise KeyError if not found
indentText is indentation, which shall be used. i.e. '\t' for tabs, ' ' for 4 space symbols
"""
indenterName = indenterName.lower()
if indenterName in ('haskell', 'lilypond'): # not supported yet
logger.warning('Smart indentation for %s not supported yet. But you could be a hero who implemented it' % indenterName)
from qutepart.indenter.base import IndentAlgNormal as indenterClass
elif 'none' == indenterName:
from qutepart.indenter.base import IndentAlgBase as indenterClass
elif 'normal' == indenterName:
from qutepart.indenter.base import IndentAlgNormal as indenterClass
elif 'cstyle' == indenterName:
from qutepart.indenter.cstyle import IndentAlgCStyle as indenterClass
elif 'python' == indenterName:
from qutepart.indenter.python import IndentAlgPython as indenterClass
elif 'ruby' == indenterName:
from qutepart.indenter.ruby import IndentAlgRuby as indenterClass
elif 'xml' == indenterName:
from qutepart.indenter.xmlindent import IndentAlgXml as indenterClass
elif 'haskell' == indenterName:
from qutepart.indenter.haskell import IndenterHaskell as indenterClass
elif 'lilypond' == indenterName:
from qutepart.indenter.lilypond import IndenterLilypond as indenterClass
elif 'lisp' == indenterName:
from qutepart.indenter.lisp import IndentAlgLisp as indenterClass
elif 'scheme' == indenterName:
from qutepart.indenter.scheme import IndentAlgScheme as indenterClass
else:
raise KeyError("Indenter %s not found" % indenterName)
return indenterClass(qpart, indenter) | 5,327,202 |
def scheming_multiple_choice_output(value):
    """Return the stored JSON value as a proper list.

    Lists pass through unchanged; JSON strings are decoded; anything
    that fails to decode is wrapped in a single-item list.
    """
    if isinstance(value, list):
        return value
    try:
        decoded = json.loads(value)
    except ValueError:
        # Not valid JSON -> treat the raw value as one item.
        return [value]
    return decoded
def logmap(x, x0):
    """Map a point on the (unit-sphere) manifold into the tangent space at x0.

    Parameters
    ----------
    :param x: point on the manifold (column vector(s))
    :param x0: basis point of the tangent space where x will be mapped

    Returns
    -------
    :return: vector(s) in the tangent space of x0
    """
    # Promote 1-D inputs to column vectors.
    if np.ndim(x0) < 2:
        x0 = x0[:, None]
    if np.ndim(x) < 2:
        x = x[:, None]
    # Geodesic distance between x0 and each column of x.
    theta = np.arccos(np.dot(x0.T, x))
    u = (x - x0 * np.cos(theta)) * theta / np.sin(theta)
    # Where x coincides with x0 (theta ~ 0) the formula is 0/0: zero those columns.
    u[:, theta[0] < 1e-16] = np.zeros((u.shape[0], 1))
    return u
def test_mg_k009_mg_k009_v(mode, save_output, output_format):
    """
    TEST :model groups (ALL) : sequence: with 5 elements, all elements
    appeared and are in defined order
    """
    # Fixed schema/instance pair for this case; runtime knobs come from fixtures.
    binding_kwargs = dict(
        schema="msData/modelGroups/mgK009.xsd",
        instance="msData/modelGroups/mgK009.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def parse_field(source, loc, tokens):
    """
    Returns the tokens of a field as a (name, value) pair.

    The field name is lower-cased and the value normalised; an 'author'
    value containing ' and ' is split into a list of individual authors.
    """
    key = tokens[0].lower()
    val = normalize_value(tokens[2])
    if key == 'author' and ' and ' in val:
        authors = val.split(' and ')
        val = [author.strip() for author in authors]
    return (key, val)
def test_template_raw(device_raw, template, op):
    """Configurable test with template file (raw device fixture variant).

    Runs `_drned_single_file` on `template` with the requested operations.
    The rollback[+-no] operation specifies an index in a list of all
    commits performed here: rollback-1 undoes the last commit,
    rollback+0 undoes all commits, etc.  Operations may also be given in
    the template file itself ("!op=(load,commit)") or in a .load file
    (see Device.load).  Uses the device_raw fixture, so the device state
    is NOT restored afterwards.

    Args:
        device_raw: device fixture
        template: name of the template file
        op[0..n]: operations to perform; one of "", load, commit,
            compare-config, check-sync, rollback[+-no].
            Default: ["load", "commit", "compare-config"]
    Returns:
        nothing
    """
    ops = ["load", "commit", "compare-config"] if op is None else op
    _drned_single_file(device_raw, template, ops)
def evaluate_models_exploratory(X_normal:np.ndarray,
                                X_te:np.ndarray,
                                X_adv_deepfool:np.ndarray,
                                X_adv_fgsm:np.ndarray,
                                X_adv_pgd:np.ndarray,
                                X_adv_dt:np.ndarray,
                                Y:np.ndarray,
                                Y_aml:np.ndarray,
                                perfs:dict,
                                contamination:float=.05,
                                degree:float=3.,
                                support_fraction:float=.5):
    """
    Fit four outlier detectors on normal data, then score them on the clean
    test set and four adversarial sets, accumulating metrics into `perfs`.

    :param X_normal: clean training data the detectors are fit on
    :param X_te: clean test data (the 'baseline' attack type)
    :param X_adv_deepfool / X_adv_fgsm / X_adv_pgd / X_adv_dt: adversarial test sets
    :param Y: labels for the clean test set
    :param Y_aml: labels shared by all adversarial sets
    :param perfs: running metric totals, updated in place (keys like 'accs_if_baseline')
    :param contamination: expected outlier fraction (IF / EE / LOF)
    :param degree: polynomial degree for the one-class SVM kernel
    :param support_fraction: support fraction for the elliptic envelope
    :return: the updated `perfs` dict
    """
    # FIX: LocalOutlierFactor must be constructed with novelty=True so that
    # predict() is legitimately available for unseen data; flipping the
    # `novelty` attribute after fit() (as before) is unsupported by sklearn.
    MODELS = [IsolationForest(contamination=contamination),
              OneClassSVM(kernel='poly', degree=degree),
              EllipticEnvelope(contamination=contamination, support_fraction=support_fraction),
              LocalOutlierFactor(contamination=contamination, novelty=True)]
    MODELS_NAMES = ['if', 'svm', 'ee', 'lo']
    ATTACKS = ['baseline', 'deepfool', 'fgsm', 'pgd', 'dt']
    for model, model_name in zip(MODELS, MODELS_NAMES):
        # fit the model on the normal data only
        model.fit(X_normal)
        # predictions on clean + adversarial sets, same order as ATTACKS
        outputs = (model.predict(X_te), model.predict(X_adv_deepfool),
                   model.predict(X_adv_fgsm), model.predict(X_adv_pgd),
                   model.predict(X_adv_dt))
        for y_hat, attack_type in zip(outputs, ATTACKS):
            # the clean test set uses Y; all adversarial sets share Y_aml
            labels = Y if attack_type == 'baseline' else Y_aml
            acc, fs, tpr, tnr, mcc = get_performance(y_true=labels, y_hat=y_hat)
            for prefix, metric in (('accs', acc), ('fss', fs), ('tprs', tpr),
                                   ('tnrs', tnr), ('mccs', mcc)):
                perfs[f'{prefix}_{model_name}_{attack_type}'] += metric
    return perfs
def REMA_mosaic_r1_1_tile(dir_REMA,tile_name,dem_out,filter_params=None,format_out='GTiff',tgt_EPSG=3031,tgt_res=None,nodata_out=-9999,interp_method=None,geoid=False,tag_lonlat_tile=False,path_tile_index=None,tag_merge=False,tag_clip=False):
    """
    :param dir_REMA: path to parent directory "8m" containing subdirectories of tar.gz archives (native FTP architecture)
    :param tile_name: either REMA tile name or 1x1° lat/lon tile name (SRTMGL1/classic naming convention)
    :param dem_out: path to DEM out file
    :param filter_params: filtering with REMA ERR file using rastlib.filter_nanarray function
    :param format_out: output format, GDAL naming (e.g.: 'GTiff','HDF4', ...) ; see: https://www.gdal.org/formats_list.html
    :param tgt_EPSG: EPSG of output projection
    :param tgt_res: output resolution, GDAL naming [xres, yres]
    :param nodata_out: output no-data value
    :param interp_method: resampling method, GDAL naming 'bilinear', 'neir', 'cubic', etc..
    :param geoid: True, converts to geoid if is ellipsoid; False converts to ellipsoid if is geoid
    :param tag_lonlat_tile: True if tile_name follows SRTMGL1 tile naming, False if tile_name follows REMA tile naming
    :param path_tile_index: if tile_name is REMA format, specify path to ESRI REMA Tile Index
    :param tag_merge: if tile_name is REMA format, True to merge all ArcticDEM tiles to the 1x1° lat/lon extent
    :param tag_clip: if tile_name is REMA format, True to clip the 5x5° tile to the 1x1° lat/lon extent of tile_name
    :return:
    REMA release 1.1 product: ref:https://www.pgc.umn.edu/data/rema/
    Processing for 8m mosaic
    (100m, 500m and 1km versions are bundled in one .tif file)
    Tile name and processing is REMA tile naming convention by default
    Provide path to ESRI tile index file to use 1x1° lat/lon tiles and SRTMGL1 naming convention
    OPTIMAL DIRECTORY ARCHITECTURE: point to "8m" folder of similar architecture than: ftp://ftp.data.pgc.umn.edu/elev/dem/setsm/REMA/mosaic/v1.0
    """
    # 1/ LOCATE TILE
    if not tag_lonlat_tile:
        subtile_dir=os.path.join(dir_REMA,tile_name)
        tile_tar_gz_list=[os.path.join(subtile_dir,tar_file) for tar_file in os.listdir(subtile_dir) if tar_file.endswith('.tar.gz')]
    else:
        lat_tile, lon_tile = SRTMGL1_naming_to_latlon(tile_name)
        extent = [lat_tile, lon_tile, lat_tile + 1, lon_tile + 1]
        # feature name in REMA_Tile_Index_Rel1.1
        feat_name = 'TILE'
        subtile_name_list=list_shp_field_inters_extent(path_tile_index, feat_name, extent,4326)
        subtile_dir_list = [os.path.join(dir_REMA,tile) for tile in subtile_name_list]
        tile_tar_gz_list=[]
        for i in range(len(subtile_dir_list)):
            tile_tar_gz_list=tile_tar_gz_list+[os.path.join(subtile_dir_list[i],tar_file) for tar_file in os.listdir(subtile_dir_list[i]) if tar_file.endswith('.tar.gz')]
    # 2/ EXTRACT TILE
    tmp_dir = create_tmp_dir_for_outfile(dem_out)
    list_tmp_dem = [os.path.join(tmp_dir, os.path.splitext(os.path.basename(tile_tar_gz))[0]+'_dem.tif') for tile_tar_gz in tile_tar_gz_list]
    for tile_tar_gz in tile_tar_gz_list:
        extract_file_from_tar_gz(tile_tar_gz,os.path.splitext(os.path.basename(tile_tar_gz))[0]+'_dem.tif',list_tmp_dem[tile_tar_gz_list.index(tile_tar_gz)])
    # extract the matching ERR rasters (needed by the filtering step below)
    for tile_tar_gz in tile_tar_gz_list:
        extract_file_from_tar_gz(tile_tar_gz,os.path.splitext(os.path.basename(tile_tar_gz))[0]+'_err.tif',list_tmp_dem[tile_tar_gz_list.index(tile_tar_gz)])
    list_tmp_dem_tomerge=[]
    for tmp_dem in list_tmp_dem:
        # 3/ FILTER TILE
        if filter_params is not None:
            tmp_err=tmp_dem[:-8]+'_err.tif'
            err = read_nanarray(tmp_err)
            _, filt = filter_nanarray(err, filter_params[0], filter_params[1], filter_params[2])
            dem = read_nanarray(tmp_dem)
            dem_filtered = np.array(dem)
            # np.nan instead of the removed np.NaN alias (numpy >= 2.0)
            dem_filtered[filt] = np.nan
            update_nanarray(tmp_dem, dem_filtered)
        # 4/ REPROJECT TILE
        # raw data is GeoTiff, EPSG:3031 and -9999 nodata: skip the warp when
        # the requested output already matches the native format.
        # FIX: compare nodata_out with `==`, not `is` — identity comparison of
        # an int like -9999 is not guaranteed across modules/callers.
        if format_out == 'GTiff' and tgt_EPSG == 3031 and tgt_res is None and nodata_out == -9999:
            tmp_dem_proj = tmp_dem
        else:
            tmp_dem_proj = os.path.join(tmp_dir, os.path.splitext(os.path.basename(tmp_dem))[0] + '_proj.tif')
            warp_defaultUTM(tmp_dem, tmp_dem_proj, format_out, 3031, tgt_EPSG, tgt_res, nodata_out, interp_method)
        # 5/ ELLIPSOID OR GEOID
        # raw data is ellipsoid WGS84
        if geoid:
            tmp_dem_geoid= os.path.join(tmp_dir, os.path.splitext(os.path.basename(tmp_dem))[0] + '_geoid.tif')
            ellipsoid_to_geoid(tmp_dem_proj,tmp_dem_geoid)
        else:
            tmp_dem_geoid=tmp_dem_proj
        list_tmp_dem_tomerge.append(tmp_dem_geoid)
    # 6/ MERGE ALL TILES
    tmp_dem_merged=os.path.join(tmp_dir,tile_name+'_merged.tif')
    # NOTE(review): when tag_merge is False, tmp_dem_merged is never created,
    # so the clip/move below will fail — confirm intended usage of tag_merge.
    if tag_merge:
        merge_rast_list(list_tmp_dem_tomerge,tmp_dem_merged)
    else:
        shutil.copytree(tmp_dir,os.path.join(os.path.dirname(dem_out),tile_name+'_subtiles'))
    # 7/ CLIP TO TILE EXTENT
    # NOTE(review): clipping happens when tag_clip is False, which contradicts
    # the docstring ("True to clip") — behaviour kept as-is; verify with callers.
    if not tag_clip:
        tmp_dem_clipped = os.path.join(tmp_dir,tile_name+'_clipped.tif')
        lat,lon= SRTMGL1_naming_to_latlon(tile_name)
        clip_rast_to_extent(tmp_dem_merged, tmp_dem_clipped, [lat, lon, lat + 1, lon + 1], 4326)
    else:
        tmp_dem_clipped = tmp_dem_merged
    shutil.move(tmp_dem_clipped,dem_out)
    remove_tmp_dir_for_outfile(dem_out)
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache.

    data_home: optional explicit cache location; resolved by get_data_home.
    """
    cache_path = get_data_home(data_home)
    shutil.rmtree(cache_path)
def get_files_endpoint(entity_name):
    """
    Given an entity name, generate a flask_restful `Resource` class. In
    `create_api_endpoints()`, these generated classes are registered with the API e.g.
    `api.add_resource(get_files_endpoint("Dataset"), "/datasets/<string:pid>/files")`
    :param entity_name: Name of the entity
    :type entity_name: :class:`str`
    :return: Generated endpoint class
    """
    class FilesEndpoint(Resource):
        # GET handler: list the files of the Dataset identified by `pid`,
        # restricted by any filters given in the request's query string.
        @search_api_error_handling
        def get(self, pid):
            filters = get_filters_from_query_string("search_api", entity_name)
            log.debug("Filters: %s", filters)
            return get_files(entity_name, pid, filters), 200
        # The OpenAPI docstring is assigned at runtime so it can interpolate
        # `entity_name`; the spec generator reads it from `get.__doc__`.
        get.__doc__ = f"""
        ---
        summary: Get {entity_name}s for the given Dataset
        description: Retrieves a list of {entity_name} objects for a given Dataset
            object
        tags:
            - Dataset
        parameters:
            - in: path
              required: true
              name: pid
              description: The pid of the entity to retrieve
              schema:
                oneOf:
                  - type: string
            - FILTER
        responses:
            200:
                description: Success - returns {entity_name}s for the given Dataset
                    object that satisfy the filter
                content:
                    application/json:
                        schema:
                            type: array
                            items:
                              $ref:
                                '#/components/schemas/{entity_name}'
            400:
                description: Bad request - Something was wrong with the request
            404:
                description: No such record - Unable to find a record in ICAT
        """
    # Name the generated class after the entity so registrations are distinct.
    FilesEndpoint.__name__ = entity_name
    return FilesEndpoint
def get_name(path):
    """Get the repo name from a repo path (basename minus a trailing ".git")."""
    base = os.path.basename(path)
    if base.endswith(".git"):
        base = base[:-len(".git")]
    return base
def abs_path(*paths):
    """Get the absolute path of the given file path.

    Args:
        *paths: path parts, joined relative to the directory one level
            above `script_dir`.

    Returns:
        An abs path string.
    """
    joined = os.path.join(script_dir, '..', *paths)
    return os.path.abspath(joined)
def is_valid_shipping_method(
    checkout: Checkout,
    lines: Iterable["CheckoutLineInfo"],
    discounts: Iterable[DiscountInfo],
    subtotal: Optional["TaxedMoney"] = None,
):
    """Check if shipping method is valid and remove (if not)."""
    # A shipping method can only be validated against a destination address.
    if not (checkout.shipping_method and checkout.shipping_address):
        return False
    valid_methods = get_valid_shipping_methods_for_checkout(
        checkout, lines, discounts, subtotal=subtotal
    )
    if valid_methods is not None and checkout.shipping_method in valid_methods:
        return True
    # The selected method is no longer applicable: drop it from the checkout.
    clear_shipping_method(checkout)
    return False
async def load_gdq_index():
    """
    Returns the GDQ index (main) page, includes donation totals
    :return: json object
    """
    payload = await load_gdq_json(f"?type=event&id={config['event_id']}")
    return payload[0]['fields']
def deleteRestaurantForm(r_id):
    """Create form to delete existing restaurant
    Args:
        r_id: id extracted from URL
    """
    session = createDBSession()
    restaurant = session.query(Restaurant).get(r_id)
    if restaurant is None:
        return ("<p>The restaurant you're looking for doesn't exist.<br>"
                "<a href='/restaurants'>Back to listings</a></p>")
    # NOTE(review): restaurant.name is interpolated into HTML without
    # escaping — confirm it cannot contain user-supplied markup.
    return ("<form method='POST' enctype='multipart/form-data' "
            f"action='/restaurants/{restaurant.id}/delete'>"
            f"<h2>Delete {restaurant.name} restaurant</h2><p>Are you sure? "
            f"<input type='hidden' name='restaurantID' value='{restaurant.id}'>"
            "<input type='submit' value='Delete'></p></form>"
            "<p><a href='/restaurants'>No, take me back to the listings"
            "</a></p>")
def password_account(data):
    """Modify account password.
    etcd_key: <ETCD_PREFIX>/account/<name>
    data: {'name': , 'pass': , 'pass2': }
    Returns a (success: bool, message) tuple.
    """
    t_ret = (False, '')
    s_rsc = '{}/account/{}'.format(etcdc.prefix, data['name'])
    try:
        r = etcdc.read(s_rsc)
    except etcd.EtcdKeyNotFound as e:
        log.error(e)
        return (False, 'EtcdKeyNotFound')
    d = ast.literal_eval(r.value)
    # check data['pass'] is valid.
    (b_ret, s_msg) = _pass_validate(data)
    if not b_ret:
        log.debug((b_ret, s_msg))
        return (b_ret, s_msg)
    # password is okay. go head.
    new_data = dict()
    s_modified = datetime.utcnow().isoformat() + 'Z'
    data['modifiedAt'] = s_modified  # kept for callers that read it back
    # FIX: persist the modification time with the etcd entry (previously it
    # was only set on the caller's `data` dict and never stored).
    new_data['modifiedAt'] = s_modified
    # Archive the current hash in the password history.
    # FIX: appending to new_data['oldpass'] raised KeyError because new_data
    # starts empty — copy the existing history from `d` instead.
    if 'oldpass' in d:
        new_data['oldpass'] = d['oldpass'] + [d['pass']]
    else:
        new_data['oldpass'] = [d['pass']]
    # Create new hashed password using the account's stored salt.
    bytes_salt = bytes(d['salt'], 'utf-8')
    new_data['pass'] = bcrypt.hashpw(str.encode(data['pass']),
                                     bytes_salt).decode()
    d.update(new_data)
    s_rsc = '{}/account/{}'.format(etcdc.prefix, data['name'])
    try:
        etcdc.write(s_rsc, d, prevExist=True)
    except etcd.EtcdKeyNotFound as e:
        log.error(e)
        t_ret = (False, e)
    else:
        t_ret = (True, 'user {} password is modified.'.format(data['name']))
    finally:
        # NOTE(review): `return` in finally also suppresses any unexpected
        # exception from etcdc.write — kept to preserve existing behaviour.
        return t_ret
def split_pkg(pkg):
    """Split a conda package path into (platform, name, version, build).

    `pkg` must look like "<platform>/<name>-<version>-<build>.tar.bz2";
    any other extension raises RuntimeError.
    (nice little code snippet from isuru and CJ)
    """
    suffix = ".tar.bz2"
    if not pkg.endswith(suffix):
        raise RuntimeError("Can only process packages that end in .tar.bz2")
    trimmed = pkg[:-len(suffix)]
    plat, pkg_name = trimmed.split("/")
    # Split from the right: the version itself may contain dashes' neighbours.
    name_ver, build = pkg_name.rsplit("-", 1)
    name, ver = name_ver.rsplit("-", 1)
    return plat, name, ver, build
def depart(visitor: DocxTranslator, node: Node):
    """Finish processing note node.

    Pops the note's paragraph style and decreases the nesting level that
    the matching visit handler set up.
    """
    assert isinstance(visitor, DocxTranslator)
    assert isinstance(node, Node)
    visitor.p_style.pop()
    visitor.p_level -= 1
def gaussian(k, x):
    """Evaluate a Gaussian with a constant offset.

    k: coefficient array [mean, sigma, amplitude, offset]; x: value(s).
    Returns amplitude * exp(-(x - mean)^2 / (2 * sigma^2)) + offset.
    """
    mean, sigma, amplitude, offset = k[0], k[1], k[2], k[3]
    delta = x - mean
    return amplitude * np.exp(-delta * delta / (2 * sigma * sigma)) + offset
def get_physical_locator(context, record_dict):
    """Get physical locator that matches the supplied uuid.

    Returns None (after a debug log) when no matching row exists.
    """
    try:
        query = context.session.query(models.PhysicalLocators)
        return query.filter_by(
            uuid=record_dict['uuid'],
            ovsdb_identifier=record_dict['ovsdb_identifier']).one()
    except exc.NoResultFound:
        LOG.debug('no physical locator found for %s and %s',
                  record_dict['uuid'],
                  record_dict['ovsdb_identifier'])
        return None
def coins(n, arr):
    """
    Count the ordered ways (compositions) to make `n` from values in `arr`;
    e.g. (5,1) and (1,5) are counted separately.

    Memoised on the remaining amount, so runtime is O(n * len(arr))
    instead of exponential.

    :param n: target amount (n < 0 yields 0, n == 0 yields 1)
    :param arr: coin denominations
    :return: number of ordered combinations
    """
    memo = {}

    def _count(remaining):
        # Overshot the target: this path contributes nothing.
        if remaining < 0:
            return 0
        # Exact change: exactly one (possibly empty) composition.
        if remaining == 0:
            return 1
        if remaining not in memo:
            memo[remaining] = sum(_count(remaining - coin) for coin in arr)
        return memo[remaining]

    return _count(n)
def function_tracing_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[Lambda.2] Lambda functions should use active tracing with AWS X-Ray

    Generator: yields one ASFF finding per Lambda function, PASSED when
    the function's TracingConfig mode is "Active", FAILED otherwise.
    """

    def _build_finding(lambdaArn, functionName, tracingMode, iso8601Time):
        """Assemble one finding; the pass/fail variant is chosen from tracingMode."""
        if tracingMode == "Active":
            severity = "INFORMATIONAL"
            description = "Lambda function " + functionName + " has Active Tracing enabled."
            complianceStatus = "PASSED"
            workflowStatus = "RESOLVED"
            recordState = "ARCHIVED"
        else:
            severity = "LOW"
            description = (
                "Lambda function "
                + functionName
                + " does not have Active Tracing enabled. Because X-Ray gives you an end-to-end view of an entire request, you can analyze latencies in your Functions and their backend services. You can use an X-Ray service map to view the latency of an entire request and that of the downstream services that are integrated with X-Ray. Refer to the remediation instructions if this configuration is not intended."
            )
            complianceStatus = "FAILED"
            workflowStatus = "NEW"
            recordState = "ACTIVE"
        return {
            "SchemaVersion": "2018-10-08",
            "Id": lambdaArn + "/lambda-active-tracing-check",
            "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
            "GeneratorId": lambdaArn,
            "AwsAccountId": awsAccountId,
            "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
            "FirstObservedAt": iso8601Time,
            "CreatedAt": iso8601Time,
            "UpdatedAt": iso8601Time,
            "Severity": {"Label": severity},
            "Confidence": 99,
            "Title": "[Lambda.2] Lambda functions should use active tracing with AWS X-Ray",
            "Description": description,
            "Remediation": {
                "Recommendation": {
                    "Text": "To configure your Lambda functions send trace data to X-Ray refer to the Using AWS Lambda with AWS X-Ray section of the Amazon Lambda Developer Guide",
                    "Url": "https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html"
                }
            },
            "ProductFields": {"Product Name": "ElectricEye"},
            "Resources": [
                {
                    "Type": "AwsLambdaFunction",
                    "Id": lambdaArn,
                    "Partition": awsPartition,
                    "Region": awsRegion,
                    "Details": {
                        "AwsLambdaFunction": {
                            "FunctionName": functionName,
                            "TracingConfig": {
                                "Mode": tracingMode
                            }
                        }
                    }
                }
            ],
            "Compliance": {
                "Status": complianceStatus,
                "RelatedRequirements": [
                    "NIST CSF DE.AE-3",
                    "NIST SP 800-53 AU-6",
                    "NIST SP 800-53 CA-7",
                    "NIST SP 800-53 IR-4",
                    "NIST SP 800-53 IR-5",
                    "NIST SP 800-53 IR-8",
                    "NIST SP 800-53 SI-4",
                    "AICPA TSC CC7.2",
                    "ISO 27001:2013 A.12.4.1",
                    "ISO 27001:2013 A.16.1.7",
                ],
            },
            "Workflow": {"Status": workflowStatus},
            "RecordState": recordState,
        }

    iterator = paginator.paginate()
    for page in iterator:
        # One timestamp per page keeps Created/Updated consistent within a batch.
        iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
        for function in page["Functions"]:
            functionName = str(function["FunctionName"])
            lambdaArn = str(function["FunctionArn"])
            tracingMode = str(function["TracingConfig"]["Mode"])
            yield _build_finding(lambdaArn, functionName, tracingMode, iso8601Time)
def compute_diff(old, new):
    """
    Return a diff object that transforms `old` into `new` when applied.

    Neither `old` nor `new` is modified.  Non-dict values are replaced
    wholesale; keys present only in `old` are marked with "$delete".
    """
    # Anything that is not a dict-to-dict comparison is replaced outright.
    if not (isinstance(old, dict) and isinstance(new, dict)):
        return new
    result = {}
    for key, new_val in new.items():
        if key in old:
            # Recurse only on changed values; equal values are omitted.
            old_val = old[key]
            if old_val != new_val:
                result[key] = compute_diff(old_val, new_val)
        else:
            result[key] = new_val
    # Keys removed in `new` get an explicit deletion marker.
    for key in old.keys() - new.keys():
        result[key] = "$delete"
    return result
def test_init_params():
    """
    armedcheckswitch.py: Test __init__() with different parameters

    Covers all four (switched, armed) combinations, passing the keyword
    arguments in both orders.
    """
    # Idiomatic truthiness asserts instead of `== True` / `== False`.
    s1 = ArmedCheckSwitch(switched=True, armed=False)
    assert s1.is_switched()
    assert not s1.is_armed()
    s2 = ArmedCheckSwitch(switched=False, armed=False)
    assert not s2.is_switched()
    assert not s2.is_armed()
    s3 = ArmedCheckSwitch(armed=True, switched=True)
    assert s3.is_switched()
    assert s3.is_armed()
    s4 = ArmedCheckSwitch(armed=True, switched=False)
    assert not s4.is_switched()
    assert s4.is_armed()
def append_and_output_per_file(args, dataset_name, eval_results, results_list):
    """
    Append results to a list, and output them; these results are associated
    to a single file.
    :param args: the command line arguments, containing several options
    :param dataset_name: name of the dataset (or file) to which these results refer to
    :param eval_results: the results on this file
    :param results_list: the full lists of results in several files; eval_results appended here
    :return: nothing
    """
    ade, ade_no_len, fde, contribution_ade, contribution_fde, statistics = eval_results.get()
    results_list.append(eval_results)
    if args.test_files_individually:
        # Assemble the report line by line, then emit it in one print call.
        header = f"File {dataset_name}:{os.linesep}"
        ade_part = f"ADE={ade:.3f}; Without length influencing: {ade_no_len:.3f}"
        fde_part = f"{os.linesep}FDE={fde:.3f}"
        print(header + ade_part + fde_part)
        __compute_statistics__(args, statistics, dataset_name)
        print("")
def get_file_to_dict(fliepath, splitsign, name):
    """
    Read the file at ``fliepath + name + '.txt'`` into a dict; create the
    file (empty) if it does not exist.

    Each line is split once on ``splitsign``; the stripped left part becomes
    the key and the stripped right part the value.

    :param fliepath: directory prefix of the file (parameter name kept,
        although misspelled, for backward compatibility with callers
        using keyword arguments)
    :param splitsign: separator between key and value on each line
    :param name: file name without the ``.txt`` extension
    :return: dict parsed from the file; {} for a new file or on parse failure
    """
    path = fliepath + name + '.txt'
    result = {}
    if os.path.exists(path):
        with open(path, mode='r', encoding='utf-8') as ff:
            try:
                for line in ff.read().splitlines():
                    # Raises ValueError when the separator is missing,
                    # mirroring the original all-or-nothing behavior.
                    key, value = str(line).split(splitsign, 1)
                    result[key.strip()] = value.strip()
            except (ValueError, IndexError):
                # Any malformed line (or a decode error, which is a
                # ValueError subclass) invalidates the whole file.
                result = {}
    else:
        # Create an empty file so subsequent calls find it.
        with open(path, mode='w', encoding='utf-8'):
            pass
    return result
def main():
    """Start the server then tick in loop.

    Binds a UNIX domain socket at a fixed address and calls tick() forever;
    this function never returns.
    """
    address = "/run/com_handler.sock"
    # Remove a stale socket file from a previous run.  Catch only OSError
    # (not a bare except) and re-raise if the path still exists, i.e. the
    # removal genuinely failed rather than the file simply being absent.
    try:
        os.unlink(address)
    except OSError:
        if os.path.exists(address):
            raise
    # Short default timeout so socket calls inside tick() poll rather than
    # block indefinitely.
    socket.setdefaulttimeout(0.01)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind(address)
    sock.listen(10)
    state = server_state()
    while True:
        tick(sock, state)
def assign_nuts1_to_lad(c, lu=_LAD_NUTS1_LOOKUP):
    """Assigns nuts1 to LAD.

    :param c: LAD code; codes starting with S/W/N map to the devolved
        nations, everything else is resolved through `lu`.
    :param lu: LAD -> NUTS1 lookup table.
    :return: NUTS1 region name, or np.nan when the code is unknown.
    """
    # Membership test on the dict directly instead of `in lu.keys()`.
    if c in lu:
        return lu[c]
    if c[0] == "S":
        return "Scotland"
    if c[0] == "W":
        return "Wales"
    if c[0] == "N":
        return "Northern Ireland"
    return np.nan
async def get_all_persons():
    """List of all people.

    Opens a short-lived ORM session and serializes every Person row.
    """
    with Session(DB.engine) as session:
        return [person.to_dict() for person in session.query(Person).all()]
def main():
    """
    Main method of the sample Tango client.

    Walks through synchronous/asynchronous commands, attribute read/write,
    and change-event subscription against the "sys/tg_test/1" TangoTest
    device.
    """
    # Create the client; "sys/tg_test/1" is the device FQDN.
    print("Creating client of TangoTest device.")
    client = TangoClient("sys/tg_test/1")
    # Synchronous command invocation on the device server.
    print("Sending command in synchronous mode.")
    client.send_command("DevDouble", 20)
    # Asynchronous command invocation; devdouble_cb runs when the command
    # execution completes.
    print("Sending command in asynchronous mode.")
    client.send_command_async("DevDouble", 40, devdouble_cb)
    # Read the current value of the attribute.
    print("Reading attribute.")
    print(client.get_attribute("ampli"))
    # Write a new value, then read it back to show the change.
    print("Writing value to attribute.")
    client.set_attribute("ampli", 100)
    print(client.get_attribute("ampli"))
    # Subscribe to change events on the attribute; ampli_cb is invoked on
    # each event and the returned id identifies the subscription.
    print("Subscribing attribute change event.")
    event_id = client.subscribe_attribute("ampli", ampli_cb)
    # Cancel the subscription created above.
    print("Unsubscribing attribute change event.")
    client.unsubscribe_attribute(event_id)
def pivot_pull(pull: List[Dict[str, str]]):
    """Pivot so columns are measures and rows are dates.

    :param pull: raw rows, each with at least "sample_date", "parameter"
        and "numeric_result" keys.
    :return: list of rows, one per date (sorted), with one column per
        measure in MEASUREMENT_GROUPS.
    :raises ValueError: if a (date, measure) pair has zero or more than
        one observation.
    """
    parsed_pull = parse_dates(pull)
    # Group observations by date in a single pass instead of re-scanning
    # the whole pull for every distinct date (was O(dates * rows)).
    by_date = {}
    for obs in parsed_pull:
        by_date.setdefault(obs["sample_date"], []).append(obs)
    pivot = list()
    for date in sorted(by_date):
        row = {"sample_date": date}
        observations = by_date[date]
        for measure in MEASUREMENT_GROUPS:
            observation = [obs for obs in observations if obs["parameter"] == measure]
            if len(observation) != 1:
                raise ValueError(
                    "Should only have one value per date observation combo."
                )
            row[measure] = observation[0]["numeric_result"]
        pivot.append(row)
    return pivot
def test_init_variations():
    """Check that 3 ways of specifying a time + small offset are equivalent"""
    dt_tiny_sec = dt_tiny.jd2 * 86400.
    t1 = Time(1e11, format='cxcsec') + dt_tiny
    t2 = Time(1e11, dt_tiny_sec, format='cxcsec')
    t3 = Time(dt_tiny_sec, 1e11, format='cxcsec')
    assert t1.jd1 == t2.jd1
    assert t1.jd2 == t3.jd2
    # NOTE(review): the original repeated the two asserts above verbatim;
    # possibly `t1.jd1 == t3.jd1` and `t1.jd2 == t2.jd2` were intended as
    # the second pair -- confirm before strengthening the test.
def evaluate(data,
             model_path: str,
             dest_path: str,
             neighborhood_size: int,
             batch_size: int,
             endmembers_path: str,
             use_ensemble: bool = False,
             ensemble_copies: int = 1,
             noise_params: str = None,
             voting: str = 'mean',
             voting_model: str = None,
             voting_model_params: str = None,
             seed: int = 0):
    """
    Function for evaluating the trained model for the unmixing problem.
    :param model_path: Path to the model.
    :param data: Either path to the input data or the data dict.
    :param dest_path: Path to the directory to store the calculated metrics.
    :param neighborhood_size: Size of the spatial patch.
    :param batch_size: Size of the batch for inference.
    :param endmembers_path: Path to the endmembers file containing
        average reflectances for each class.
        Used only when use_unmixing is set to True.
    :param use_ensemble: Boolean indicating whether
        to use ensembles functionality.
    :param ensemble_copies: Number of copies of the original model to create.
    :param noise_params: Parameters for the noise when creating
        copies of the base model. Those can be for instance the mean,
        or standard deviation of the noise.
    :param voting: Method of ensemble voting. If 'booster',
        employs a new model, which is trained on the
        ensemble predictions on the training set. Else if 'mean', averages
        the predictions of all models, without any weights.
    :param voting_model: Type of the model to use when the voting
        argument is set to 'booster'. This indicates, that a new model
        is trained on the ensemble's predictions on the learning set,
        to leverage the quality of the regression. Supported models are:
        SVR (support vector machine for regression), RFR (random forest
        for regression) and DTR (decision tree for regression).
    :param voting_model_params: Parameters of the voting model.
        Used only when the type of voting is set to 'booster'.
        Should be specified analogously to the noise injection parameters
        in the 'noise' module.
    :param seed: Parameter used for the experiments reproduction.
    """
    model_name = os.path.basename(model_path)
    model = tf.keras.models.load_model(
        model_path, compile=True,
        custom_objects={metric.__name__: metric for metric in
                        UNMIXING_TRAIN_METRICS[model_name]})
    test_dict = data[enums.Dataset.TEST]
    # Normalization bounds were stored next to the model at training time.
    min_, max_ = io.read_min_max(os.path.join(
        os.path.dirname(model_path), 'min-max.csv'))
    transformations = [transforms.MinMaxNormalize(min_=min_, max_=max_)]
    transformations += [t(**{'neighborhood_size': neighborhood_size}) for t
                        in UNMIXING_TRANSFORMS[model_name]]
    test_dict_transformed = transforms.apply_transformations(test_dict.copy(),
                                                             transformations)
    if 'dcae' in model_name:
        # Drop the decoder head of the autoencoder for inference.
        model.pop()
    if use_ensemble:
        model = Ensemble(model, voting=voting)
        # safe_load avoids arbitrary object construction on the YAML string
        # and works on PyYAML >= 6, where load() requires an explicit Loader.
        noise_params = yaml.safe_load(noise_params)
        model.generate_models_with_noise(copies=ensemble_copies,
                                         mean=noise_params['mean'],
                                         std=noise_params['std'],
                                         seed=seed)
        if voting == 'booster':
            train_dict_tr = data[enums.Dataset.TRAIN].copy()
            train_dict_tr = transforms.apply_transformations(train_dict_tr,
                                                             transformations)
            train_probabilities = model.predict_probabilities(
                train_dict_tr[enums.Dataset.DATA])
            model.train_ensemble_predictor(
                train_probabilities,
                data[enums.Dataset.TRAIN][enums.Dataset.LABELS],
                predictor=voting_model,
                model_params=voting_model_params)
    predict = timeit(model.predict)
    y_pred, inference_time = predict(
        test_dict_transformed[enums.Dataset.DATA],
        batch_size=batch_size)
    model_metrics = calculate_unmixing_metrics(**{
        'endmembers': np.load(endmembers_path)
        if endmembers_path is not None else None,
        'y_pred': y_pred,
        'y_true': test_dict[enums.Dataset.LABELS],
        'x_true': get_central_pixel_spectrum(
            test_dict_transformed[enums.Dataset.DATA],
            neighborhood_size)
    })
    model_metrics['inference_time'] = [inference_time]
    io.save_metrics(dest_path=dest_path,
                    file_name=enums.Experiment.INFERENCE_METRICS,
                    metrics=model_metrics)
def _location_sensitive_score(W_query, W_fil, W_keys):
    """Impelements Bahdanau-style (cumulative) scoring function.
    This attention is described in:
        J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-
    gio, “Attention-based models for speech recognition,” in Ad-
    vances in Neural Information Processing Systems, 2015, pp.
    577–585.
    #############################################################################
              hybrid attention (content-based + location-based)
                                   f = F * α_{i-1}
       energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))
    #############################################################################
    Args:
        W_query: Tensor, shape "[batch_size, 1, attention_dim]" to compare to location features.
        W_fil: processed previous alignments into location features, shape "[batch_size, max_time, attention_dim]"
        W_keys: Tensor, shape "[batch_size, max_time, attention_dim]", typically the encoder outputs.
    Returns:
        A "[batch_size, max_time]" attention score (energy)
    """
    # Get the number of hidden units from the trailing dimension of keys.
    # Falls back to a dynamic shape when the static dimension is unknown
    # (TF1-style shape API: .value may be None).
    dtype = W_query.dtype
    num_units = W_keys.shape[-1].value or array_ops.shape(W_keys)[-1]
    # Learned projection vector and bias; tf.get_variable reuses these across
    # calls within the same variable scope (TF1 semantics).
    v_a = tf.get_variable(
        "attention_variable_projection", shape=[num_units], dtype=dtype,
        initializer=tf.contrib.layers.xavier_initializer())
    b_a = tf.get_variable(
        "attention_bias", shape=[num_units], dtype=dtype,
        initializer=tf.zeros_initializer())
    # Sum the tanh-activated combination over the attention dimension (axis 2),
    # yielding one energy per (batch, time) position.
    return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2])
def dh_noConv(value, pattern, limit):
    """Decoding helper for a single integer value: no conversion, no rounding.

    Delegates to dh() with the identity encoder and single-value decoder.
    """
    return dh(value, pattern, encNoConv, decSinglVal, limit)
def chooseFile():
    """
    Open a file-selection dialog and report how many files were chosen.

    Parameters
    ----------
    None
        No parameters are specified.
    Returns
    -------
    filenames: tuple
        A tuple that contains the list of files to be loaded.
    """
    # Change the working directory to the one containing this script so the
    # dialog opens in a predictable location.
    curpath = os.path.dirname(os.path.realpath(__file__))
    os.chdir(curpath)
    root = Tk()
    root.withdraw()  # hide the empty root window; only the dialog is shown
    filenames = askopenfilename(
        parent=root,
        filetypes=(("CSV files", "*.csv"), ("Text files", "*.txt"), ("All files", "*.*")),
        multiple=True,
    )
    # Python 3 print function (was Python 2 print statements); the emitted
    # text is unchanged.
    if len(filenames) == 1:
        print(len(filenames), " file is loaded.")
    elif len(filenames) > 1:
        print(len(filenames), " files are loaded.")
    else:
        print("No files are loaded.")
    return filenames
def downgrade():
    """Unapply Add scheduling_decision to DagRun and DAG"""
    # dag_run: drop the index first, then its backing column and the rest.
    with op.batch_alter_table('dag_run', schema=None) as batch_op:
        batch_op.drop_index('idx_last_scheduling_decision')
        for column_name in ('last_scheduling_decision', 'dag_hash'):
            batch_op.drop_column(column_name)
    # dag: same pattern -- index before columns, in the original order.
    with op.batch_alter_table('dag', schema=None) as batch_op:
        batch_op.drop_index('idx_next_dagrun_create_after')
        for column_name in ('next_dagrun_create_after', 'next_dagrun',
                            'concurrency', 'has_task_concurrency_limits'):
            batch_op.drop_column(column_name)
def reward_strategy(orig_reward, actualperf, judgeperf, weight=None):
    """
    Adjust a reward according to the confusion-matrix outcome.

    :param orig_reward: reward value before adjustment.
    :param actualperf: truthy when the actual outcome is positive.
    :param judgeperf: truthy when the judged outcome is positive.
    :param weight: mapping with exactly the keys TP, TN, FP and FN giving
        the reward adjustment per outcome; keys may be in any order.
        Defaults to {'TP': 1, 'TN': 1, 'FP': -1, 'FN': -1}.
    :return: orig_reward plus the matching weight, rounded to 2 decimals.
    """
    # Build the default per call instead of using a mutable default argument.
    if weight is None:
        weight = {'TP': 1, 'TN': 1, 'FP': -1, 'FN': -1}
    # Compare as a set so the caller's key order does not matter.
    assert set(weight.keys()) == {'TP', 'TN', 'FP', 'FN'}, "Please assign weights to TP, TN, FP and FN."
    # assert sum(weight.values()) == 0, "Summation of weight values needs to be 0."
    # Logical `and` instead of bitwise `&`; the four branches are exhaustive,
    # so `cond` is always bound (the original could fall through to a bare
    # `pass` and crash on an unbound `cond`).
    if actualperf and judgeperf:
        cond = 'TP'
    elif not actualperf and not judgeperf:
        cond = 'TN'
    elif not actualperf and judgeperf:
        cond = 'FP'
    else:  # actualperf and not judgeperf
        cond = 'FN'
    reward = orig_reward + weight[cond]
    reward = round(reward, 2)
    return reward
def test_enqueue(dog_q):
    """test enqueue"""
    item = 'cat'
    dog_q.enqueue(item)
    # The enqueued value becomes the newest node and the length grows to 6.
    assert dog_q.newest.val == item
    assert dog_q._len == 6
def get_user_ids_from_primary_location_ids(domain, location_ids):
    """
    Returns {user_id: primary_location_id, ...}

    :param domain: domain to scope the user search to.
    :param location_ids: primary location ids to match users against.
    """
    result = (
        UserES()
        .domain(domain)
        .primary_location(location_ids)
        .non_null('location_id')
        .fields(['location_id', '_id'])
        .run().hits
    )
    # Build the mapping directly; hits without a location_id are skipped,
    # matching the original guard.
    return {r['_id']: r['location_id'] for r in result if 'location_id' in r}
def test_environment_repeated():
    """Check the last value of repeated environment variables is used..."""
    build = build_pypgf(srcdir / "repeated", "basic.py")
    with build as res:
        assert res.returncode == 0, "Environment variables not set correctly."
def log_prov_es(job, prov_es_info, prov_es_file):
    """Log PROV-ES document. Create temp PROV-ES document to populate
    attributes that only the worker has access to (e.g. PID).

    :param job: job dict; reads 'job_id', 'type' and 'job_info' (with
        'execute_node', 'pid', 'cmd_start', 'cmd_end', 'job_url') and
        optionally 'username'.
    :param prov_es_info: existing PROV-ES document (dict); the worker-side
        software agent / process step attributes are merged into it in place.
    :param prov_es_file: path the merged JSON document is written to.
    """
    # create PROV-ES doc to generate attributes that only verdi know
    ps_id = "hysds:%s" % get_uuid(job['job_id'])
    bundle_id = "hysds:%s" % get_uuid('bundle-%s' % job['job_id'])
    doc = ProvEsDocument()
    # get bundle
    #bndl = doc.bundle(bundle_id)
    bndl = None
    # create sofware agent
    sa_label = "hysds:pge_wrapper/%s/%d/%s" % (job['job_info']['execute_node'],
                                               job['job_info']['pid'],
                                               datetime.utcnow().isoformat())
    sa_id = "hysds:%s" % get_uuid(sa_label)
    doc.softwareAgent(sa_id, str(job['job_info']['pid']),
                      job['job_info']['execute_node'],
                      role=job.get('username', None),
                      label=sa_label, bundle=bndl)
    # create processStep
    doc.processStep(ps_id, job['job_info']['cmd_start'],
                    job['job_info']['cmd_end'], [], sa_id,
                    None, [], [], bundle=bndl,
                    prov_type="hysds:%s" % job['type'])
    # get json
    pd = json.loads(doc.serialize())
    # update software agent and process step
    # Two merge paths below: a document containing a single bundle, or a
    # flat (bundle-less) document.  In both, the freshly generated agent,
    # wasAssociatedWith and activity entries are folded into the existing
    # structures; an existing single activity is updated in place (keeping
    # its original id) rather than replaced.
    if 'bundle' in prov_es_info:
        if len(prov_es_info['bundle']) == 1:
            bundle_id_orig = list(prov_es_info['bundle'].keys())[0]
            # update software agent
            prov_es_info['bundle'][bundle_id_orig].setdefault(
                'agent', {}).update(pd['bundle'][bundle_id]['agent'])
            # update wasAssociatedWith
            prov_es_info['bundle'][bundle_id_orig].setdefault(
                'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith'])
            # update activity
            if 'activity' in prov_es_info['bundle'][bundle_id_orig]:
                if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1:
                    # exactly one pre-existing activity: overwrite its timing
                    # and attach job metadata, preserving its original id
                    ps_id_orig = list(
                        prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0]
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
                        'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
                        'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
                    if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]:
                        prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
                            'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type']
                    # update wasAssociatedWith activity ids
                    for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']:
                        if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
                            prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
                else:
                    prov_es_info['bundle'][bundle_id_orig]['activity'].update(
                        pd['bundle'][bundle_id]['activity'])
            else:
                prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity']
    else:
        # update software agent
        prov_es_info.setdefault('agent', {}).update(pd['agent'])
        # update wasAssociatedWith
        prov_es_info.setdefault('wasAssociatedWith', {}).update(
            pd['wasAssociatedWith'])
        # update process step
        if 'activity' in prov_es_info:
            if len(prov_es_info['activity']) == 1:
                # single pre-existing activity: update in place (see above)
                ps_id_orig = list(prov_es_info['activity'].keys())[0]
                prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime']
                prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime']
                prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
                prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type']
                prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
                prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
                if 'prov:type' not in prov_es_info['activity'][ps_id_orig]:
                    prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type']
                # update wasAssociatedWith activity ids
                for waw_id in prov_es_info['wasAssociatedWith']:
                    if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
                        prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
            else:
                prov_es_info['activity'].update(pd['activity'])
        else:
            prov_es_info['activity'] = pd['activity']
    # write prov
    with open(prov_es_file, 'w') as f:
        json.dump(prov_es_info, f, indent=2)
def get_next_states(state: State):
    """Return all (move_cost, new_state) pairs reachable in one crab move.

    Two move families are generated:
    * hallway cell -> destination room, when that room is empty and the
      corridor between them is clear;
    * room -> destination room, or room -> any reachable hallway cell.

    NOTE(review): `state.rooms` appears to interleave hallway cells
    (HALLWAY_IND) and rooms (ROOMS_INDIC) by index, with `i % 2`
    distinguishing them -- confirm against the State definition.
    """
    out = []
    # First we check hallways.
    for i in HALLWAY_IND:
        # Check if the room has any crabs
        hall = state.rooms[i]
        if hall.is_empty(): continue
        # Get the crab
        crab, crab_pos = hall.get_next()
        # Get target room
        target_room = enum2room[crab]
        if state.rooms[target_room].is_empty():
            # Wait, first we need to see if we can move it to the room
            if i < target_room:
                # Hallway is on the left of the room
                left = i
                right = target_room
            else:
                left = target_room
                right = i
            # Scan every hallway cell strictly between source and target;
            # any occupied cell blocks the move.
            but_can_it_move = True
            for j in range(left, right):
                if j % 2:
                    continue
                if j == i:
                    continue
                if state.rooms[j].has_space():
                    continue
                but_can_it_move = False
                break
            if but_can_it_move:
                # We can move the crab!
                new_state = _deepcopy(state)
                # Calculate the new cost
                # The path is the current position of the crab in the current
                # hallway, then the position in the target room and finaly
                # the move between the hallways and rooms
                target_position = state.rooms[target_room].get_position()
                move = abs(target_room - i)
                new_cost = (crab_pos + target_position + move) * crab
                # Apply changes to the state
                new_state.rooms[i].pos[crab_pos] = E
                new_state.rooms[target_room].pos[target_position - 1] = crab
                new_state.count_completed_rooms()
                out.append((new_cost, new_state))
    for i in ROOMS_INDIC:
        # Check if room is complete
        room = state.rooms[i]
        if room.is_complete():
            continue
        if room.is_empty():
            continue
        # The room is not complete so we have to move the topmost crab out.
        crab, crab_pos = room.get_next()
        # See where it has to go
        target_room = enum2room[crab]
        # See if target room is empty so we can directly move in to the
        # target room
        if state.rooms[target_room].is_empty():
            if i < target_room:
                left = i
                right = target_room
            else:
                left = target_room
                right = i
            # Same corridor-clearance scan as in the hallway branch above.
            but_can_it_move = True
            for j in range(left, right):
                if j % 2:
                    # Other rooms
                    continue
                if j == i:
                    continue
                if state.rooms[j].has_space():
                    continue
                but_can_it_move = False
                break
            if but_can_it_move:
                new_state = _deepcopy(state)
                target_position = state.rooms[target_room].get_position()
                # Calculate the new state
                # `+ 1` accounts for the extra step out of the source room.
                move = abs(target_room - i) + 1
                new_cost = (crab_pos + move + target_position) * crab
                # Apply changes
                new_state.rooms[i].pos[crab_pos] = E
                new_state.rooms[target_room].pos[target_position - 1] = crab
                new_state.count_completed_rooms()
                out.append((new_cost, new_state))
        # Well now let's see if we can move to a halway
        for j in HALLWAY_IND:
            # We fill all the hallways. All of them...
            hall = state.rooms[j]
            if hall.has_space():
                # We can move it here.
                but_can_it_move = True
                if i < j:
                    left = i
                    right = j
                else:
                    left = j
                    right = i
                # NOTE(review): this clearance scan uses is_empty() where the
                # two scans above use has_space() -- confirm the asymmetry is
                # intentional (a partially filled hallway cell may block here).
                for l in range(left, right):
                    if l == j:  # Ignore target hall
                        continue
                    if l % 2:  # Ignore rooms
                        continue
                    if state.rooms[l].is_empty():
                        continue
                    but_can_it_move = False
                    break
                if but_can_it_move:
                    # Fill all possible positions for this hallway.
                    for k in range(hall.s -1, -1, -1):
                        if hall.pos[k]:
                            continue
                        new_state = _deepcopy(state)
                        move = abs(i - j)
                        new_cost = (crab_pos + k + 1 + move) * crab
                        # Make the change
                        new_state.rooms[i].pos[crab_pos] = E
                        new_state.rooms[j].pos[k] = crab
                        new_state.count_completed_rooms()
                        out.append((new_cost, new_state))
    return out
def _build_trainstep(fcn, projector, optimizer, strategy, temp=1, tau_plus=0, beta=0, weight_decay=0):
    """
    Build a distributed training step for SimCLR or HCL.
    Set tau_plus and beta to 0 for SimCLR parameters.
    :fcn: Keras fully-convolutional feature model
    :projector: Keras projection model
    :optimizer: Keras optimizer
    :strategy: tf.distribute.Strategy object
    :temp: temperature parameter
    :tau_plus: HCL class probability parameter
    :beta: HCL concentration parameter
    :weight_decay: L2 loss coefficient. 0 to disable
    Returns a distributed training function taking (x1, m1, x2, m2):
    two augmented image batches and their corresponding masks.
    """
    # Train both the feature extractor and the projection head.
    trainvars = fcn.trainable_variables + projector.trainable_variables
    def _step(x1, m1, x2, m2):
        with tf.GradientTape() as tape:
            loss = 0
            # get replica context- we'll use this to aggregate embeddings
            # across different GPUs
            context = tf.distribute.get_replica_context()
            #print("x,y:", x.shape, y.shape)
            # run images through model and normalize embeddings. do this
            # in three steps:
            # 1) compute features with FCN (N, w, h, feature_dim)
            # 2) compute segment-weighted features (N*num_samples, feature_dim)
            # 3) compute projections z (N*num_samples, d)
            x1 = fcn(x1, training=True)
            hm1 = _prepare_embeddings(x1, m1)
            z1 = tf.nn.l2_normalize(projector(hm1, training=True), 1)
            x2 = fcn(x2, training=True)
            hm2 = _prepare_embeddings(x2, m2)
            z2 = tf.nn.l2_normalize(projector(hm2, training=True), 1)
            # mask out all positive pairs where one mask or the other
            # is empty
            mask = tf.stop_gradient(_prepare_mask(m1, m2))
            # aggregate projections across replicas. z1 and z2 should
            # now correspond to the global batch size (gbs*num_samples, d)
            z1 = context.all_gather(z1, 0)
            z2 = context.all_gather(z2, 0)
            print("z1,z2:", z1.shape, z2.shape)
            mask = context.all_gather(mask, 0)
            print("mask:", mask.shape)
            # The negative-pair mask is fixed by the global batch size, so
            # build it outside the tape to avoid tracking it for gradients.
            with tape.stop_recording():
                gbs = z1.shape[0]
                negmask = _build_negative_mask(gbs)
            # SimCLR loss case
            if (tau_plus == 0)&(beta == 0):
                softmax_prob, nce_batch_acc = _simclr_softmax_prob(z1, z2, temp, negmask)
            # HCL loss case
            elif (tau_plus > 0)&(beta > 0):
                softmax_prob, nce_batch_acc = _hcl_softmax_prob(z1, z2, temp,
                                                                beta, tau_plus, negmask)
            else:
                assert False, "both tau_plus and beta must be nonzero to run HCL"
            softmax_loss = tf.reduce_mean(-1*mask*tf.math.log(softmax_prob))
            loss += softmax_loss
            if weight_decay > 0:
                l2_loss = compute_l2_loss(fcn) + compute_l2_loss(projector)
                loss += weight_decay*l2_loss
            else:
                l2_loss = 0
        grad = tape.gradient(loss, trainvars)
        optimizer.apply_gradients(zip(grad, trainvars))
        return {"loss":loss, "nt_xent_loss":softmax_loss,
                "l2_loss":l2_loss,
                "nce_batch_accuracy":nce_batch_acc}
    @tf.function
    def trainstep(x1, m1, x2, m2):
        # Run the per-replica step, then average every reported metric
        # across replicas into a single scalar per key.
        per_example_losses = strategy.run(_step, args=(x1, m1, x2, m2))
        lossdict = {k:strategy.reduce(
                    tf.distribute.ReduceOp.MEAN,
                    per_example_losses[k], axis=None)
                    for k in per_example_losses}
        return lossdict
    return trainstep
def main():
    """ Main entry point of the app """
    # logger.info("Logging for {__name__}.main()")
    # If module will never be run as script from terminal (command line), then you can
    # delete this block of code.
    # Check to see if read_args() function exists (defined above). If so, read the command-line args.
    # NOTE(review): `s()` calls globals()/locals() and the `in` test checks
    # whether the name "read_args" is defined in either namespace.
    if any (["read_args" in s () for s in [globals, locals]]):
        # Double-check that read_args is a callable function.
        if callable (read_args):
            # 1. Pick method 1, 2 or 3; delete the methods you don't use.
            # 2. Customize the method for your specific command-line args.
            # Method 1: args as dict
            args = read_args(return_dict = True)
            # logger.info(args)
            print('command line args returned as dict.')
            for k, v in args.items():
                print(k + ":", v)
            # Method 2: args as Namespace
            args = read_args(return_dict = False)
            # logger.info(args)
            print('command line args returned as Namespace.')
            print('args.name: ', args.name)
            # Method 3: read a single commandline arg directly to a variable
            name_str = read_args().name
            print('name_str: ', name_str)
            # logger.info(name_str)
        else:
            # read_args exists but is not callable; fall back to empty args.
            # NOTE(review): when the name is absent entirely, `args` is never
            # bound -- confirm whether that branch should also set a default.
            args = []
def GatherToDataframe( session, analysis, version , save = True, **kwargs ):
    """
    Load external data (pickle files mostly) into a session dataframe or series of session dataframes columns.
    You can specify the analysis type and version of that analysis you want to get loaded and saved inside a sessiondataframe.
    This function is destined to be used before calling a MultisessionDatabase (because that function just merges the data inside the sessiondaataframes).
    Parameters
    ----------
    session : int
        session_number.
    analysis : str
        type of analysis to load on the session (must match an existing one in the .config file).
    version : str
        Version of that analysis (in case you ran it multiple times with different version numbers) example : 'V1' or 'V2'.
    save : bool, optional
        Save on disk (true) or only return the dataframe. The default is True.
    **kwargs : TYPE
        - reload : default False.
            If the column supposed to hold the .piuckle file data already exists, the function returns. To avoid this behavior and reload all data, use reload = True.
        - silent : default True
            Print warnings (True) or not.
        - all the kwargs allowed for SessionDataframe, used when loading the dataframes. See that function for more details :
            - source : default None.
            - sql_engine : default None.
            - force : default False.
                BE CAREFULL - READ ENTIRELY -
                If True, the function will first regenerate a dataframe from mysql (erasing all data previously merged inside it)
                and then GatherToDataframe will remerge the data from the current analysis and version.
                If you wish to load several analysis types inside the same sessiondataframe,
                you must not specify True to this argument after the first call or previously loaded data will be removed.
            - castErrors : default False.
    Returns
    -------
    SessionDataBase
        A SessionDataframe with the data loaded inside it.
    """
    import pyprind
    from LibrairieVideoAna import PositionTrack
    # Accept either a session number or an already-built dataframe.
    if isinstance(session , (int, np.integer) ):
        SessionDataFrame = SessionDataframe(session, method = "new", **kwargs)
    else :
        SessionDataFrame = session
        session = SessionDataFrame.identity["Session"]
    level , column_names , filename_contruct , sublevel_folder , applies_to = ConstructName(SessionDataFrame, analysis)
    # Ensure the destination columns exist; col_found stays True only when
    # every named column was already present.
    col_found = True
    for column_name in column_names :
        if column_name != "" :
            if not column_name in SessionDataFrame.columns:
                SessionDataFrame.loc[:,column_name] = None
                col_found = False
        else :
            col_found = False
    if col_found and not kwargs.get("reload",False):
        if not kwargs.get("silent",False):
            print("Data already exist on a saved dataframe, returning")
        return SessionDataFrame
    if applies_to == "trial" :
        bar = pyprind.ProgBar(SessionDataFrame.shape[0], track_time=True, title=f'Gathering {analysis}',bar_char='█',update_interval = 1)
        for index, row in SessionDataFrame.iterrows():
            bar.update()
            # HACK: eval() builds the per-trial file name from a template
            # string returned by ConstructName -- only safe if the .config
            # file is trusted; consider a format()-based template instead.
            input_path = os.path.join(SessionDataFrame.dirs[level], sublevel_folder , eval(filename_contruct))
            if os.path.isfile(input_path):
                if analysis == "ShapeMatch_trajectories" :
                    mesh = PositionTrack.LoadTrackerMesh(input_path, loadtype = "results")
                    if mesh is not None :
                        trajes = PositionTrack.GetTrajectoryResults(mesh)
                        if trajes is not None :
                            trajlist = []
                            for key in trajes.keys() :
                                trajlist.append(trajes[key])
                                #SessionDataFrame.loc[index,key] = trajes[key]
                                #SessionDataFrame.loc[index,key] = geometry.UPointCollection(trajes[key])
                            SessionDataFrame.loc[index,column_names[0]] = geometry.ULineCollection ( np.hstack( [ trajlist[0] ,trajlist[1] ] ) )
        if save :
            SessionDataFrame.save()
        return SessionDataFrame
    elif applies_to == "session":
        # Session-level analyses resolve to a single pickle; return its
        # content directly instead of a dataframe.
        input_path = os.path.join(SessionDataFrame.dirs[level], sublevel_folder , eval(filename_contruct))
        with open(input_path,"rb") as f :
            item1 = CustomUnpickler(f).load()
        return item1
    else :
        raise NotImplementedError
def predict(
    gpu,
    gpu_allow_growth,
    ckpt_path,
    mode,
    batch_size,
    log_dir,
    sample_label,
    config_path,
):
    """
    Function to predict some metrics from the saved model and logging results.
    :param gpu: str, which env gpu to use.
    :param gpu_allow_growth: bool, whether to allow gpu growth or not
    :param ckpt_path: str, where model is stored, should be like
                      log_folder/save/xxx.ckpt
    :param mode: which mode to load the data ??
    :param batch_size: int, batch size to perform predictions in
    :param log_dir: str, path to store logs
    :param sample_label:
    :param config_path: to overwrite the default config
    """
    logging.error("TODO sample_label is not used in predict")
    # env vars
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    # Fix: the flag was inverted -- TF_FORCE_GPU_ALLOW_GROWTH must be "true"
    # exactly when gpu_allow_growth is requested.
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" if gpu_allow_growth else "false"
    # load config
    config, log_dir = init(log_dir, ckpt_path, config_path)
    dataset_config = config["dataset"]
    preprocess_config = config["train"]["preprocess"]
    preprocess_config["batch_size"] = batch_size
    optimizer_config = config["train"]["optimizer"]
    model_config = config["train"]["model"]
    loss_config = config["train"]["loss"]
    # data
    data_loader = load.get_data_loader(dataset_config, mode)
    if data_loader is None:
        raise ValueError(
            "Data loader for prediction is None. Probably the data dir path is not defined."
        )
    dataset = data_loader.get_dataset_and_preprocess(
        training=False, repeat=False, **preprocess_config
    )
    # optimizer
    optimizer = opt.get_optimizer(optimizer_config)
    # model
    model = build_model(
        moving_image_size=data_loader.moving_image_shape,
        fixed_image_size=data_loader.fixed_image_shape,
        index_size=data_loader.num_indices,
        labeled=dataset_config["labeled"],
        batch_size=preprocess_config["batch_size"],
        model_config=model_config,
        loss_config=loss_config,
    )
    # metrics
    model.compile(optimizer=optimizer)
    # load weights
    # https://stackoverflow.com/questions/58289342/tf2-0-translation-model-error-when-restoring-the-saved-model-unresolved-objec
    model.load_weights(ckpt_path).expect_partial()
    # predict
    fixed_grid_ref = layer_util.get_reference_grid(
        grid_size=data_loader.fixed_image_shape
    )
    predict_on_dataset(
        dataset=dataset,
        fixed_grid_ref=fixed_grid_ref,
        model=model,
        save_dir=log_dir + "/test",
    )
    data_loader.close()
def get_environment():
    """ Light-weight routine for reading the <Environment> block: does most of the work through side effects on PETRglobals

    Reads from the module-level file handle `fin` until the closing
    </Environment> tag, storing file names in PETRglobals and collecting
    the include/exclude category lists and pause mode.

    :return: (ValidInclude, ValidExclude, ValidPause, ValidOnly) where
        ValidPause is 0 (default), 1 ('always'), 2 ('never') or 3 ('stop').
    """
    ValidExclude = None
    ValidInclude = None
    ValidOnly = True
    ValidPause = 0
    #PETRglobals.CodeWithPetrarch1 = True
    #PETRglobals.CodeWithPetrarch2 = False
    # Skip forward until the opening tag; abort if the file ends first.
    line = fin.readline()
    while len(line) > 0 and not line.startswith("<Environment>"):  # loop through the file
        line = fin.readline()
    if len(line) == 0:
        print("Can't find <Environment> block")
        exit()
    line = fin.readline()
    while "</Environment>" not in line:  # loop through the file
        print(line[:-1])
        # Each tag's payload is the text between '>' and '</'.
        if '<Verbfile' in line:
            PETRglobals.VerbFileName = line[line.find(">") + 1:line.find("</")]
        elif '<Actorfile' in line:
            PETRglobals.ActorFileList = line[line.find(">") + 1:line.find("</")].split(',')
        elif '<Agentfile' in line:
            PETRglobals.AgentFileList = line[line.find(">") + 1:line.find("</")].split(',')
        elif '<Discardfile' in line:
            PETRglobals.DiscardFileName = line[line.find(">") + 1:line.find("</")]
        elif '<PICOfile' in line:
            PETRglobals.InternalCodingOntologyFileName = line[line.find(">") + 1:line.find("</")]
        elif '<Include' in line:
            ValidInclude = line[line.find(">") + 1:line.find("</")].split()
            print('<Include> categories', ValidInclude)
            # The special 'valid' token toggles ValidOnly and is removed
            # from the category list.
            if 'valid' in ValidInclude:
                ValidOnly = True
                ValidInclude.remove('valid')
        elif '<Exclude' in line:
            ValidExclude = line[line.find(">") + 1:line.find("</")].split()
            print('<Exclude> categories', ValidExclude)
        elif '<Pause' in line:
            theval = line[line.find(">") + 1:line.find("</")]
            # Substring matching skips the first character to allow
            # upper/lower case ('Always'/'always', etc.).
            if 'lways' in theval:
                ValidPause = 1  # skip first char to allow upper/lower case
            elif 'ever' in theval:
                ValidPause = 2
            elif 'top' in theval:
                ValidPause = 3
        line = fin.readline()
    print(PETRglobals.VerbFileName, PETRglobals.ActorFileList[0], PETRglobals.AgentFileList[0], PETRglobals.DiscardFileName)
    print(ValidInclude, ValidExclude)
    print(ValidPause, ValidOnly)
    return ValidInclude, ValidExclude, ValidPause, ValidOnly
def test_filtering_pipeline_ml(
    mocker,
    dummy_context,
    pipeline_with_tag,
    pipeline_ml_with_tag,
    tags,
    from_nodes,
    to_nodes,
    node_names,
    from_inputs,
):
    """Filtering through the context (only_nodes_with_tags, from_inputs...)
    must return a PipelineML whose inference pipeline is untouched.

    The filter arguments are gathered in one dict so the test keeps working
    if kedro changes its set of filters.
    """
    # dummy_context, pipeline_with_tag, pipeline_ml_with_tag are fixtures in conftest
    # remember: the arguments are iterable, so do not pass a bare string
    # (e.g. ["training"] rather than "training")
    filter_kwargs = dict(
        tags=tags,
        from_nodes=from_nodes,
        to_nodes=to_nodes,
        node_names=node_names,
        from_inputs=from_inputs,
    )
    filtered_pipeline = dummy_context._filter_pipeline(
        pipeline=pipeline_with_tag, **filter_kwargs
    )
    filtered_pipeline_ml = dummy_context._filter_pipeline(
        pipeline=pipeline_ml_with_tag, **filter_kwargs
    )
    # PipelineML class must be preserved when filtering,
    # inference should be unmodified, and the training pipeline
    # nodes must be identical to plain kedro filtering.
    assert isinstance(filtered_pipeline_ml, PipelineML)
    assert filtered_pipeline_ml.inference == pipeline_ml_with_tag.inference
    assert filtered_pipeline.nodes == filtered_pipeline_ml.nodes
def create_variables_from_samples(sample_z_logits, sample_z_logp, sample_b, batch_index, sequence_index):
    """
    Create the variables for RELAX control variate. Assumes sampled tokens come from decoder.
    :param sample_z_logits: [B,T,V] tensor containing sampled processed logits created by stacking logits during
    decoding loop of sampling process
    :param sample_z_logp: [B,T,V] tensor containing sampled processed logp created by stacking logp during
    decoding loop of sampling process
    :param sample_b: the [B,T] tensor containing the H(z) indices (Gumbel-Max)
    :param batch_index: [B,T] tensor of the batch size repeated for seq len
    :param sequence_index: [B,T] tensor of range(0, seq len)
    :return: z_tilde, and logp(b) for equation
    """
    # uniform noise used to re-sample z_tilde conditioned on b; the 1e-8
    # lower bound keeps the double-log below finite
    v = tf.random_uniform(shape=sample_z_logp.get_shape().as_list(),
                          minval=1e-8,
                          maxval=1,
                          dtype=tf.float32)
    # create index tensor where b is the argmax, to use as indexer for substitution
    b_new = tf.cast(tf.squeeze(sample_b, 0), tf.int64)  # assumes sample_b = [BxT]
    index_tensor_b = tf.expand_dims(tf.stack([batch_index, sequence_index, b_new], axis=1), 0)
    v_b = tf.gather_nd(v, index_tensor_b)  # values of v where b are the argmax indexes
    update = -tf.log(-tf.log(v_b))  # Gumbel transform, for i == b
    # create z_tilde as for the case where i != b
    # clip probabilities so the log/div below never sees zero
    clipped_logit_probs = tf.clip_by_value(tf.math.softmax(sample_z_logits, axis=2), 1e-8, 1.0)
    z_tilde = -tf.log(-tf.div(tf.log(v), clipped_logit_probs) - tf.expand_dims(tf.log(v_b), 2))
    # overwrite the i == b positions with the dedicated Gumbel values
    z_tilde = tf.tensor_scatter_nd_update(z_tilde, index_tensor_b, update)
    logp_b = tf.gather_nd(sample_z_logp, index_tensor_b)  # used in loss func
    return z_tilde, logp_b
def register_sensor(name):
    """
    Register a new sensor with the server over the module-level socket.

    :param name: The name of the sensor.
    """
    message = "REGISTER:" + name + '\n'
    # socket.sendall requires a bytes-like object on Python 3;
    # sending the str directly raises TypeError.
    sock.sendall(message.encode())
def test_post_ar(client):
    """Assert that business for regular (not xpro) business is correct to spec."""
    headers = {'content-type': 'application/json'}
    # deep-copy the shared template: assigning it directly would mutate the
    # module-level ANNUAL_REPORT dict and leak state into other tests
    fake_filing = copy.deepcopy(ANNUAL_REPORT)
    fake_filing['filing']['business']['identifier'] = 'CP0001965'
    fake_filing['filing']['annualReport']['annualGeneralMeetingDate'] = '2018-04-08'
    fake_filing['filing']['annualReport']['annualReportDate'] = '2018-04-08'
    rv = client.post('/api/v1/businesses/CP0001965/filings/annualReport',
                     data=json.dumps(fake_filing), headers=headers)
    assert 201 == rv.status_code
    is_valid, errors = validate(rv.json, 'filing', validate_schema=True)
    if errors:
        for err in errors:
            print('\nERROR MESSAGE:')
            print(err.message)
    assert is_valid
    # remember the created filing id so later tests/teardown can reference it
    ar_ids.append(str(rv.json['filing']['annualReport']['eventId']))
def get2DHisto_(detector,plotNumber,geometry):
    """
    This function opens the appropiate ROOT file,
    extracts the TProfile2D and turns it into a Histogram,
    if it is a compound detector, this function
    takes care of the subdetectors' addition.
    Note that it takes plotNumber as opposed to plot

    :param detector: detector name; may be a key of COMPOUNDS
    :param plotNumber: numeric key of the TProfile2D inside the ROOT file
    :param geometry: geometry tag used to build the file name
    :return: a deep copy of the summed TH2 histogram, or 0 when a file/plot is missing
    """
    histo = None
    rootFile = TFile()
    detectorFilename = 'matbdg_%s_%s.root'%(detector,geometry)
    # single-detector path: also taken for a compound when a combined file exists
    if detector not in COMPOUNDS.keys() or checkFile_(detectorFilename):
        if not checkFile_(detectorFilename):
            print('Warning: %s not found' % detectorFilename)
            return 0
        rootFile = TFile.Open(detectorFilename,'READ')
        prof = rootFile.Get("%d" % plotNumber)
        if not prof: return 0
        # Prevent memory leaking by specifing a unique name
        prof.SetName('%u_%s_%s' %(plotNumber,detector,geometry))
        prof.__class__ = TProfile2D
        histo = prof.ProjectionXY()
    else:
        # compound path: sum the projections of every subdetector's profile
        histos = OrderedDict()
        theFiles = []  # keep files alive until after the projections are summed
        for subDetector in COMPOUNDS[detector]:
            subDetectorFilename = 'matbdg_%s_%s.root' % (subDetector,geometry)
            if not checkFile_(subDetectorFilename):
                print('Warning: %s not found'%subDetectorFilename)
                continue
            subDetectorFile = TFile.Open(subDetectorFilename,'READ')
            theFiles.append(subDetectorFile)
            print('*** Open file... %s' % subDetectorFilename)
            prof = subDetectorFile.Get('%d'%plotNumber)
            if not prof: return 0
            prof.__class__ = TProfile2D
            if not histo:
                histo = prof.ProjectionXY('B_%s' % prof.GetName())
            else:
                histo.Add(prof.ProjectionXY('B_%s' % prof.GetName()))
    # deep copy detaches the result from the (soon closed) ROOT files
    return copy.deepcopy(histo)
def binlog2sql(request):
    """
    Obtain SQL statements by parsing the binlog.

    :param request: Django request carrying the instance name, binlog range
        (file/position/datetime), schema/table/type filters and output options
    :return: HttpResponse with JSON {'status', 'msg', 'data': rows}
    """
    instance_name = request.POST.get('instance_name')
    save_sql = True if request.POST.get('save_sql') == 'true' else False
    instance = Instance.objects.get(instance_name=instance_name)
    no_pk = True if request.POST.get('no_pk') == 'true' else False
    flashback = True if request.POST.get('flashback') == 'true' else False
    back_interval = 0 if request.POST.get('back_interval') == '' else int(request.POST.get('back_interval'))
    num = 30 if request.POST.get('num') == '' else int(request.POST.get('num'))
    start_file = request.POST.get('start_file')
    # keep empty string as-is, otherwise cast positions to int
    start_pos = request.POST.get('start_pos') if request.POST.get('start_pos') == '' else int(
        request.POST.get('start_pos'))
    end_file = request.POST.get('end_file')
    end_pos = request.POST.get('end_pos') if request.POST.get('end_pos') == '' else int(request.POST.get('end_pos'))
    stop_time = request.POST.get('stop_time')
    start_time = request.POST.get('start_time')
    only_schemas = request.POST.getlist('only_schemas')
    only_tables = request.POST.getlist('only_tables[]')
    only_dml = True if request.POST.get('only_dml') == 'true' else False
    sql_type = ['INSERT', 'UPDATE', 'DELETE'] if request.POST.getlist('sql_type[]') == [] else request.POST.getlist(
        'sql_type[]')
    # validate sql_type: reject anything outside the DML whitelist
    if [i for i in sql_type if i not in ['INSERT', 'UPDATE', 'DELETE']]:
        return JsonResponse({'status': 1, 'msg': '类型过滤参数不正确', 'data': {}})
    # flashback=True yields DML rollback statements
    result = {'status': 0, 'msg': 'ok', 'data': ''}
    # hand off to binlog2sql for parsing
    binlog2sql = Binlog2Sql()
    # build the argument dict (connection options are shlex-quoted)
    args = {"conn_options": fr"-h{shlex.quote(str(instance.host))} -u{shlex.quote(str(instance.user))} \
                          -p'{shlex.quote(str(instance.password))}' -P{shlex.quote(str(instance.port))} ",
            "stop_never": False,
            "no-primary-key": no_pk,
            "flashback": flashback,
            "back-interval": back_interval,
            "start-file": start_file,
            "start-position": start_pos,
            "stop-file": end_file,
            "stop-position": end_pos,
            "start-datetime": '"'+start_time+'"',
            "stop-datetime": '"'+stop_time+'"',
            "databases": ' '.join(only_schemas),
            "tables": ' '.join(only_tables),
            "only-dml": only_dml,
            "sql-type": ' '.join(sql_type),
            "instance": instance
            }
    # argument validation
    args_check_result = binlog2sql.check_args(args)
    if args_check_result['status'] == 1:
        return HttpResponse(json.dumps(args_check_result), content_type='application/json')
    # convert the arg dict into a command line
    cmd_args = binlog2sql.generate_args2cmd(args, shell=True)
    # execute the command
    try:
        p = binlog2sql.execute_cmd(cmd_args, shell=True)
        # read the first num lines, then stop
        rows = []
        n = 1
        for line in iter(p.stdout.readline, ''):
            if n <= num:
                n = n + 1
                row_info = {}
                try:
                    # each line looks like "<sql>; #<binlog position info>"
                    row_info['sql'] = line.split('; #')[0] + ";"
                    row_info['binlog_info'] = line.split('; #')[1].rstrip('\"')
                except IndexError:
                    row_info['sql'] = line
                    row_info['binlog_info'] = None
                rows.append(row_info)
            else:
                break
        if rows.__len__() == 0:
            # no output: check stderr for an error from the subprocess
            stderr = p.stderr.read()
            if stderr:
                result['status'] = 1
                result['msg'] = stderr
                return HttpResponse(json.dumps(result), content_type='application/json')
        # terminate the child process
        p.kill()
        result['data'] = rows
    except Exception as e:
        logger.error(traceback.format_exc())
        result['status'] = 1
        result['msg'] = str(e)
    # asynchronously save the full result to a file
    if save_sql:
        args.pop('conn_options')
        async_task(binlog2sql_file, args=args, user=request.user, hook=notify_for_binlog2sql, timeout=-1,
                   task_name=f'binlog2sql-{time.time()}')
    # return the query result
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
def test_retrieve_sentry_logs_nostacktrace():
    """Test retrieve sentry logs when the latest event has no stacktrace.

    Mocks the Sentry issues and latest-event endpoints and checks the
    report falls back to "Not Available" for the stacktrace field.
    """
    # mock the 24h issues listing for the project
    responses.add(responses.GET, 'https://sentry.devshift.net/api/0/projects/'
                                 'sentry/fabric8-analytics-production/issues/'
                                 '?statsPeriod=24h',
                  json=sentry_issues_res, status=200)
    # mock the latest event for issue 12666 with a payload lacking a stacktrace
    responses.add(responses.GET, 'https://sentry.devshift.net/api/0/issues/'
                                 '12666/events/latest/', json=sentry_tags_res_nostack, status=200)
    res = sobj.retrieve_sentry_logs('2019-05-14', '2019-05-15')
    expected_output = {"error_report": {"bayesian-data-importer":
                       {"total_errors": 1, "errors":
                        [{"id": "12666", "last_seen":
                          "2019-05-15T06:50:10Z",
                          "bayesian-data-importer-52-fgp4f":
                          "TypeError: must be str, not list",
                          "stacktrace": "Not Available"}]}}}
    assert (res == expected_output)
def save_fig(fig, name, path, tight_layout=True):
    """
    Save a `matplotlib.pyplot.figure` as a PDF file.

    :param matplotlib.pyplot.figure fig: instance of a `matplotlib.pyplot.figure` to save
    :param str name: filename without extension
    :param str path: directory where the figure is saved (created if missing)
    :param bool tight_layout: if True, apply ``fig.tight_layout()`` before saving
    """
    if tight_layout:
        fig.tight_layout()
    # exist_ok avoids the check-then-create race of exists()+makedirs()
    os.makedirs(path, exist_ok=True)
    fig.savefig(os.path.join(path, f'{name}.pdf'), transparent=True)
def smtp_config_generator_str(results, key, inp):
    """
    Fill in a missing string config value (e.g. server/username) by prompting.

    If ``results[key]`` is already set it is left untouched; otherwise the
    user is prompted with ``inp`` and the answer is stored under ``key``.

    :param results: Partial config values; mutated in place.
    :type results: dict
    :param key: Key for results dict.
    :type key: str
    :param inp: Input question shown to the user.
    :type inp: str
    :return: The (possibly updated) results dict.
    :rtype: dict
    """
    if results[key] is None:
        results[key] = input(inp)
    return results
def delete_object_by_name(name, ignore_errors=False):
    """
    Attempts to find an object by the name given and deletes it from the scene.
    :param name: the name of this object
    :param ignore_errors: if True, no exception is raised when the object is missing. Otherwise, you will get a
    KeyError if no object by that name exists.
    :return: True if the object was found and deleted successfully, False if it
    was missing and ignore_errors was set
    """
    try:
        logging.debug("Attempting to delete object '%s'" % name)
        obj = data.objects[name]
    except KeyError:
        if ignore_errors:  # are we ignoring errors?
            logging.debug("Didn't delete '%s'. Probably didn't exist. Error ignored." % name)
            return False  # just report that we weren't successful
        raise  # object doesn't exist; bare raise keeps the original traceback
    ops.object.select_all(action='DESELECT')
    obj.select_set(state=True)
    context.view_layer.objects.active = obj
    bpy.ops.object.delete()
    # the docstring promises True on successful deletion; previously None was returned
    return True
def default_rollout_step(policy, obs, step_num):
    """
    Default per-step rollout behavior: delegate to the policy's
    ``compute_action`` on the latest observation.

    A rollout step function lets a developer decide, at every step of a
    rollout, what action to take next given the policy and the last
    observation from the env. Custom implementations may additionally
    perform learning or manage learning-related state (for example,
    epsilon in epsilon-greedy exploration). Provide your own function
    with this same signature for more complex per-step behavior;
    ``step_num`` is available for such implementations and is unused here.
    """
    action = policy.compute_action(obs)
    return action
def perfilsersic(r_e, I_e, n, r):
    """Evaluate a normalized Sersic profile.

    Evaluates the surface brightness at radius ``r`` for a Sersic profile,
    normalized so that the central (r = 0) value equals 1.

    :param r_e: scale radius
    :param I_e: scale intensity
    :param n: Sersic index
    :param r: radius from the center, in pixels
    :return: normalized intensity I(r) / I(0)
    """
    # approximation for the Sersic b_n coefficient
    b = 1.999 * n - 0.327
    # np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement and is numerically identical.
    I_r = I_e * np.exp(-b * (((r / r_e) ** (1 / float(n))) - 1))
    # normalize by the central value I(0) = I_e * exp(b)
    I_r = I_r / (I_e * np.exp(-b * (((0.0 / r_e) ** (1 / float(n))) - 1)))
    return I_r
def background_profile(img, smo1=30, badval=None):
    """
    helper routine to determine for the rotated image
    (spectrum in rows) the background using sigma clipping.

    :param img: 2-D array, dispersion along axis 1 (columns)
    :param smo1: smoothing parameter passed to scipy's splrep
    :param badval: value treated as bad by the sigma clipping of rows
    :return: tuple (u_xsum, u_x, u_std) — per-column clipped means, their
        smooth spline evaluation, and per-column clipped standard deviations
    """
    import numpy as np
    from scipy import interpolate
    bgimg = img.copy()
    nx = bgimg.shape[1]  # number of points in direction of dispersion
    ny = bgimg.shape[0]  # width of the image
    # look at the summed rows of the image
    u_ysum = []
    for i in range(ny):
        u_ysum.append(bgimg[i,:].mean())
    u_ysum = np.asarray(u_ysum)
    # sigma-clip the row means to reject rows dominated by the spectrum
    u_ymask = sigclip1d_mask(u_ysum, 2.5, badval=badval, conv=1e-5, maxloop=30)
    u_ymean = u_ysum[u_ymask].mean()
    # look at the summed columns after filtering bad rows
    u_yindex = np.where(u_ymask)[0]
    u_xsum = []
    u_std = []
    for i in range(nx):
        u_x1 = bgimg[u_yindex, i].squeeze()
        # clip u_x1
        u_x1mask = sigclip1d_mask(u_x1, 2.5, badval=None, conv=1e-5, maxloop=30)
        u_xsum.append(u_x1[u_x1mask].mean())
        u_std.append(u_x1[u_x1mask].std())
        #print u_x1[u_x1mask]
        #if np.isfinite(u_x1mask.mean()) & len(u_x1[u_x1mask])>0:
        #    print "%8.2f  %8.2f  %8.2f "%(u_x1[u_x1mask].mean(),u_x1[u_x1mask].std(),u_x1[u_x1mask].max())
    # the best background estimate of the typical row is now u_xsum
    # fit a smooth spline through the u_xsum values (or boxcar?)
    #print "u_x means "
    #print u_xsum
    u_xsum = np.asarray(u_xsum)
    u_std = np.asarray(u_std)
    # spline fit only uses the finite column means
    u_xsum_ok = np.isfinite(u_xsum)
    bg_tcp = interpolate.splrep(np.arange(nx)[u_xsum_ok],
                                np.asarray(u_xsum)[u_xsum_ok], s=smo1)
    # representative background profile in column
    u_x = interpolate.splev(np.arange(nx), bg_tcp, )
    return u_xsum, u_x, u_std
def get_object(bucket,key,fname):
    """Given a bucket and a key, download the object into the local file fname via aws-s3api get-object."""
    return aws_s3api(['get-object','--bucket',bucket,'--key',key,fname])
def test_fetchyaml_with_destination_encoding_config():
    """Get encoding from config.

    Mocks the file open and checks fetchyaml formats the path/key from
    context and opens the file with the configured utf-16 encoding.
    """
    context = Context({
        'keyhere': {'sub': ['outkey', 2, 3], 'arbk': 'arbfile'},
        'fetchYaml': {
            'path': '/arb/{keyhere[arbk]}',
            'key': '{keyhere[sub][0]}'}})
    # patch the module-level open so no real file is touched
    with patch('pypyr.steps.fetchyaml.open', mock_open(
            read_data='1: 2\n2: 3')) as mock_file:
        filefetcher.run_step(context)
    # path placeholders must be formatted, encoding must come from config
    mock_file.assert_called_with('/arb/arbfile', encoding='utf-16')
    assert len(context) == 3
    # the parsed yaml lands under the formatted destination key
    assert context['outkey'] == {1: 2, 2: 3}
    assert context['keyhere'] == {'sub': ['outkey', 2, 3], 'arbk': 'arbfile'}
    assert context['fetchYaml'] == {
        'path': '/arb/{keyhere[arbk]}',
        'key': '{keyhere[sub][0]}'}
def find_file(filename):
    """
    Check whether ``filename`` exists among the ``*.txt`` files in the
    current working directory.

    :param filename: file name to look for (including the .txt extension)
    :return: True if the file exists, False otherwise
    """
    # glob returns bare names for a non-recursive relative pattern,
    # so a direct membership test replaces the if/else on a boolean.
    return filename in glob.glob("*.txt")
def read(fname):
    """Return the text content of *fname*, resolved relative to this module's directory."""
    full_path = os.path.join(os.path.dirname(__file__), fname)
    with open(full_path) as handle:
        return handle.read()
def run(room, spawn):
    # type: (RoomMind, StructureSpawn) -> None
    """
    Activates the spawner, spawning what's needed, as determined by the RoomMind.
    Manages deciding what parts belong on what creep base as well.

    NOTE(review): this module is Transcrypt (Python-to-JS) code for Screeps;
    identity comparisons (``is``) compile to JS ``===`` and do not carry
    CPython identity semantics.
    :type room: rooms.room_mind.RoomMind
    :type spawn: StructureSpawn
    :type
    """
    # nothing to do while already spawning or while squads want renewals
    if spawn.spawning or room.squads.any_high_priority_renew():
        return
    role_obj = room.get_next_role()
    # This is what is represented by "role_obj"
    # return {
    #     "role": role_needed,
    #     "base": self.get_variable_base(role_needed),
    #     "replacing": self.get_next_replacement_name(role_needed),
    #     "num_sections": self.get_max_sections_for_role(role_needed),
    # }
    if not role_obj:
        # TODO: at this point, figure out how long until the next replacement is needed!
        # if not room.mem.spawning_already_reported_no_next_role:
        #     print("[{}][spawning] All roles are good, no need to spawn more!".format(room.name))
        #     room.mem.spawning_already_reported_no_next_role = True
        return
    role = role_obj[roleobj_key_role]
    base = role_obj[roleobj_key_base]
    num_sections = role_obj[roleobj_key_num_sections] or 0
    replacing = role_obj[roleobj_key_replacing]
    # account for energy other spawns in this room already committed this tick
    ubos_cache = volatile_cache.mem("energy_used_by_other_spawns")
    if ubos_cache.has(room.name):
        filled = spawn.room.energyAvailable - ubos_cache.get(room.name)
    else:
        filled = spawn.room.energyAvailable
    # If we have very few harvesters, try to spawn a new one! But don't make it too small, if we already have a big
    # harvester. 150 * work_mass will make a new harvester somewhat smaller than the existing one, but it shouldn't be
    # too bad. We *can* assume that all work_mass at this point is in harvesters, since consistency.reassign_roles()
    # will reassign everyone to harvester if there are fewer than 2 harvesters existing.
    if emergency_conditions(room):
        print("[{}] WARNING: Bootstrapping room!".format(room.name))
        energy = filled
    else:
        energy = spawn.room.energyCapacityAvailable
    # fractional section counts encode an extra "half section" of parts
    half_section = 1 if num_sections % 1 else 0
    num_sections -= num_sections % 1  # This is so as to only create expected behavior with half-sections
    # clamp/shrink the target size so its cost fits within available energy
    if num_sections is not None and base in scalable_sections:
        if (num_sections <= 0 or not num_sections) and not (num_sections is 0 and half_section):  # Catch NaN here too?
            print("[{}][spawning] Trying to spawn a 0-section {} creep! Changing this to a 1-section creep!"
                  .format(room.name, base))
            num_sections = 1
            role_obj[roleobj_key_num_sections] = 1
        cost = cost_of_sections(base, num_sections, energy) + half_section * half_section_cost(base)
        if not cost:
            print("[{}][spawning] ERROR: Unknown cost retrieved from cost_of_sections({}, {}, {}): {}"
                  .format(room.name, base, num_sections, energy, cost))
            cost = Infinity
        if cost > energy:
            new_size = max_sections_of(room, base)
            if new_size <= 0:
                if low_energy_dynamic.includes(base):
                    cost = energy
                else:
                    print("[{}][spawning] ERROR: Trying to spawn a {}, which we don't have enough energy for even 1"
                          " section of!".format(room.name, base))
                    return
            else:
                print("[{}][spawning] Adjusted creep size from {} to {} to match available energy."
                      .format(room.name, num_sections, new_size))
                # Since the literal memory object is returned, this mutation will stick for until this creep has been
                # spawned, or the target creep has been refreshed
                num_sections = role_obj[roleobj_key_num_sections] = new_size
                half_section = 1 if num_sections % 1 else 0
                num_sections -= num_sections % 1
                cost = cost_of_sections(base, num_sections, energy) + half_section * half_section_cost(base)
        energy = cost
    if filled < energy:
        # print("[{}][spawning] Room doesn't have enough energy! {} < {}!".format(room.name, filled, energy))
        return
    # per-base body assembly: each branch below builds the `parts` list for
    # one creep base type, optionally recording a `descriptive_level` string
    descriptive_level = None  # type: Any
    if base is creep_base_1500miner:
        parts = []
        work_cost = BODYPART_COST[WORK]
        move_cost = BODYPART_COST[MOVE]
        if energy < work_cost * 3 + move_cost:  # 350 on official servers
            print("[{}][spawning] Building sub-optimal dedicated miner!".format(room.name))
            num_work = math.floor((energy - move_cost) / work_cost)
            num_move = math.floor((energy - num_work * work_cost) / move_cost)
        else:
            num_move = num_sections or 3
            num_work = 3
        for i in range(0, num_work):
            parts.append(WORK)
        for i in range(0, num_move):
            parts.append(MOVE)
        descriptive_level = "work:{}-move:{}".format(num_work, num_move)
    elif base is creep_base_3000miner:
        work_cost = BODYPART_COST[WORK]
        move_cost = BODYPART_COST[MOVE]
        parts = []
        if energy < work_cost * 5 + move_cost:  # 550 on offical servers
            print("[{}][spawning] Building sub-optimal dedicated miner!".format(room.name))
            num_work = math.floor((energy - move_cost) / work_cost)
            num_move = math.floor((energy - num_work * work_cost) / move_cost)
        else:
            num_move = num_sections or 5
            num_work = 5
        for i in range(0, num_work):
            parts.append(WORK)
        for i in range(0, num_move):
            parts.append(MOVE)
        descriptive_level = "work:{}-move:{}".format(num_work, num_move)
    elif base is creep_base_4000miner:
        work_cost = BODYPART_COST[WORK]
        move_cost = BODYPART_COST[MOVE]
        parts = []
        if energy < work_cost * 7 + move_cost:  # 750 on official servers
            print("[{}][spawning] Building sub-optimal dedicated miner!".format(room.name))
            num_work = math.floor((energy - move_cost) / work_cost)
            num_move = math.floor((energy - num_work * work_cost) / move_cost)
        else:
            num_move = num_sections or 7
            num_work = 7
        for i in range(0, num_work):
            parts.append(WORK)
        for i in range(0, num_move):
            parts.append(MOVE)
        descriptive_level = "work:{}-move:{}".format(num_work, num_move)
    elif base is creep_base_carry3000miner:
        work_cost = BODYPART_COST[WORK]
        move_cost = BODYPART_COST[MOVE]
        carry_cost = BODYPART_COST[CARRY]
        if energy < work_cost * 5 + move_cost + carry_cost:
            print("[{}][spawning] Too few extensions to build a dedicated 3000 miner with carry!"
                  .format(room.name))
            if Game.time % 30 == 3:
                room.reset_planned_role()
            return
        parts = []
        num_move = num_sections or 5
        num_work = 5
        for i in range(0, num_work):
            parts.append(WORK)
        parts.append(CARRY)
        for i in range(0, num_move):
            parts.append(MOVE)
        descriptive_level = num_move
    elif base is creep_base_reserving:
        parts = []
        for i in range(0, num_sections):
            parts.append(MOVE)
        for i in range(0, num_sections):
            parts.append(CLAIM)
        descriptive_level = num_sections
    elif base is creep_base_claiming:
        claim_cost = BODYPART_COST[CLAIM]
        move_cost = BODYPART_COST[MOVE]
        # pick the largest affordable fixed body, in descending order
        if energy >= claim_cost + move_cost * 7:
            parts = [MOVE, MOVE, MOVE, MOVE, MOVE, MOVE, CLAIM, MOVE]
        elif energy >= claim_cost + move_cost * 4:
            parts = [MOVE, MOVE, MOVE, CLAIM, MOVE]
        elif energy >= claim_cost + move_cost * 2:
            parts = [MOVE, CLAIM, MOVE]
        elif energy > claim_cost + move_cost:
            parts = [CLAIM, MOVE]
        else:
            print("[{}][spawning] Too few extensions to build a claim creep!"
                  .format(room.name))
            if Game.time % 30 == 3:
                room.reset_planned_role()
            return
    elif base is creep_base_claim_attack:
        parts = []
        for i in range(0, half_section):
            parts.append(TOUGH)
        for i in range(0, num_sections * 5):
            parts.append(CLAIM)
        for i in range(0, num_sections * 5 + half_section * 2):
            parts.append(MOVE)
        for i in range(0, half_section):
            parts.append(HEAL)
        if half_section:
            descriptive_level = 'claim:{}-heal:{}'.format(num_sections * 5, half_section)
        else:
            descriptive_level = 'claim:{}'.format(num_sections)
    elif base is creep_base_hauler:
        parts = []
        for i in range(0, num_sections):
            parts.append(CARRY)
        for i in range(0, num_sections):
            parts.append(MOVE)
        descriptive_level = num_sections
    elif base is creep_base_half_move_hauler:
        parts = []
        for i in range(0, num_sections * 2 + half_section):
            parts.append(CARRY)
        for i in range(0, num_sections + half_section):
            parts.append(MOVE)
        descriptive_level = num_sections
    elif base is creep_base_work_full_move_hauler:
        parts = []
        for i in range(0, num_sections):
            parts.append(CARRY)
        for part in initial_section[base]:
            parts.append(part)
        for i in range(0, num_sections):
            parts.append(MOVE)
        descriptive_level = num_sections
    elif base is creep_base_work_half_move_hauler:
        parts = []
        for i in range(0, num_sections * 2 + half_section):
            parts.append(CARRY)
        for part in initial_section[base]:
            parts.append(part)
        for i in range(0, num_sections + half_section):
            parts.append(MOVE)
        descriptive_level = num_sections * 2 + 1
    elif base is creep_base_worker:
        move_cost = BODYPART_COST[MOVE]
        carry_cost = BODYPART_COST[CARRY]
        work_cost = BODYPART_COST[WORK]
        if energy >= move_cost * 4 + carry_cost * 3 + work_cost:  # 450 on official servers
            parts = []
            for i in range(0, num_sections):
                parts.append(CARRY)
                parts.append(CARRY)
                parts.append(CARRY)
                parts.append(MOVE)
            for i in range(0, num_sections + half_section):
                parts.append(WORK)
            for i in range(0, num_sections * 3 + half_section):
                parts.append(MOVE)
            descriptive_level = "carry:{}-work:{}".format(num_sections * 3, num_sections)
        elif energy >= move_cost * 3 + carry_cost * 2 + work_cost:  # 400 on official servers
            parts = [MOVE, MOVE, MOVE, CARRY, CARRY, WORK]
            descriptive_level = "carry:2-work:1"
        elif energy >= move_cost * 2 + carry_cost + work_cost:  # 250 on official servers
            parts = [MOVE, MOVE, CARRY, WORK]
            descriptive_level = "carry:1-work:1"
        else:
            print("[{}][spawning] Too few extensions to build a worker ({}/{} energy)!".format(room.name, energy, 250))
            if Game.time % 30 == 3:
                room.reset_planned_role()
            return
    elif base is creep_base_defender:
        parts = []
        # # MOVE, MOVE, ATTACK, TOUCH = one section = 190
        # MOVE, ATTACK, CARRY = one section = 180 [TOUGH, MOVE, MOVE, MOVE, ATTACK, ATTACK],
        for i in range(0, num_sections):
            parts.append(TOUGH)
        for i in range(0, math.floor(num_sections * 1.5)):
            parts.append(MOVE)
        for i in range(0, num_sections * 2 + half_section):
            parts.append(ATTACK)
        for i in range(0, math.ceil(num_sections * 1.5) + half_section):
            parts.append(MOVE)
        descriptive_level = num_sections
    elif base is creep_base_rampart_defense:
        parts = []
        for i in range(0, num_sections + half_section):
            parts.append(MOVE)
        for i in range(0, num_sections * 2 + half_section):
            parts.append(ATTACK)
        descriptive_level = num_sections * 2 + half_section
    elif base is creep_base_ranged_offense:
        parts = []
        for i in range(0, num_sections):
            parts.append(RANGED_ATTACK)
        for i in range(0, 1 + num_sections):
            parts.append(MOVE)
        parts.append(HEAL)
        descriptive_level = num_sections
    elif base is creep_base_3h:
        parts = []
        for i in range(0, half_section * 2):
            parts.append(TOUGH)
        for i in range(0, num_sections):
            parts.append(RANGED_ATTACK)
        for i in range(0, 3 + 2 * half_section + num_sections):
            parts.append(MOVE)
        for i in range(0, 3):
            parts.append(HEAL)
        descriptive_level = num_sections
    elif base is creep_base_mammoth_miner:
        parts = [MOVE, CARRY]
        move_cost = BODYPART_COST[MOVE]
        carry_cost = BODYPART_COST[CARRY]
        work_cost = BODYPART_COST[WORK]
        energy_counter = move_cost + carry_cost
        part_counter = 2
        move_counter = 0.25
        # TODO: this would be much better if done in constant time.
        # greedily add WORK parts, inserting a MOVE for every 4 WORKs,
        # until the part or energy budget runs out
        for i in range(0, 2):
            if part_counter >= MAX_CREEP_SIZE:
                break
            if energy_counter >= energy - move_cost:
                break
            # parts.append(CARRY)
            # energy_counter += carry_cost
            # part_counter += 1
            # move_counter += 0.25
            for _ignored in range(0, 25):
                if move_counter >= 1:
                    if part_counter >= MAX_CREEP_SIZE:
                        break
                    if energy_counter >= energy - move_cost:
                        break
                    parts.append(MOVE)
                    energy_counter += move_cost
                    part_counter += 1
                    move_counter -= 1
                if part_counter >= MAX_CREEP_SIZE:
                    break
                if energy_counter >= energy - work_cost:
                    break
                parts.append(WORK)
                energy_counter += work_cost
                part_counter += 1
                move_counter += 0.25
    elif base is creep_base_goader:
        parts = []
        for i in range(0, num_sections * 2 + 1 + half_section):  # extra tough in initial section
            parts.append(TOUGH)
        parts.append(ATTACK)
        for i in range(0, num_sections + 1 + half_section):  # extra move in initial section
            parts.append(MOVE)
    elif base is creep_base_full_move_goader:
        parts = []
        for i in range(0, num_sections * 2):
            parts.append(CARRY)
        for i in range(0, num_sections):
            parts.append(TOUGH)
        parts.append(ATTACK)
        for i in range(0, num_sections + 1):  # extra move in initial section
            parts.append(MOVE)
    elif base is creep_base_half_move_healer:
        parts = []
        total_heal = num_sections * 2 + half_section
        total_move = num_sections + half_section
        # interleave halves so damage strips MOVE/HEAL evenly
        for i in range(0, math.floor(total_move / 2)):
            parts.append(MOVE)
        for i in range(0, math.floor(total_heal / 2)):
            parts.append(HEAL)
        for i in range(0, math.ceil(total_move / 2)):
            parts.append(MOVE)
        for i in range(0, math.ceil(total_heal / 2)):
            parts.append(HEAL)
    elif base is creep_base_full_move_healer:
        parts = []
        for i in range(0, math.floor(num_sections / 2)):
            parts.append(MOVE)
        for i in range(0, math.floor(num_sections / 2)):
            parts.append(HEAL)
        for i in range(0, math.ceil(num_sections / 2)):
            parts.append(MOVE)
        for i in range(0, math.ceil(num_sections / 2)):
            parts.append(HEAL)
    elif base is creep_base_squad_healer:
        parts = []
        for i in range(0, num_sections):
            parts.append(MOVE)
        for i in range(0, num_sections):
            parts.append(HEAL)
    elif base is creep_base_squad_ranged:
        parts = []
        for i in range(0, num_sections):
            parts.append(MOVE)
        for i in range(0, num_sections):
            parts.append(RANGED_ATTACK)
    elif base is creep_base_squad_dismantle:
        parts = []
        for i in range(0, math.floor(num_sections / 2)):
            parts.append(MOVE)
        for i in range(0, num_sections):
            parts.append(WORK)
        for i in range(0, math.ceil(num_sections / 2)):
            parts.append(MOVE)
    elif base is creep_base_dismantler:
        parts = []
        for i in range(0, num_sections * 2 + half_section):
            parts.append(WORK)
        for i in range(0, num_sections + half_section):
            parts.append(MOVE)
    elif base is creep_base_full_move_dismantler:
        parts = []
        for i in range(0, num_sections):
            parts.append(WORK)
        for i in range(0, num_sections):
            parts.append(MOVE)
    elif base is creep_base_full_upgrader:
        if num_sections > 1 or half_section:
            parts = [CARRY]
            num_work = num_sections * 2 + half_section
            num_move = num_sections + half_section + 1
            for i in range(0, num_work):
                parts.append(WORK)
            if num_work > 15:
                # Technically the initial section always has 2 carry parts,
                # but let's not include this second one if we don't need to
                parts.append(CARRY)
            elif half_section:
                # we have one fewer CARRY and one fewer work in the half section, so we can afford to have 1 less MOVE.
                num_move -= 1
            for i in range(0, num_move):
                parts.append(MOVE)
            descriptive_level = num_work
        else:
            parts = [MOVE, CARRY, WORK]
            descriptive_level = "min"
    elif base is creep_base_power_attack:
        parts = []
        for i in range(0, num_sections):
            parts.append(TOUGH)
        for i in range(0, num_sections * 2 + half_section):
            parts.append(MOVE)
        for i in range(0, num_sections * 3 + half_section):
            parts.append(ATTACK)
    elif base is creep_base_full_move_attack:
        parts = []
        for i in range(0, num_sections):
            parts.append(MOVE)
        for i in range(0, num_sections):
            parts.append(ATTACK)
    elif base is creep_base_scout:
        parts = [MOVE]
    else:
        print("[{}][spawning] Unknown creep base {}! Role object: {}".format(room.name, base,
                                                                             JSON.stringify(role_obj)))
        room.reset_planned_role()
        return
    # pick a random unused name (one retry on collision)
    name = naming.random_digits()
    if Game.creeps[name]:
        name = naming.random_digits()
    home = room.name
    # creeps replacing an existing creep start in a temporary "replacing" role
    if replacing:
        memory = {
            "home": home,
            "role": role_temporary_replacing,
            "replacing": replacing,
            "replacing_role": role
        }
    else:
        memory = {"home": home, "role": role}
    if role_obj[roleobj_key_initial_memory]:
        # Add whatever memory seems to be necessary
        _.extend(memory, role_obj[roleobj_key_initial_memory])
    # final affordability check against energy minus other spawns' usage
    if _.sum(parts, lambda p: BODYPART_COST[p]) > spawn.room.energyAvailable - ubos_cache.get(room.name):
        print("[{}][spawning] Warning: Generated too costly of a body for a {}! Available energy: {}, cost: {}."
              .format(room.name, role, spawn.room.energyAvailable - ubos_cache.get(room.name),
                      _.sum(parts, lambda p: BODYPART_COST[p])))
        room.reset_planned_role()
        return
    # if descriptive_level:
    #     if replacing:
    #         print("[{}][spawning] Spawning {}, a {} with body {} level {}, live-replacing {}.".format(
    #             room.name, name, role, base, descriptive_level, replacing))
    #     else:
    #         print("[{}][spawning] Spawning {}, a {} with body {} level {}.".format(
    #             room.name, name, role, base, descriptive_level))
    # else:
    #     if replacing:
    #         print("[{}][spawning] Spawning {}, a {} with body {}, live-replacing {}.".format(
    #             room.name, name, role, base, replacing))
    #     else:
    #         print("[{}][spawning] Spawning {}, a {} with body {}.".format(room.name, name, role, base))
    result = spawn.createCreep(parts, name, memory)
    if result not in Game.creeps:
        print("[{}][spawning] Invalid response from createCreep: {}".format(room.name, result))
        if result == ERR_NOT_ENOUGH_RESOURCES:
            print("[{}][spawning] Couldn't create body {} with energy {} (target num_sections: {})!"
                  .format(room.name, parts, energy, num_sections))
        elif result == ERR_INVALID_ARGS:
            if descriptive_level:
                print("[{}][spawning] Produced invalid body array for creep type {} level {}: {}"
                      .format(room.name, base, descriptive_level, JSON.stringify(parts)))
            else:
                print("[{}][spawning] Produced invalid body array for creep type {}: {}"
                      .format(room.name, base, JSON.stringify(parts)))
    else:
        # success: record spent energy and run post-spawn bookkeeping
        result = cast(str, result)
        used = ubos_cache.get(room.name) or 0
        used += postspawn_calculate_cost_of(parts)
        ubos_cache.set(room.name, used)
        room.reset_planned_role()
        if role_obj[roleobj_key_initial_targets]:
            for target_type, target_id in role_obj[roleobj_key_initial_targets]:
                room.hive.targets.manually_register(cast(Creep, {'name': name}), target_type, target_id)
        if role_obj[roleobj_key_request_identifier]:
            room.successfully_spawned_request(role_obj[roleobj_key_request_identifier])
        if role_obj[roleobj_key_run_after_spawning]:
            # NOTE(review): evaluates stored JS source as a callback; only safe
            # because role objects are produced by our own code
            __pragma__('js', '(eval(role_obj[roleobj_key_run_after_spawning]))')(name)
        if replacing:
            room.register_new_replacing_creep(replacing, result)
        else:
            room.register_to_role(Game.creeps[result])
def tweetnacl_crypto_secretbox(max_messagelength=256):
    """
    Build a symbolic-execution (project, state) pair for TweetNaCl's
    crypto_secretbox_xsalsa20poly1305_tweet entry point.

    max_messagelength: maximum length of the message, in bytes;
        the symbolic execution will not consider messages longer than this.
    """
    proj = tweetnaclProject()
    # Argument layout mirrors the C signature of the secretbox primitive.
    arg_spec = [
        ("c", pointerToUnconstrainedPublic()),   # output buffer, will hold ciphertext, length 'mlen'
        ("m", pointerToUnconstrainedPublic()),   # message buffer, length 'mlen'
        ("mlen", publicValue()),                 # message length (a value, not a pointer)
        ("n", pointerTo(secretArray(24), 24)),   # nonce, crypto_secretbox_NONCEBYTES bytes
        ("k", pointerTo(secretArray(32), 32)),   # secret key, 32 bytes
    ]
    state = funcEntryState(proj, "crypto_secretbox_xsalsa20poly1305_tweet", arg_spec)
    # Bound the symbolic message length so exploration stays finite.
    state.add_constraints(getArgBVS(state, 'mlen') <= max_messagelength)
    addDevURandom(state)
    return (proj, state)
def parameterized_dropout(probs: Tensor,
                          mask: Tensor,
                          values: Tensor,
                          random_rate: float = 0.5,
                          epsilon: float = 0.1) -> Tensor:
    """Differentiable mix between `values * mask` and `values * probs`.

    Returns (values * mask) when random_rate == 1.0, and (values * probs)
    when random_rate == 0.0 or in eval mode (self.training == False).
    Otherwise it randomly chooses, frame-by-frame / vector-by-vector, which
    of the two to use.  The point of this function is that it intelligently
    backpropagates derivatives so that `probs` can be meaningfully trained;
    see `get_derivative_scales()` for the core of how derivatives w.r.t.
    `probs` are obtained.

    Args:
      probs: probabilities with which `mask` was chosen; derivatives w.r.t.
          this are computed.  Shape (*, C) with C interpreted as the channel
          dimension; values must lie in [0, 1].
      mask: (possibly boolean) Tensor of shape (*, C) with values 0/False or
          1/True; True/1 means the value is "passed through".  The caller
          asserts these were chosen with probabilities equal to `probs`,
          e.g. mask = (torch.rand_like(probs) < probs).  (If sampling uses a
          more complicated scheme with matching marginals, the derivative
          correctness is a little weaker.)
      values: Tensor of shape (*, C), same as probs and mask; the values to
          be multiplied by the mask (or sometimes scaled by `probs`, if
          random_rate < 1).  Derivatives backpropagated here are exact,
          i.e. output_grad * mask.  Elements are currently required to lie
          in [0, 1] (needed for a formula involving epsilon).
      random_rate: how often the zero-one mask is used; the rest of the time
          the expected value (probs) is used.
      epsilon: guards against division by zero in backprop; controls a
          bias-variance tradeoff in derivatives (small -> lower bias,
          higher variance).

    Returns: a Tensor of the same shape (*, C), randomly somewhere between
        values * mask and values * probs.
    """
    return _ParameterizedDropout.apply(probs, mask, values, random_rate, epsilon)
def get_auto_scale_v_core(resource_group_name: Optional[str] = None,
                          vcore_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutoScaleVCoreResult:
    """
    Represents an instance of an auto scale v-core resource.
    Latest API Version: 2021-01-01.

    :param str resource_group_name: The name of the Azure Resource group of which a given PowerBIDedicated capacity is part. This name must be at least 1 character in length, and no more than 90.
    :param str vcore_name: The name of the auto scale v-core. It must be a minimum of 3 characters, and a maximum of 63.
    """
    pulumi.log.warn("""get_auto_scale_v_core is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:powerbidedicated:getAutoScaleVCore'.""")
    # Wire-format argument names expected by the provider.
    __args__ = {
        'resourceGroupName': resource_group_name,
        'vcoreName': vcore_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:powerbidedicated/latest:getAutoScaleVCore', __args__, opts=opts, typ=GetAutoScaleVCoreResult).value
    # Re-wrap the raw invoke result in the awaitable result type.
    return AwaitableGetAutoScaleVCoreResult(
        capacity_limit=__ret__.capacity_limit,
        capacity_object_id=__ret__.capacity_object_id,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        sku=__ret__.sku,
        system_data=__ret__.system_data,
        tags=__ret__.tags,
        type=__ret__.type)
def fix_python_dylib_for_pkg(self):
    """Point the dylib's install-name id at the loader-relative support
    path used by the package build format."""
    dylib = self.product.dylib
    # Work from the install prefix so relative references resolve.
    self.cmd.chdir(self.prefix)
    self.cmd.chmod(dylib)
    loader_ref = f"@loader_path/../../../../support/{self.product.name_ver}/{dylib}"
    self.install_name_tool_id(loader_ref, dylib)
    # Return to the project root when done.
    self.cmd.chdir(self.project.root)
def test_youtube_settings(mocker, settings):
    """
    Verify that constructing YouTubeApi forwards the YT_* settings to the
    Google OAuth credentials object.
    """
    for name, value in {
        "YT_ACCESS_TOKEN": "yt_access_token",
        "YT_CLIENT_ID": "yt_client_id",
        "YT_CLIENT_SECRET": "yt_secret",
        "YT_REFRESH_TOKEN": "yt_refresh",
    }.items():
        setattr(settings, name, value)
    credentials_mock = mocker.patch(
        "cloudsync.youtube.oauth2client.client.GoogleCredentials"
    )
    YouTubeApi()
    credentials_mock.assert_called_with(
        settings.YT_ACCESS_TOKEN,
        settings.YT_CLIENT_ID,
        settings.YT_CLIENT_SECRET,
        settings.YT_REFRESH_TOKEN,
        None,
        "https://accounts.google.com/o/oauth2/token",
        None,
    )
def text_output(xml, count):
    """Return JSON-formatted text built from the XML returned by E-Fetch.

    :param xml: XML document string returned by an E-Fetch request
    :param count: number of article records expected in the document
    :return: JSON string -- a list of dicts with pmid/title/authors/journal/
        year/volume/issue/pages/abstract for each article
    """
    xmldoc = minidom.parseString(xml.encode('utf-8').strip())
    jsonout = []
    for i in range(count):
        title = parse_xml(xmldoc.getElementsByTagName('ArticleTitle'), i, '')
        pmid = parse_xml(xmldoc.getElementsByTagName('PMID'), i, '')
        abstract = parse_xml(xmldoc.getElementsByTagName('AbstractText'), i, '')
        # Authors are formatted "LastName, Initials"; entries missing either
        # field are kept as empty strings (preserves author count).
        try:
            authors = xmldoc.getElementsByTagName('AuthorList')
            authors = authors[i].getElementsByTagName('Author')
            authorlist = []
            for author in authors:
                LastName = parse_xml(author.getElementsByTagName('LastName'), 0, '')
                Initials = parse_xml(author.getElementsByTagName('Initials'), 0, '')
                if LastName != '' and Initials != '':
                    authorlist.append('%s, %s' % (LastName, Initials))
                else:
                    authorlist.append('')
        except Exception:
            # Article may legitimately have no AuthorList element.
            authorlist = []
        try:
            journalinfo = xmldoc.getElementsByTagName('Journal')[i]
            journalIssue = journalinfo.getElementsByTagName('JournalIssue')[0]
        except Exception:
            journalinfo = None
            journalIssue = None
        journal = ''
        year = ''
        volume = ''
        issue = ''
        pages = ''
        if journalinfo is not None:
            journal = parse_xml(journalinfo.getElementsByTagName('Title'), 0, '')
            year = parse_xml(journalIssue.getElementsByTagName('Year'), 0, '')
            volume = parse_xml(journalIssue.getElementsByTagName('Volume'), 0, '')
            issue = parse_xml(journalIssue.getElementsByTagName('Issue'), 0, '')
            # NOTE(review): index 0 always takes the first MedlinePgn in the
            # whole document rather than the i-th article's -- confirm intended.
            pages = parse_xml(xmldoc.getElementsByTagName('MedlinePgn'), 0, '')
        jsonout.append({
            'pmid': pmid,
            'title': title,
            'authors': authorlist,
            'journal': journal,
            'year': year,
            'volume': volume,
            'issue': issue,
            'pages': pages,
            'abstract': abstract
        })
    return json.dumps(jsonout)
def test_instantiations():
    """Check that Registry.get_instance resolves 'foo' with positional,
    mixed, and keyword arguments alike."""
    registry = Registry()
    registry.add(foo)
    expected = {"a": 1, "b": 2}
    assert registry.get_instance("foo", 1, 2)() == expected
    assert registry.get_instance("foo", 1, b=2)() == expected
    assert registry.get_instance("foo", a=1, b=2)() == expected
def define_not_worked_days(list_of_days=None):
    """
    Define specific days off.

    :param list_of_days: list of integers (0: Monday ... 6: Sunday);
        defaults to [5, 6] (the weekend) when omitted -- the docstring
        always promised this default, now the signature provides it.
    """
    global NOT_WORKED_DAYS
    # None sentinel avoids a shared mutable default argument.
    NOT_WORKED_DAYS = [5, 6] if list_of_days is None else list_of_days
def main(basic_files: list, start, increment, test):
    """Renumber BASIC v2 programs.

    Supports GOTO/GOSUB renumbering via a simple two-pass algorithm.
    Known limitation: strings containing "GOTO <number>" are renumbered too,
    because quoted strings are not skipped.
    Author: Giovanni Giorgi
    """
    if test:
        print("Self test...")
        import doctest
        (fails, something) = doctest.testmod(sys.modules[__name__], verbose=True)
        # Exit status mirrors the doctest outcome.
        sys.exit(0 if fails == 0 else 1)
    for fname in basic_files:
        print("Renumbering", fname)
        # Pass 1: map every old line number to its new number.
        line_map = collect_numbers(fname, start, increment)
        # Pass 2: rewrite the program, then patch GOTO/GOSUB targets.
        renumbered = renumber_file(fname, line_map)
        fix_goto_gosub(renumbered, line_map)
        Path(renumbered).replace(fname)
def datetime2str(target, fmt='%Y-%m-%d %H:%M:%S'):
    """
    Convert a datetime object to a string.

    :param target: datetime to format
    :param fmt: strftime format string
    :return: formatted string
    """
    return target.strftime(fmt)
def periodic_targets_form(request, program):
    """
    Return the rendered periodic-targets sub-section used by the Indicator form.

    For historical reasons the input is a POST of the whole indicator form
    (sent via ajax); only a subset of its fields is used to generate the
    returned template fragment.
    """
    if not request.has_write_access:
        raise PermissionDenied
    program = get_object_or_404(Program, pk=program)
    form = PTFormInputsForm(data=request.POST)
    if not form.is_valid():
        return JsonResponse(form.errors)
    frequency = form.cleaned_data.get('target_frequency')
    event_name = ''
    start_date = ''
    num_periods = 1
    if frequency in Indicator.REGULAR_TARGET_FREQUENCIES:
        start_date = program.reporting_period_start
        # Count how many periods of this frequency fit in the reporting window.
        num_periods = sum(
            1 for _ in PeriodicTarget.generate_for_frequency(frequency)(
                start_date, program.reporting_period_end))
    generated_targets = generate_periodic_targets(
        frequency, start_date, num_periods, event_name)
    # A transient indicator carries just the fields the template needs.
    dummy_indicator = Indicator(
        target_frequency=frequency,
        unit_of_measure_type=form.cleaned_data.get('unit_of_measure_type'),
        is_cumulative=False,
    )
    content = render_to_string('indicators/indicatortargets.html', {
        'indicator': dummy_indicator,
        'periodic_targets': generated_targets
    })
    return JsonResponse({'content': content})
def get_db():
    """Return the sqlite3.Connection cached on the application context
    object `g`, creating and caching it on first use."""
    connection = getattr(g, '_database', None)
    if connection is None:
        connection = g._database = sqlite3.connect(DATABASE)
    return connection
def is_string_like(obj): # from John Hunter, types-free version
    """Check if obj is string.

    NOTE: relies on `basestring`, so this targets Python 2 (matches both
    str and unicode there); it would raise NameError on Python 3.
    """
    return isinstance(obj, basestring)
def _export_gene_set_pan_genome(meth, pan_genome_id):
"""Export orthologs from Pangenome as external FeatureSet objects. [26]
:param pan_genome_id: ID of pangenome object [26.1]
:type pan_genome_id: kbtypes.KBaseGenomes.Pangenome
:ui_name pan_genome_id: Pangenome ID
:return: Generated Compare Genome
:rtype: kbtypes.KBaseGenomes.Pangenome
:output_widget: kbasePanGenome
"""
meth.stages = 1 # for reporting progress
return json.dumps({'ws': meth.workspace_id, 'name': pan_genome_id, 'withExport': 'true'}) | 5,327,281 |
def launch_dashboard():
    """Launch a dashboard displaying protocol summary and resource status."""
    # Load the experiment protocol database.
    protocol = MLEProtocol("mle_protocol.db")
    # Monitor the local machine. Slurm / grid-engine clusters can be
    # monitored instead, e.g.:
    #   MLEResource(resource_name="slurm-cluster",
    #               monitor_config={"partitions": ["partition-1", "partition-2"]})
    #   MLEResource(resource_name="sge-cluster",
    #               monitor_config={"queues": ["queue-1", "queue-2"]})
    resource = MLEResource(resource_name="local", monitor_config={})
    dashboard = MLEDashboard(protocol, resource)
    # Blocks here: renders the dashboard in a while loop.
    dashboard.live()
def collect_includes(formula):
    """ one of the most basic things to know about a module is which
        include files it comes with: e.g. for boost you're supposed
        to #include "boost/regex/foo.h", not #include "regex/foo.h"
        or #include "foo.h".
        For most modules this list of #includes can be generated from
        the module's unpacked archive directly (assuming the root
        include directories are listed in the module's artifact list),
        but for the kinds of modules that generate or modify #include
        files during ./configure you should collect_includes only after
        ./configure or even after make. These kinds of libs are hopefully
        rare though.
        Returns a list of TwoComponentPath objects.

        NOTE(review): the docstring historically claimed the formula is
        modified in place with an 'includes' property, but this function
        only returns the list -- confirm which contract callers rely on.
    """
    gyp = get_library().load_gyp(formula)
    module = formula['module']
    gyp_root_dir = os.path.join('./bru_modules', module)
    # here we assume the gyp file is located in gyp_root_dir
    include_files = []
    for target in gyp['targets']:
        if 'include_dirs' not in target:
            continue # e.g. target zlib:zlib_test doesn't need include_dirs
        for include_dir in target['include_dirs']:
            abs_include_dir = os.path.join(gyp_root_dir, include_dir)
            include_files += [TwoComponentPath(abs_include_dir, include_file)
                              for include_file
                              in get_include_files(abs_include_dir)]
    if not include_files:
        # some modules (e.g. ICU) have no gyp file yet, so just warn
        print("WARNING: no includes for module ", module)
    return include_files
def get_log_by_date(log_file, log_capture_date, log_capture_date_option, log_capture_maxlen, log_min_level):
    """
    capture log files based on capture_date before or after fields
    :param log_file: path to the log file to scan
    :param log_capture_date: epoch formatted field in milliseconds
    :param log_capture_date_option: 'before', 'on', or 'after'
    :param log_capture_maxlen: # of lines to capture at end of list (falsy = no limit)
    :param log_min_level: DEBUG, INFO, WARNING, ERROR levels to filter DEBUG is all, INFO imcludes
                          WARNING and ERROR, etc.
    :return: tuple (number of captured lines, captured lines joined into one string)
    """
    log = logging.getLogger(__name__)
    # read from the beginning looking for lines to capture based on timestamp
    time_struct = time.localtime(log_capture_date/1000)
    compare_date = datetime.fromtimestamp(time.mktime(time_struct))
    if log_capture_date_option == 'on':
        # 'on' compares whole days, so truncate the comparison date to midnight
        compare_date = compare_date.replace(hour=0, minute=0, second=0, microsecond=0)
    log.debug("Looking for date: {}".format(time.strftime(DATE_TIME_FORMAT, time_struct)))
    captured_list = []
    result_line = None
    # 'triggered' flips once the first line inside the capture window is seen
    triggered = False
    for line in get_log_file_data(log_file):
        capture = False
        # attempt to get the date string from the log entry.
        # Some entries are multi-line, so not all lines will have a date string
        try:
            if log_capture_date_option == 'on':
                log_file_date = datetime.strptime(line.split(' ', 1)[0], DATE_FORMAT)
            else:
                log_file_date = datetime.strptime(' '.join(line.split(' ', 2)[:2]), DATE_TIME_MS_FORMAT)
        except (ValueError, TypeError):
            log_file_date = None
        if not triggered:
            # still scanning for the first line that matches the capture window
            if log_file_date:
                if log_capture_date_option == 'before' and log_file_date <= compare_date:
                    triggered = True
                    capture = True
                elif log_capture_date_option == 'on' and log_file_date == compare_date:
                    triggered = True
                    capture = True
                elif log_capture_date_option == 'after' and log_file_date >= compare_date:
                    triggered = True
                    capture = True
        else:
            # don't capture after the compare_date
            if log_capture_date_option == 'before' and log_file_date:
                if log_file_date <= compare_date:
                    capture = True
                else:
                    break
            # only capture for the given date
            elif log_capture_date_option == 'on' and log_file_date:
                if log_file_date == compare_date:
                    capture = True
                else:
                    break
            else:
                # timestamp-less continuation lines (and everything in 'after'
                # mode once triggered) are captured unconditionally
                capture = True
        if capture:
            # NOTE(review): the original (unfiltered) line is appended;
            # result_line only gates whether the level filter passed and is
            # threaded back as multi-line context -- confirm intended.
            result_line = filter_log_level(log_min_level, line, multi_line=result_line)
            if result_line:
                captured_list.append(line)
    # add maxlen
    if log_capture_maxlen:
        d = deque(captured_list, maxlen=log_capture_maxlen)
        d_list = list(d)
        captured_lines = "".join(d_list)
        num_of_lines = len(d_list)
    else:
        captured_lines = "".join(captured_list)
        num_of_lines = len(captured_list)
    return num_of_lines, captured_lines
def makeId(timestamp=0, machine=0, flow=0):
    """
    Compose an id from a unix-style timestamp (not a python timestamp),
    a machine number and a flow number.
    """
    # Bit layout: [timestamp - _base] << 13 | machine << 8 | flow
    relative = timestamp - _base
    return (relative << 13) | (machine << 8) | flow
def write_dihed_to_file(structs, outname, verbose=True):
    """
    Write the dihedral angles of a list of structures into a tab-separated
    file where each line represents a single structure and the columns
    alternate phi, psi angles for each residue
    (the line starts with the first residue's psi and ends with the last
    collected psi; see the loops below for the exact column layout)
    :param structs: list of Structure objects
    :param outname: path to where the dihed file should be written
    :param verbose: If True updates will be written to terminal
    """
    # NOTE(review): fout is not closed on an exception part-way through;
    # a try/finally (or 'with') would be safer -- left as-is here.
    fout = open(outname, 'w+')
    i = 0
    start = time.time()
    if verbose:
        print "Writing dihedral angles to a file. This may take some time"
    for s in structs:
        all_dihed = s.get_all_dihed()
        phis = []
        psis = []
        # collect angles for residue indices 1 .. max_key-1
        # (assumes keys are 1-based residue indices -- TODO confirm)
        for j in range(1, max(all_dihed.keys())):
            phis.append(all_dihed[j][0])
            psis.append(all_dihed[j][1])
        outstr = ''
        # first column is the first residue's psi (no preceding phi)
        outstr += "%f\t" % psis[0]
        for j in range(1, len(phis) - 1):
            outstr += "%f\t%f\t" % (phis[j], psis[j])
        # close the line with the last collected psi, newline-terminated
        outstr += "%f\n" % psis[-1]
        fout.write(outstr)
        i += 1
        if verbose:
            if i % 100 == 0:
                print "Number of structures read: %i / %i in %fs" % \
                    (i, len(structs), time.time() - start)
    fout.close()
def _get_token(cls, token_type):
"""
when token expire flush,return token value
"""
assert token_type in ['tenant_access_token', 'app_access_token'], token_type
if not hasattr(cls.request, token_type) or hasattr(cls.request, token_type) or\
time.time() >= getattr(cls.request, token_type)['expire']:
setattr(cls.request, token_type, getattr(cls, 'get_%s' % token_type)())
return getattr(cls.request, token_type)[token_type] | 5,327,287 |
def crop_yield_plot(data_dict, savepath, quantiles=SOYBEAN_QUANTILES):
    """
    For the most part, reformatting of
    https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/6%20result_analysis/yield_map.py

    Color each county path in the counties SVG according to which quantile
    band its yield value falls into, then write the recolored SVG to savepath.
    """
    # load the svg file and parse it with Beautiful Soup
    svg = Path('data/counties.svg').open('r').read()
    soup = BeautifulSoup(svg, features="html.parser")
    path_style = 'font-size:12px;fill-rule:nonzero;stroke:#FFFFFF;stroke-opacity:1;stroke-width:0.1' \
                 ';stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start' \
                 ':none;stroke-linejoin:bevel;fill:'
    # Quantile thresholds, highest first, paired with their color-class index;
    # anything at or below the 0.05 quantile falls through to class 0.
    bands = [(quantiles[0.95], 6), (quantiles[0.8], 5), (quantiles[0.6], 4),
             (quantiles[0.4], 3), (quantiles[0.2], 2), (quantiles[0.05], 1)]
    for county_path in soup.findAll('path'):
        if county_path['id'] in ["State_Lines", "separator"]:
            continue
        try:
            rate = county_path['id'] and data_dict[county_path['id']]
        except KeyError:
            continue
        color_class = 0
        for threshold, band_class in bands:
            if rate > threshold:
                color_class = band_class
                break
        county_path['style'] = path_style + colors[color_class]
    with savepath.open('w') as f:
        f.write(soup.prettify())
def chunk(it: Iterator, size: int) -> Iterator:
    """Yield tuples of at most *size* items from *it*; the last chunk may be
    shorter. (Chunking approach from: https://stackoverflow.com/a/22045226)"""
    source = iter(it)
    while True:
        piece = tuple(islice(source, size))
        if not piece:
            return
        yield piece
def sort_by_directory(path):
    """Sort key that groups directories first: 0 for a directory, 1 otherwise."""
    return 0 if path.is_directory else 1
def read_config(config):
    """
    Read a JSON config file describing the type and/or default value of fields.

    Entries whose 'type' cannot be resolved by str_to_type *and* that provide
    no 'default' are dropped with a warning, since they can neither be parsed
    nor filled in.

    :param config: path to the JSON config file
    :return: dict mapping each field name to its spec, with 'type' values
        converted from strings to actual types where possible
    """
    # Context manager closes the file handle deterministically
    # (the original json.load(open(...)) leaked it).
    with open(config, 'r') as config_file:
        dic_types = json.load(config_file)
    to_remove = []
    for attribute, value in dic_types.items():
        if 'type' in value:
            raw_type = value['type']
            value['type'] = str_to_type(raw_type)
            # Unresolvable type with no default: mark for removal and restore
            # the original string so the warning can show it.
            if not value['type'] and 'default' not in value:
                to_remove.append(attribute)
                value['type'] = raw_type
    for to_rm in to_remove:
        print(' [WARN] Config for', '\'' + to_rm + '\'',
              'incorrect and omitted: Type', '\'' + dic_types[to_rm]['type'] + '\'',
              'is not valid and no default value is indicated')
        del dic_types[to_rm]
    return dic_types
def is_right_hand_coordinate_system3(pose):
    """Checks whether the given pose follows the right-hand rule."""
    # Rotation columns: normal (n), orientation (o), approach (a).
    n, o, a = pose[:3, 0], pose[:3, 1], pose[:3, 2]
    # All three axes must be unit length, and n x o must reproduce a.
    unit_axes = all(axis.dot(axis).simplify() == 1 for axis in (n, o, a))
    return unit_axes and sp.simplify(n.cross(o)) == a
def model_airmassfit(hjd, am, rawflux, limbB1, limB2, inc, period, a_Rs, Rp_Rs, show=False):
    """
    Return the bestfit model for the lightcurve using 4 models of airmass correction:
    1. model with no airmass correction
    2. model with exponential airmass correction
    3. model with linear airmass correction
    4. model with 2deg polynomial airmass correction
    ___
    INPUT:
    hjd: time array (HJD)
    am: airmass array (NOTE(review): not referenced in this body -- confirm
        the residual functions pick up airmass from elsewhere)
    rawflux: raw flux array to fit
    limbB1, limB2: limb-darkening coefficients (not referenced here -- confirm)
    inc: inclination (not referenced here -- confirm)
    period: orbital period, used to phase-fold each model
    a_Rs: scaled semi-major axis (not referenced here -- confirm)
    Rp_Rs: planet-to-star radius ratio, used as the initial guess for the fits
    show: if True, print diagnostics for each fit
    OUTPUT:
    result: dataframe structure with besfit values for each model, the errors and BIC values.
    phase: from the bestfit model
    lc: lightcurve from the bestfit model
    """
    # NOTE(review): 'eflux' below is not a parameter of this function -- it
    # must be defined at module/enclosing scope before calling; confirm.
    # Model 1: no airmass correction (exponential coefficient fixed at 0)
    startpar = [Rp_Rs, np.mean(hjd), 1., 0.]
    PARINFO = [{'value':Rp_Rs,'limits':(0,1.)}, {'value':np.mean(hjd)}, {'value':1.}, {'value':0.,'fixed':True}]
    pfit1, results1 = mpyfit.fit(residuals_am_exp, startpar, args = (hjd,rawflux,eflux), parinfo=PARINFO)
    model1 = model_am_exp(hjd,pfit1[0],pfit1[1],pfit1[2],pfit1[3])
    phase1 = (hjd - pfit1[1])/period
    if show == True:
        print '...'
        print 'Model 1: no airmass correction'
        print 'bestfit values = ',pfit1
        print 'error = ', results1['parerrors']
        print 'bestnorm1 = ', results1['bestnorm']
        print 'chi-square, scipy routine = ',chisquare(rawflux, model1)
    # Model 2: exponential airmass correction (same start values, 'b' now free)
    PARINFO = [{'value':Rp_Rs,'limits':(0,1.)}, {'value':np.mean(hjd)}, {'value':1.}, {'value':0.,'fixed':False}]
    pfit2, results2 = mpyfit.fit(residuals_am_exp, startpar, args = (hjd,rawflux,eflux), parinfo=PARINFO)
    model2 = model_am_exp(hjd,pfit2[0],pfit2[1],pfit2[2],pfit2[3])
    phase2 = (hjd - pfit2[1])/period
    if show == True:
        print '...'
        print 'Model 2: exponential airmass correction'
        print 'bestfit values = ',pfit2
        print 'error = ', results2['parerrors']
        print 'bestnorm1 = ', results2['bestnorm']
        print 'chi-square, scipy routine = ',chisquare(rawflux, model2)
    # Model 3: linear airmass correction
    PARINFO = [{'value':Rp_Rs,'limits':(0,1.)},{'value':np.mean(hjd)},{'value':1.}, {'value':0.,'fixed':False}]
    pfit3, results3 = mpyfit.fit(residuals_linear, startpar, args = (hjd,rawflux,eflux), parinfo=PARINFO)
    model3 = model_am_linear(hjd,pfit3[0],pfit3[1],pfit3[2],pfit3[3])
    phase3 = (hjd - pfit3[1])/period
    if show == True:
        print '...'
        print 'Model 3: linear airmass correction'
        print 'bestfit values = ',pfit3
        print 'error = ', results3['parerrors']
        print 'bestnorm1 = ', results3['bestnorm']
        print 'chi-square, scipy routine = ',chisquare(rawflux, model3)
    # Model 4: 2deg polynomial airmass correction (adds a 5th parameter 'c')
    PARINFO = [{'value':Rp_Rs,'limits':(0,1.)},{'value':np.mean(hjd)},{'value':1.},{'value':0.},{'value':0.}]
    pstart = [Rp_Rs,np.mean(hjd),1.,0.,0.]
    pfit4, results4 = mpyfit.fit(residuals_2deg_mpfit, pstart, args = (hjd,rawflux,eflux), parinfo=PARINFO)
    model4 = model_am_2deg(hjd,pfit4[0],pfit4[1],pfit4[2],pfit4[3],pfit4[4])
    phase4 = (hjd - pfit4[1])/period
    if show == True:
        print '...'
        print 'Model 4: 2deg poly airmass correction'
        print 'bestfit values = ',pfit4
        print 'error = ', results4['parerrors']
        print 'bestnorm1 = ', results4['bestnorm']
        print 'chi-square, scipy routine = ',chisquare(rawflux, model4)
    # Obtain BIC values: compare the four fits and keep the lowest BIC
    BICarray = ['none', 'exponential', 'linear','2nd_deg_poly']
    nfree = [3,4,4,5]
    bestnorm = [results1['bestnorm'],results2['bestnorm'],results3['bestnorm'],results4['bestnorm']]
    bic = BIC(nfree,bestnorm,len(rawflux))
    RpRs = [pfit1[0], pfit2[0], pfit3[0], pfit4[0]]
    Tc = [pfit1[1], pfit2[1], pfit3[1], pfit4[1]]
    a = [pfit1[2], pfit2[2], pfit3[2], pfit4[2]]
    b = [pfit1[3], pfit2[3], pfit3[3], pfit4[3]]
    c = ['Nan','Nan','Nan',pfit4[4]]
    error1 = [results1['parerrors'][0], results2['parerrors'][0], results3['parerrors'][0], results4['parerrors'][0]]
    error2 = [results1['parerrors'][1], results2['parerrors'][1], results3['parerrors'][1], results4['parerrors'][1]]
    error3 = [results1['parerrors'][2], results2['parerrors'][2], results3['parerrors'][2], results4['parerrors'][2]]
    error4 = [results1['parerrors'][3], results2['parerrors'][3], results3['parerrors'][3], results4['parerrors'][3]]
    # NOTE(review): this reuses parerrors index [0] (the RpRs error) for the
    # 'c' column; parameter c is index [4] in model 4 -- confirm intended.
    error5 = ['Nan','Nan','Nan', results4['parerrors'][0]]
    result = DataFrame([BICarray,list(bic),RpRs,error1,Tc,error2,a,error3,b,error4,c,error5]).T
    result.columns=['Model','BIC','RpRs','eRpRs','Tc','eTc','a','ea','b','eb','c','ec']
    if show == True:
        print '... Results:'
        print result
        print 'The best model is: ',result.Model[result.BIC == result.BIC.min()]
        print 'with the BIC = ',result.BIC.min()
    # Select the phase/lightcurve of the lowest-BIC model for the return value
    bestfit = np.where(result.BIC == result.BIC.min())
    indx = bestfit[0][0]
    if indx == 0:
        lc = model1
        phase = phase1
    if indx == 1:
        lc = model2
        phase = phase2
    if indx == 2:
        lc = model3
        phase = phase3
    if indx == 3:
        lc = model4
        phase = phase4
    return result, phase, lc
def common_params_for_list(args, fields, field_labels):
    """Generate the 'params' dict that is common to every 'list' command.

    :param args: arguments from the command line.
    :param fields: possible field names for sorting.
    :param field_labels: possible field labels for sorting.
    :returns: a dict with params to pass to the client method.
    :raises exc.CommandError: on a negative limit or an invalid sort
        key / sort direction.
    """
    params = {}
    if args.limit is not None:
        if args.limit < 0:
            raise exc.CommandError(
                _('Expected non-negative --limit, got %s') % args.limit)
        params['limit'] = args.limit
    if args.sort_key is not None:
        # Accept either the column heading or the field name as the sort key.
        label_to_field = dict(zip(field_labels, fields))
        label_to_field.update(zip(fields, fields))
        if args.sort_key not in label_to_field:
            raise exc.CommandError(
                _("%(sort_key)s is an invalid field for sorting, "
                  "valid values for --sort-key are: %(valid)s") %
                {'sort_key': args.sort_key,
                 'valid': list(label_to_field)})
        params['sort_key'] = label_to_field[args.sort_key]
    if args.sort_dir is not None:
        if args.sort_dir not in ('asc', 'desc'):
            raise exc.CommandError(
                _("%s is an invalid value for sort direction, "
                  "valid values for --sort-dir are: 'asc', 'desc'") %
                args.sort_dir)
        params['sort_dir'] = args.sort_dir
    # 'marker' is optional on the args namespace; include it only when set.
    marker = getattr(args, 'marker', None)
    if marker is not None:
        params['marker'] = marker
    params['detail'] = args.detail
    return params
def create_role(role, permissions=None):
    """Creates a Search Guard role. Returns when successfully created.

    When no permissions are specified, default read-only cluster
    permissions are used.

    :param str role: Name of the role to create in Search Guard
    :param dict permissions: Search Guard role permissions (default is read access to cluster)
    :raises: RoleAlreadyExistsException, CreateRoleException
    """
    if check_role_exists(role):
        raise RoleAlreadyExistsException('Role {} already exists'.format(role))
    # Default payload: basic cluster read permissions, no index permissions.
    payload = permissions if permissions else {
        'cluster': ["indices:data/read/mget", "indices:data/read/msearch"]}
    response = requests.put(
        '{}/roles/{}'.format(settings.SEARCHGUARD_API_URL, role),
        data=json.dumps(payload), headers=settings.HEADER,
        auth=settings.SEARCHGUARD_API_AUTH)
    if response.status_code != 201:
        # The API did not confirm creation; surface its message.
        raise CreateRoleException('Error creating role {} - msg: {}'.format(role, response.text))
def _finditem(obj, key):
"""
Check if giben key exists in an object
:param obj: dictionary/list
:param key: key
:return: value at the key position
"""
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
item = _finditem(v, key)
if item is not None:
return item | 5,327,296 |
def differences(sequence):
    """
    Yield the successive differences of the given sequence.

    Accepts any iterable (not only an iterator, which the original
    required) and terminates cleanly when the input is exhausted -- the
    original `while True: next(...)` pattern raises RuntimeError under
    PEP 479 once the source runs out.
    """
    it = iter(sequence)
    try:
        previous = next(it)
    except StopIteration:
        # Fewer than one element: nothing to difference.
        return
    for current in it:
        yield current - previous
        previous = current
def assign_bonus(client, bonus_list_path):
    """
    Assign bonuses to group of workers.
    A csv file with following columns need to be provided: workerId, assignmentId, bonusAmount, reason
    Prints summary statistics, asks for interactive confirmation on stdin,
    and exits the process (via exit()) when the user declines or enters an
    unknown value.
    :param client: boto3 client object for communicating to MTurk
    :param bonus_list_path: path to the csv file with following columns:workerId, assignmentId, bonusAmount, reason
    :return:
    """
    print('Sending bonuses...')
    with open(bonus_list_path, 'r') as bonus_list:
        entries = list(csv.DictReader(bonus_list))
    # Summary statistics shown to the operator before confirming.
    bonus_amounts = [float(entry['bonusAmount']) for entry in entries]
    num_bonus_workers = len(bonus_amounts)
    total_bonus = round(sum(bonus_amounts), 2)
    max_bonus = max(bonus_amounts)
    mean_bonus = round(total_bonus / num_bonus_workers, 2)
    median_bonus = statistics.median(bonus_amounts)
    print(f'Number of workers: {num_bonus_workers}, total: {total_bonus}, max: {max_bonus}, mean: {mean_bonus}, median: {median_bonus}')
    # Interactive gate: empty input or 'n' aborts, unknown input aborts
    # with a message, only 'y' proceeds.
    proceed = input('Proceed (y/N)?: ')
    if len(proceed) > 0 and proceed.lower() not in ['y', 'n']:
        exit(f'Unknown value "{proceed}"')
    if len(proceed) == 0 or proceed.lower() == 'n':
        exit()
    failed = 0
    for row in entries:
        # Fail fast on a malformed csv row before touching the MTurk API.
        assert 'workerId' in row
        assert 'assignmentId' in row
        assert 'bonusAmount' in row
        assert 'reason' in row
        response = client.send_bonus(
            WorkerId=row['workerId'],
            BonusAmount=row['bonusAmount'],
            AssignmentId=row['assignmentId'],
            Reason=row['reason']
        )
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            print(f'Failed to send for {row}')
            failed += 1
    print(f'Bonuses sent, failed {failed}, succeeded {num_bonus_workers - failed}')
def decompress(obj):
    """Decompress LZSS-compressed bytes or a file-like object.

    Dispatches to decompress_file() when the argument exposes a 'read'
    attribute, and to decompress_bytes() otherwise.
    Returns a bytearray."""
    is_file_like = hasattr(obj, 'read')
    if is_file_like:
        return decompress_file(obj)
    return decompress_bytes(obj)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.