# Dataset dump header (column schema of the original table):
#   content: string, lengths 22-815k | id: int64, range 0-4.91M
def checksum2(path):
    """Calculate the checksum of a TSV.

    The checksum of a TSV is calculated as the sum of the division between
    the only two numbers in each row that evenly divide each other.

    Arguments
    ---------
    path : str
        Path to a TSV file.

    Returns
    -------
    The checksum of the file.

    Raises
    ------
    ValueError
        If some row contains no pair of evenly dividing numbers.
    """
    lines = read_tsv(path)

    def _even_division(line):
        # Scan ordered pairs directly instead of copying the row and popping
        # the current element for every position (the original did an O(n)
        # copy per element just to skip one index).
        for i, a in enumerate(line):
            for j, b in enumerate(line):
                if i == j:
                    continue
                if is_int(a / b):
                    return int(a / b)
                if is_int(b / a):
                    return int(b / a)
        raise ValueError('No even divisions found!')

    return sum(_even_division(line) for line in lines)
def load_config(config_file_path):
    """Load the config ini and parse settings to WORC.

    Args:
        config_file_path (str): path of the .ini config file.

    Returns:
        dict: nested dict with one sub-dict per settings section.
            Comma-separated options are parsed into lists of stripped
            values, converted per item to int/float where appropriate.
    """
    settings = configparser.ConfigParser()
    settings.read(config_file_path)

    # Helpers: a comma-separated ini option becomes a list of stripped
    # values, optionally converted per item. (Replaces ~60 copy-pasted
    # list comprehensions; also drops a leftover debug print of the keys.)
    def _str_list(section, key):
        return [item.strip() for item in settings[section][key].split(',')]

    def _int_list(section, key):
        return [int(item.strip()) for item in settings[section][key].split(',')]

    def _float_list(section, key):
        return [float(item.strip()) for item in settings[section][key].split(',')]

    settings_dict = {'General': dict(), 'CrossValidation': dict(),
                     'Labels': dict(), 'HyperOptimization': dict(),
                     'Classification': dict(), 'SelectFeatGroup': dict(),
                     'Featsel': dict(), 'FeatureScaling': dict(),
                     'SampleProcessing': dict(), 'Imputation': dict(),
                     'Ensemble': dict()}

    # General settings
    general = settings['General']
    settings_dict['General']['cross_validation'] = general.getboolean('cross_validation')
    settings_dict['General']['Joblib_ncores'] = general.getint('Joblib_ncores')
    settings_dict['General']['Joblib_backend'] = str(general['Joblib_backend'])
    settings_dict['General']['tempsave'] = general.getboolean('tempsave')
    settings_dict['General']['FeatureCalculator'] = str(general['FeatureCalculator'])

    # Feature selection settings
    for key in ['Variance', 'SelectFromModel', 'GroupwiseSearch', 'UsePCA',
                'PCAType', 'StatisticalTestUse', 'StatisticalTestMetric',
                'ReliefUse']:
        settings_dict['Featsel'][key] = _str_list('Featsel', key)
    settings_dict['Featsel']['StatisticalTestThreshold'] = \
        _float_list('Featsel', 'StatisticalTestThreshold')
    for key in ['ReliefNN', 'ReliefSampleSize', 'ReliefDistanceP',
                'ReliefNumFeatures']:
        settings_dict['Featsel'][key] = _int_list('Featsel', key)

    # Imputation settings
    settings_dict['Imputation']['use'] = _str_list('Imputation', 'use')
    settings_dict['Imputation']['strategy'] = _str_list('Imputation', 'strategy')
    settings_dict['Imputation']['n_neighbors'] = _int_list('Imputation', 'n_neighbors')

    # Feature group selection options: every key in the section is a string list
    for key in settings['SelectFeatGroup'].keys():
        settings_dict['SelectFeatGroup'][key] = _str_list('SelectFeatGroup', key)

    # Classification options (incl. SVM / RF / LR / LDA-QDA / ElasticNet /
    # SGD / Naive Bayes hyperparameter search spaces)
    classification = settings['Classification']
    settings_dict['Classification']['fastr'] = classification.getboolean('fastr')
    settings_dict['Classification']['fastr_plugin'] = str(classification['fastr_plugin'])
    for key in ['classifiers', 'SVMKernel', 'LRpenalty', 'LDA_solver',
                'SGD_loss', 'SGD_penalty']:
        settings_dict['Classification'][key] = _str_list('Classification', key)
    for key in ['max_iter', 'SVMC', 'SVMdegree', 'SVMcoef0', 'SVMgamma',
                'RFn_estimators', 'RFmin_samples_split', 'RFmax_depth',
                'LDA_shrinkage', 'QDA_reg_param', 'ElasticNet_alpha',
                'SGD_alpha', 'CNB_alpha']:
        settings_dict['Classification'][key] = _int_list('Classification', key)
    for key in ['LRC', 'ElasticNet_l1_ratio', 'SGD_l1_ratio']:
        settings_dict['Classification'][key] = _float_list('Classification', key)

    # Cross validation settings
    settings_dict['CrossValidation']['N_iterations'] = \
        settings['CrossValidation'].getint('N_iterations')
    settings_dict['CrossValidation']['test_size'] = \
        settings['CrossValidation'].getfloat('test_size')

    # Label (genetic) settings
    settings_dict['Labels']['label_names'] = _str_list('Labels', 'label_names')
    settings_dict['Labels']['modus'] = str(settings['Labels']['modus'])

    # Settings for hyper optimization. NOTE: the dict key is 'N_iter' while
    # the ini option is spelled 'N_iterations'.
    hyper = settings['HyperOptimization']
    settings_dict['HyperOptimization']['scoring_method'] = str(hyper['scoring_method'])
    settings_dict['HyperOptimization']['test_size'] = hyper.getfloat('test_size')
    settings_dict['HyperOptimization']['N_iter'] = hyper.getint('N_iterations')
    settings_dict['HyperOptimization']['n_jobspercore'] = int(hyper['n_jobspercore'])

    # Feature scaling settings
    settings_dict['FeatureScaling']['scale_features'] = \
        settings['FeatureScaling'].getboolean('scale_features')
    settings_dict['FeatureScaling']['scaling_method'] = \
        str(settings['FeatureScaling']['scaling_method'])

    # Sample processing (resampling) settings
    settings_dict['SampleProcessing']['SMOTE'] = _str_list('SampleProcessing', 'SMOTE')
    settings_dict['SampleProcessing']['SMOTE_ratio'] = \
        _int_list('SampleProcessing', 'SMOTE_ratio')
    settings_dict['SampleProcessing']['SMOTE_neighbors'] = \
        _int_list('SampleProcessing', 'SMOTE_neighbors')
    settings_dict['SampleProcessing']['Oversampling'] = \
        _str_list('SampleProcessing', 'Oversampling')

    # Ensemble settings
    settings_dict['Ensemble']['Use'] = settings['Ensemble'].getboolean('Use')

    return settings_dict
def preinit_js9():
    """Pre-initialization, when Javascript is not available yet.

    Determines install/URL paths for JS9 and radiopadre and loads the
    templated JS9 init HTML into module globals. On failure, the
    module-level JS9_ERROR is set and reported instead.
    """
    global radiopadre_kernel
    import radiopadre_kernel
    import iglesia
    global JS9_HELPER_PORT, JS9_DIR
    JS9_DIR = iglesia.JS9_DIR
    JS9_HELPER_PORT = iglesia.JS9HELPER_PORT
    try:
        global JS9_ERROR
        if not os.path.exists(JS9_DIR):
            raise JS9Error(f"{JS9_DIR} does not exist")
        message(f"Using JS9 install in {JS9_DIR}")
        global RADIOPADRE_INSTALL_PREFIX
        global RADIOPADRE_LOCAL_PREFIX
        global JS9_INSTALL_PREFIX
        global JS9_INIT_HTML_STATIC
        global JS9_INIT_HTML_DYNAMIC
        global JS9_SCRIPT_PREFIX
        global JS9_LOCAL_SETTINGS
        RADIOPADRE_INSTALL_PREFIX = f"{radiopadre_kernel.SHADOW_URL_PREFIX}/radiopadre-www"  # URL used to access radiopadre code
        RADIOPADRE_LOCAL_PREFIX = f"{radiopadre_kernel.SHADOW_URL_PREFIX}/{radiopadre_kernel.ABSROOTDIR}/.radiopadre"  # URL used to access radiopadre aux dir
        JS9_INSTALL_PREFIX = f"{radiopadre_kernel.SHADOW_URL_PREFIX}/js9-www"  # URL used to access JS9 code
        JS9_SCRIPT_PREFIX = radiopadre_kernel.SHADOW_URL_PREFIX
        JS9_LOCAL_SETTINGS = f"{radiopadre_kernel.SESSION_URL}/js9prefs.js"
        # load templated init HTML; the templates are .format()-ed with this
        # module's globals, so the URL prefixes above must be assigned first
        try:
            with open(os.path.join(DIRNAME, "js9-init-static-template.html"), "rt") as inp:
                JS9_INIT_HTML_STATIC = inp.read().format(**globals())
            with open(os.path.join(DIRNAME, "js9-init-dynamic-template.html"), "rt") as inp:
                JS9_INIT_HTML_DYNAMIC = inp.read().format(**globals())
        except Exception as exc:
            traceback.print_exc()
            JS9_ERROR = "Error reading init templates: {}".format(str(exc))
    except JS9Error as exc:
        if exc.message:
            JS9_ERROR = exc.message
    # on error, init code replaced by error message
    # NOTE(review): assumes JS9_ERROR is pre-initialized (e.g. to None) at
    # module level on the success path -- confirm against the module header.
    if JS9_ERROR:
        error(f"JS9 init error: {JS9_ERROR}")
def find_toplevel() -> pathlib.Path:
    """Return the repository's top-level directory, as reported by git."""
    toplevel = cmd_output(["rev-parse", "--show-toplevel"])
    return pathlib.Path(toplevel.strip())
def db20(value):
    """Convert a voltage-like quantity to decibels (20 * log10 of magnitude)."""
    magnitude = np.abs(value)
    return 20 * log10(magnitude)
def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
    """Finds the python class with the given name and constructs it with the given arguments.

    All positional and keyword arguments besides ``class_name`` are passed
    through to the class constructor. Delegates to ``call_func_by_name``,
    which resolves the name and calls the resulting object.
    """
    return call_func_by_name(*args, func_name=class_name, **kwargs)
def simple_file_scan(reader, bucket_name, region_name, file_name):
    """ Does an initial scan of the file, figuring out the file row count and which rows are too long/short

        Args:
            reader: the csv reader
            bucket_name: the bucket to pull from
            region_name: the region to pull from
            file_name: name of the file to pull

        Returns:
            file_row_count: the number of lines in the file
            short_rows: a list of row numbers that have too few fields
            long_rows: a list of rows that have too many fields
    """
    file_row_count = 0
    header_length = 0
    short_rows = []
    long_rows = []
    # Count file rows: throws a File Level Error for non-UTF8 characters.
    # A with-statement guarantees the handle is closed even if decoding or
    # parsing raises (the original leaked the handle on any exception).
    with open(reader.get_filename(region_name, bucket_name, file_name), encoding='utf-8') as temp_file:
        # Guess the delimiter from the header line: pipe wins only when it
        # occurs more often than the comma.
        header_line = temp_file.readline()
        delimiter = '|' if header_line.count('|') > header_line.count(',') else ','
        temp_file.seek(0)
        for line in csv.reader(temp_file, delimiter=delimiter):
            if not line:
                continue  # skip completely empty lines; they are not counted
            file_row_count += 1
            line_length = len(line)
            if header_length == 0:
                # First non-empty row (the header) sets the expected width
                header_length = line_length
            elif line_length < header_length:
                # Lines shorter than the header
                short_rows.append(file_row_count)
            elif line_length > header_length:
                # Lines longer than the header
                long_rows.append(file_row_count)
    return file_row_count, short_rows, long_rows
def delete_lblannots(ibs, lblannot_rowid_list):
    """deletes lblannots from the database

    Args:
        ibs: controller object holding the database connection.
        lblannot_rowid_list (list): rowids in the lblannot table to delete.
    """
    if ut.VERBOSE:
        logger.info('[ibs] deleting %d lblannots' % len(lblannot_rowid_list))
    ibs.db.delete_rowids(const.LBLANNOT_TABLE, lblannot_rowid_list)
def random_plane(model: typing.Union[torch.nn.Module, ModelWrapper], metric: Metric, distance=1, steps=20,
                 normalization='filter', deepcopy_model=False) -> np.ndarray:
    """
    Returns the computed value of the evaluation function applied to the model or agent along a planar
    subspace of the parameter space defined by a start point and two randomly sampled directions.
    The models supplied can be either torch.nn.Module models, or ModelWrapper objects
    from the loss_landscapes library for more complex cases.

    That is, given a neural network model, whose parameters define a point in parameter
    space, and a distance, the loss is computed at 'steps' * 'steps' points along the
    plane defined by the two random directions, from the start point up to the maximum
    distance in both directions.

    Note that the dimensionality of the model parameters has an impact on the expected
    length of a uniformly sampled other in parameter space. That is, the more parameters
    a model has, the longer the distance in the random other's direction should be,
    in order to see meaningful change in individual parameters. Normalizing the
    direction other according to the model's current parameter values, which is supported
    through the 'normalization' parameter, helps reduce the impact of the distance
    parameter. In future releases, the distance parameter will refer to the maximum change
    in an individual parameter, rather than the length of the random direction other.

    Note also that a simple planar approximation with randomly sampled directions can produce
    misleading approximations of the loss landscape due to the scale invariance of neural
    networks. The sharpness/flatness of minima or maxima is affected by the scale of the neural
    network weights. For more details, see `https://arxiv.org/abs/1712.09913v3`. It is
    recommended to normalize the directions, preferably with the 'filter' option.

    The Metric supplied has to be a subclass of the loss_landscapes.metrics.Metric class,
    and must specify a procedure whereby the model passed to it is evaluated on the
    task of interest, returning the resulting quantity (such as loss, loss gradient, etc).

    :param model: the model defining the origin point of the plane in parameter space
    :param metric: function of form evaluation_f(model), used to evaluate model loss
    :param distance: maximum distance in parameter space from the start point
    :param steps: at how many steps from start to end the model is evaluated
    :param normalization: normalization of direction vectors, must be one of 'filter', 'layer', 'model'
    :param deepcopy_model: indicates whether the method will deepcopy the model(s) to avoid aliasing
    :return: 1-d array of loss values along the line connecting start and end models
    """
    model_start_wrapper = wrap_model(copy.deepcopy(model) if deepcopy_model else model)

    # start_point aliases the (possibly copied) model's parameters; all the
    # in-place ops below therefore move the model itself through parameter space
    start_point = model_start_wrapper.get_module_parameters()
    dir_one = rand_u_like(start_point)
    dir_two = orthogonal_to(dir_one)

    if normalization == 'model':
        dir_one.model_normalize_(start_point)
        dir_two.model_normalize_(start_point)
    elif normalization == 'layer':
        dir_one.layer_normalize_(start_point)
        dir_two.layer_normalize_(start_point)
    elif normalization == 'filter':
        dir_one.filter_normalize_(start_point)
        dir_two.filter_normalize_(start_point)
    elif normalization is None:
        pass
    else:
        raise AttributeError('Unsupported normalization argument. Supported values are model, layer, and filter')

    # scale to match steps and total distance
    dir_one.mul_(((start_point.model_norm() * distance) / steps) / dir_one.model_norm())
    dir_two.mul_(((start_point.model_norm() * distance) / steps) / dir_two.model_norm())
    # Move start point so that original start params will be in the center of the plot:
    # step back by half the total span in each direction, then restore the
    # per-step length so the grid walk below uses single-step increments
    dir_one.mul_(steps / 2)
    dir_two.mul_(steps / 2)
    start_point.sub_(dir_one)
    start_point.sub_(dir_two)
    dir_one.truediv_(steps / 2)
    dir_two.truediv_(steps / 2)

    data_matrix = []
    # evaluate loss in grid of (steps * steps) points, where each column signifies one step
    # along dir_one and each row signifies one step along dir_two. The implementation is again
    # a little convoluted to avoid constructive operations. Fundamentally we generate the matrix
    # [[start_point + (dir_one * i) + (dir_two * j) for j in range(steps)] for i in range(steps].
    for i in range(steps):
        data_column = []

        for j in range(steps):
            # for every other column, reverse the order in which the column is generated
            # so you can easily use in-place operations to move along dir_two
            # (a boustrophedon walk: the end of one column is adjacent to the
            # start of the next)
            if i % 2 == 0:
                start_point.add_(dir_two)
                data_column.append(metric(model_start_wrapper))
            else:
                start_point.sub_(dir_two)
                data_column.insert(0, metric(model_start_wrapper))

        data_matrix.append(data_column)
        start_point.add_(dir_one)

    return np.array(data_matrix)
def _solarize_impl(pil_img, level):
    """Applies PIL Solarize to `pil_img`.

    Pixels above the threshold ``256 - level`` are inverted, so a higher
    `level` solarizes more of the image. (The previous docstring's mention
    of vertical translation was a copy-paste error from a translate op.)

    Args:
        pil_img: Image in PIL object.
        level: Strength of the operation specified as an Integer from
            [0, `PARAMETER_MAX`].

    Returns:
        A PIL Image that has had Solarize applied to it.
    """
    # Scale the raw level into the operation's configured maximum
    level = int_parameter(level, min_max_vals.solarize.max)
    return ImageOps.solarize(pil_img, 256 - level)
def detect_area(hsv_img, lower_color, upper_color, marker_id, min_size, draw=False):
    """Detects the contour of an object containing a marker based on color.

    It always returns the smallest contour (of at least ``min_size`` area)
    which still contains the marker. The contour is detected on an HSV image
    to be robust under different lighting conditions. If ``draw=True`` the
    system draws all found contours, as well as the current smallest one
    containing the marker, onto ``hsv_img``.

    :param hsv_img: image in HSV color space in which the contours should be detected
    :type hsv_img: numpy array
    :param lower_color: 3x1 array with the lower boundary for the color detection;
        a non-positive hue denotes a hue range that wraps around 0
    :type lower_color: numpy array
    :param upper_color: 3x1 array with the upper boundary for the color detection
    :type upper_color: numpy array
    :param marker_id: the ID of a 4x4 aruco marker which identifies the object
    :type marker_id: scalar
    :param min_size: minimum contour area to consider
    :param draw: (Default value = False)
    :returns: the smallest qualifying contour, or None
    """
    # Color detection. OpenCV hue lives in [0, 179]; a non-positive lower hue
    # means the desired range wraps around 0, so threshold two sub-ranges and
    # OR the masks. BUG FIX: the original aliased the caller's arrays
    # (second_lower = lower_color etc.), so the later writes corrupted both
    # ranges (and the caller's data) before thresholding; copy instead.
    if lower_color[0] <= 0:
        second_lower = lower_color.copy()
        second_lower[0] = 179 + lower_color[0]
        second_upper = upper_color.copy()
        second_upper[0] = 179
        first_lower = lower_color.copy()
        first_lower[0] = 0
        mask1 = cv2.inRange(hsv_img, first_lower, upper_color)
        mask2 = cv2.inRange(hsv_img, second_lower, second_upper)
        mask = mask1 | mask2
    else:
        mask = cv2.inRange(hsv_img, lower_color, upper_color)
    # TODO careful: depending on the OpenCV version the return may be different
    contours, hierachy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Marker detection on the V (brightness) channel
    split_hsv = cv2.split(hsv_img)
    gray = split_hsv[2]
    center_dict = track_aruco_marker(gray, [marker_id])
    center = center_dict[marker_id]
    if np.any(center != None):
        if draw:
            cv2.drawContours(hsv_img, contours, -1, (0, 255, 255), 3)
            cv2.circle(hsv_img, (center[0], center[1]), 7, (90, 255, 255), 7)
        # TODO smallest contour should be a real contour encompassing the whole image
        row, col = hsv_img.shape[:2]
        smallest_contour = np.array([[0, 0], [0, row], [col, row], [col, 0]])
        # TODO not needed with a real contour
        contour_found = 0
        for contour in contours:
            # Contour qualifies when the marker center lies strictly inside it
            # and its area reaches the minimum size
            marker_in_contour = cv2.pointPolygonTest(contour, tuple(center), False) > 0
            if marker_in_contour and cv2.contourArea(contour) >= min_size:
                if cv2.contourArea(contour) <= cv2.contourArea(smallest_contour):
                    contour_found = 1
                    smallest_contour = contour
        if contour_found == 1:
            if draw:
                cv2.drawContours(hsv_img, smallest_contour, -1, (90, 255, 255), 6)
            return smallest_contour
    return None
def payment_callback():
    """Generic payment page callback (synchronous notify).

    Reads the order serial number and result from the request, dispatches to
    a registered notify handler if one exists, otherwise renders a simple
    success/failure info page.
    """
    data = request.params
    sn = data['sn']
    result = data['result']
    is_success = result == 'SUCCESS'
    handle = get_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.SYNC)
    if handle:
        # arguments: success flag, order serial number
        return handle(is_success, sn)
    # BUG FIX: the messages used '{1}'.format(sn), which raises IndexError
    # because only one positional argument is supplied; use '{0}'.
    if is_success:
        return render_template('info.html', title='支付结果', msg='支付成功-订单号:{0}'.format(sn))
    return render_template('info.html', title='支付结果', msg='支付失败-订单号:{0}'.format(sn))
def get_users_name(path):
    """Collect the registered user information.

    Parameters
    ----------
    path : str
        Path to the home directory (the database lives at
        ``<path>/data/list.db``).

    Returns
    -------
    name_dict : dict
        Mapping of user number to user name from the ``miyano`` table.
    """
    path_db = os.path.join(path, 'data', 'list.db')
    # sqlite3's context manager only commits/rolls back; it does not close
    # the connection, so close it explicitly.
    conn = sqlite3.connect(path_db)
    try:
        conn.row_factory = sqlite3.Row
        cur = conn.execute('select * from miyano')
        name_dict = {row['number']: row['name'] for row in cur}
    finally:
        conn.close()
    return name_dict
def get_rest_parameter_state(parameter_parsing_states):
    """
    Gets the rest parameter's parsing state from the given states, if any.

    Parameters
    ----------
    parameter_parsing_states : `list` of ``ParameterParsingStateBase``
        The created parameter parser state instances.

    Returns
    -------
    parameter_parsing_state : ``ParameterParsingState``, `None`
        The first state whose parameter is a rest parameter, or `None`.
    """
    return next(
        (state for state in parameter_parsing_states
         if state.content_parser_parameter.is_rest),
        None,
    )
def random_user_id() -> str:
    """Return random user id as string (delegates to ``generate_random_id``)."""
    return generate_random_id()
def import_plugin(name):
    """Tries to import the given plugin module from backends/plugins.

    The module is registered under the name ``p_<name>``. Syntax or I/O
    problems are re-raised as ImportError, matching the behaviour of the
    historical ``imp``-based loader (``imp`` was deprecated since Python 3.4
    and removed in 3.12, hence the port to ``importlib``).
    """
    import importlib.util

    path = os.path.join(BASE_PATH, "backends", "plugins", name + ".py")
    spec = importlib.util.spec_from_file_location("p_" + name, path)
    if spec is None or spec.loader is None:
        raise ImportError("cannot create a module spec for %s" % path)
    plugin = importlib.util.module_from_spec(spec)
    try:
        spec.loader.exec_module(plugin)
    except SyntaxError as e:
        # plugin file exists but does not parse
        raise ImportError(str(e))
    except OSError as e:
        # plugin file missing or unreadable (was IOError with the old loader)
        raise ImportError(str(e))
    return plugin
def batch_answer_same_question(question: str, contexts: List[str]) -> List[str]:
    """Answers the question with the given contexts (local mode).

    :param question: The question to answer.
    :type question: str
    :param contexts: The contexts to answer the question with.
    :type contexts: List[str]
    :return: The answers.
    :rtype: List[str]
    """
    # Lazily load the model on first use; _answerer is a module-level global.
    if _answerer is None:
        load_answerer()
    assert _answerer is not None
    tokenizer = get_answerer_tokenizer()
    # One prompt per context, all asking the same question.
    prompts = [
        answerer_prompt.format(question=question, context=context)
        for context in contexts
    ]
    # The longest tokenized prompt parameterizes generation
    # (presumably used to size the generation budget -- see
    # format_parameters_to_local).
    information = {
        "prompt_length": max(len(tokenizer.encode(prompt)) for prompt in prompts)
    }
    parameters = format_parameters_to_local(answerer_parameters, information)
    response = _answerer(prompts, **parameters)
    # Keep the first candidate per prompt, truncated at the stop sequence(s).
    return [
        cut_on_stop(choices[0]["generated_text"], answerer_parameters["stop"])
        for choices in response
    ]
def test_delete_invite_timestamp_issue(client: flask.testing.FlaskClient) -> None:
    """Test that the delete_invite cronjob deletes invites with '0000-00-00 00:00:00' timestamp."""
    # The fixture database is expected to seed exactly two invites.
    assert len(db.session.query(models.Invite).all()) == 2
    invites = db.session.query(models.Invite).all()
    # Corrupt every invite's timestamp to the MySQL zero-date sentinel.
    for invite in invites:
        invite.created_at = "0000-00-00 00:00:00"
    db.session.commit()
    # Run the cronjob under test: it must remove all zero-timestamp invites.
    delete_invite()
    assert len(db.session.query(models.Invite).all()) == 0
def rename13(dirs=None, basis_dir=None):
    """
    Renames all ``*.13`` files in ``dirs`` to ``fort.13``

    :param list dirs: list of directory names (searched under ``basis_dir``);
        when omitted, all ``landuse_*`` directories are searched instead
    :param string basis_dir: directory containing the ``landuse_*`` folders
    """
    if dirs is None and basis_dir is None:
        matches = glob.glob(os.path.join('landuse_*', '*.13'))
    elif dirs is None and basis_dir:
        matches = glob.glob(os.path.join(basis_dir, 'landuse_*', '*.13'))
    else:
        matches = [glob.glob(os.path.join(basis_dir, sub) + '/*.13')[0]
                   for sub in dirs]
    for old_path in matches:
        os.rename(old_path, os.path.join(os.path.dirname(old_path), 'fort.13'))
def ObitHelp (Task):
    """
    Give Help for OBIT task Task

    * Task = ObitTask name to give (e.g. "Feather")
    """
    ################################################################
    # Construct the task object and print its built-in help text
    t=ObitTask(Task)
    t.help()
    # end ObitHelp
def run_cross_validation_v2(n_folds: int = 10) -> None:
    """ V2 of Function to derive a keras solution with cross-validation for selected drivers

    Folds are split over unique drivers (not over images), so no driver
    appears in both the train and validation split of a fold. Per-fold
    model weights are checkpointed to the cache directory, out-of-fold
    validation predictions are aggregated for an overall log-loss
    estimate, and averaged test predictions are written as a submission.

    Args:
        n_folds: Number of cross-validation folds
    """
    np.random.seed(2016)
    warnings.filterwarnings("ignore")
    use_cache = 1
    # input image dimensions
    img_rows, img_cols = 64, 64
    # color type: 1 - grey, 3 - rgb
    color_type_global = 1
    batch_size = 16
    nb_epoch = 50
    random_state = 51
    restore_from_last_checkpoint = 0
    train_data, train_target, train_id, driver_id, unique_drivers = \
        read_and_normalize_train_data_rotated(img_rows, img_cols, use_cache, color_type_global)
    test_data, test_id = read_and_normalize_test_data_rotated(img_rows, img_cols, use_cache, color_type_global)
    # NOTE(review): a single model instance is reused across folds, so each
    # fold continues from the previous fold's weights unless the checkpoint
    # load below replaces them -- confirm this is intended.
    model = create_model_v2(img_rows, img_cols, color_type_global)
    y_full_train = dict()
    y_full_test = []
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    num_fold = 0
    sum_score = 0
    for train_drivers, test_drivers in kf.split(unique_drivers):
        # Map fold indices back to driver ids, then gather all images
        # belonging to those drivers.
        unique_list_train = [unique_drivers[i] for i in train_drivers]
        x_train, y_train, train_index = \
            copy_selected_drivers(train_data, train_target, driver_id, unique_list_train)
        unique_list_valid = [unique_drivers[i] for i in test_drivers]
        x_valid, y_valid, test_index = \
            copy_selected_drivers(train_data, train_target, driver_id, unique_list_valid)
        num_fold += 1
        print('Start KFold number {} from {}'.format(num_fold, n_folds))
        print('Split train: ', len(x_train), len(y_train))
        print('Split valid: ', len(x_valid), len(y_valid))
        print('Train drivers: ', unique_list_train)
        print('Test drivers: ', unique_list_valid)
        k_fold_weights_path = os.path.join(os.path.dirname(__file__), '..', 'cache',
                                           'weights_k_fold_' + str(num_fold) + '.h5')
        if not os.path.isfile(k_fold_weights_path) or restore_from_last_checkpoint == 0:
            callbacks = [
                EarlyStopping(monitor='val_loss', patience=1, verbose=0),
                ModelCheckpoint(k_fold_weights_path, monitor='val_loss', save_best_only=True, verbose=0),
            ]
            # NOTE(review): `nb_epoch` is the Keras 1.x argument name; newer
            # Keras spells it `epochs` -- confirm the pinned version.
            model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                      shuffle=True, verbose=1, validation_data=(x_valid, y_valid),
                      callbacks=callbacks)
        if os.path.isfile(k_fold_weights_path):
            model.load_weights(k_fold_weights_path)
        predictions_valid = model.predict(x_valid, batch_size=batch_size, verbose=1)
        score = log_loss(y_valid, predictions_valid)
        print('Score log_loss: ', score)
        # Weight each fold's score by its validation size for the average
        sum_score += score * len(test_index)
        # Store valid predictions
        for i in range(len(test_index)):
            y_full_train[test_index[i]] = predictions_valid[i]
        # Store test predictions
        test_prediction = model.predict(test_data, batch_size=batch_size, verbose=1)
        y_full_test.append(test_prediction)
    score = sum_score / len(train_data)
    print("Log_loss train independent avg: ", score)
    predictions_valid = get_validation_predictions(train_data, y_full_train)
    score1 = log_loss(train_target, predictions_valid)
    # Sanity check: the weighted fold average should equal the score over
    # all aggregated out-of-fold predictions.
    if abs(score1 - score) > 0.0001:
        print('Check error: {} != {}'.format(score, score1))
    print('Final log_loss: {}, rows: {} cols: {} n_folds: {} epoch: {}'.format(
        score, img_rows, img_cols, n_folds, nb_epoch))
    test_res = merge_several_folds_fast(y_full_test, n_folds)
    create_submission(test_res, test_id, 'keras_cv_drivers_v2')
    save_useful_data(predictions_valid, train_id, model, 'keras_cv_drivers_v2')
def add_watch(expr): #py:add_watch
    """
    Adds a valid Python expression (given as a string) to
    the watch list.

    :param expr: Python expression, as a string, to be evaluated and
        shown in the watch display (delegates to the RUR bridge).
    """
    RUR.add_watch(expr)
def calculate_log_probs(conditioners, joint_dists):
    """
    Calculates the marginal log probabilities of each feature's values and also the conditional
    log probabilities for the predecessors given in the predecessor map.

    Args:
        conditioners: per-feature conditioner (predecessor) list; its length
            is the number of features.
        joint_dists: lookup of joint distributions, indexed by feature pairs
            (the diagonal entry joint_dists[f, f] is feature f's marginal).

    Returns:
        (log_marginals, log_conditionals) lists, one entry per feature.
    """
    # BUG FIX: `xrange` is Python 2 only and raises NameError on Python 3;
    # `range` is behavior-identical for iteration.
    log_marginals = [
        N.log(joint_dists[f, f])
        for f in range(len(conditioners))
    ]
    log_conditionals = [
        conditional_log_dist(feature, conditioner, joint_dists)
        for feature, conditioner in enumerate(conditioners)
    ]
    return log_marginals, log_conditionals
def main(argv=None):
    """script main.

    Kills SGE cluster jobs whose job name matches a pattern, by parsing
    ``qstat -xml`` output and invoking ``qdel``.

    parses command line options in sys.argv, unless *argv* is given.

    NOTE(review): Python 2-era code (`StringIO.StringIO`, optparse-style
    `E.OptionParser`); a Python 3 port would need `io.StringIO` / bytes
    decoding of the subprocess output -- confirm the target interpreter.
    """
    if not argv:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(
        version="%prog version: $Id$",
        usage=globals()["__doc__"])

    parser.add_option(
        "-p", "--pattern-identifier", dest="pattern", type="string",
        help="jobs matching `pattern` in their job "
        "description will be killed [default=%default].")

    parser.add_option("-n", "--dry-run", dest="dry_run", action="store_true",
                      help="do dry run, do not kill [default=%default].")

    parser.set_defaults(
        pattern=None,
        dry_run=False,
    )

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)

    # capture and parse the qstat XML job listing
    output = StringIO.StringIO(
        subprocess.Popen(["qstat", "-xml"],
                         stdout=subprocess.PIPE).communicate()[0])
    tree = xml.etree.ElementTree.ElementTree(file=output)

    ntested = 0
    to_kill = set()

    if options.pattern:
        pattern = re.compile(options.pattern)
    else:
        pattern = None

    # collect the job ids whose name matches the pattern
    for x in tree.getiterator("job_list"):
        ntested += 1
        id = x.find("JB_job_number").text
        name = x.find("JB_name").text
        if pattern and pattern.search(name):
            to_kill.add(id)

    nkilled = len(to_kill)
    # NOTE(review): with an empty to_kill set this still executes `qdel ""`
    # -- confirm whether a guard is wanted.
    if not options.dry_run:
        p = subprocess.Popen(
            ["qdel", ",".join(to_kill)], stdout=subprocess.PIPE)
        stdout, stderr = p.communicate()

    E.info("ntested=%i, nkilled=%i" % (ntested, nkilled))

    # write footer and output benchmark information.
    E.Stop()
def validate_response_code(response, expected_res):
    """ Function to validate work order response.

    If the response carries an "error" member, its "code" is compared with
    `expected_res`. Otherwise the response is treated as a successful
    WorkOrderGetResult and its "result" member is checked for the expected
    set of keys.

    Input Parameters : response, expected_res (expected status code)
    Returns : err_cd (0 on success, 1 on mismatch)"""
    # check expected key of test case
    check_result = {"error": {"code": 5}}
    check_result_key = list(check_result.keys())[0]
    # check response code
    if check_result_key in response:
        # NOTE(review): if either of the next two conditions is False without
        # its else being taken, err_cd can remain unassigned and the final
        # return raises NameError; unreachable with the hard-coded
        # check_result above, but fragile.
        if "code" in check_result[check_result_key].keys():
            if "code" in response[check_result_key].keys():
                if (response[check_result_key]["code"] ==
                        expected_res):
                    err_cd = 0
                    if expected_res == 0:
                        logger.info('SUCCESS: Worker API response "%s"!!',
                                    response[check_result_key]["message"])
                    elif expected_res == 2:
                        logger.info(
                            'Invalid parameter format in response "%s".',
                            response[check_result_key]["message"])
                    elif expected_res == 5:
                        logger.info('SUCCESS: WorkOrderSubmit response'
                                    ' key error and status (%s)!!\n',
                                    check_result[check_result_key]["code"])
                else:
                    err_cd = 1
                    logger.info(
                        'ERROR: Response did not contain expected code '
                        '%s.\n', check_result[check_result_key]["code"])
            else:
                err_cd = 1
                logger.info('ERROR: Response did not contain expected '
                            'code %s. \n',
                            check_result[check_result_key]["code"])
    else:
        # No "error" member: validate a WorkOrderGetResult payload by key set
        check_get_result = '''{"result": {"workOrderId": "", "workloadId": "",
            "workerId": "", "requesterId": "", "workerNonce": "",
            "workerSignature": "", "outData": ""}}'''
        check_result = json.loads(check_get_result)
        check_result_key = list(check_result.keys())[0]
        if check_result_key == "result":
            if (set(check_result["result"].keys()).issubset
                    (response["result"].keys())):
                # Expected Keys : check_result["result"].keys()
                # Actual Keys : response["result"].keys()
                err_cd = 0
                logger.info('SUCCESS: WorkOrderGetResult '
                            'response has keys as expected!!')
            else:
                err_cd = 1
                logger.info('ERROR: Response did not contain keys '
                            'as expected in for test case. ')
        else:
            err_cd = 0
            logger.info('No validation performed for the expected result '
                        'in validate response. ')
    return err_cd
def _calc_density(x: np.ndarray, y: np.ndarray):
"""\
Function to calculate the density of cells in an embedding.
"""
from scipy.stats import gaussian_kde
# Calculate the point density
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
min_z = np.min(z)
max_z = np.max(z)
# Scale between 0 and 1
scaled_z = (z - min_z) / (max_z - min_z)
return scaled_z | 5,323,925 |
def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=dict()):
    """Generate the Module code corresponding to ``sub_layers``.

    (Docstring translated from the original Chinese.)

    Args:
        graph (x2paddle.core.program.PaddleGraph): the whole Paddle graph.
        sub_layers (dict): mapping from layer id to the layers of the subgraph.
        sub_layers_name (str): name of the subgraph; used as the generated
            class name.
        different_attrs (dict/list): attributes that are given different
            values at each call site; a dict maps attribute name to its
            default value.
            NOTE(review): the mutable ``dict()`` default is shared across
            calls -- apparently never mutated here, but confirm.

    Returns:
        str: the generated source code of a ``paddle.nn.Layer`` subclass.
    """
    def gen_codes(code_list, indent=0):
        """Render ``code_list`` as indented source lines.

        Args:
            code_list (list): lines of code (without trailing newlines).
            indent (int): indentation level, 4 spaces per level.

        Returns:
            list: the lines, indented and terminated with '\n'.
        """
        indent_blank = "    " * indent
        codes = []
        for code_line in code_list:
            if code_line.strip() == "":
                # Preserve blank lines without trailing indentation.
                codes.append('\n')
            else:
                codes.append(indent_blank + code_line + '\n')
        return codes

    def gen_head(inputs, different_attrs):
        # Generate the header code of the Layer class.
        head = gen_codes(
            ["class {}(paddle.nn.Layer):".format(sub_layers_name)], indent=0)
        # Generate the header of the __init__ function; the per-call-site
        # attributes become constructor parameters.
        diff_str_list = list()
        if isinstance(different_attrs, dict):
            for k, v in different_attrs.items():
                diff_str_list.append("{}={}".format(k, v))
            attrs_str = ", ".join(diff_str_list)
        else:
            attrs_str = ", ".join(different_attrs)
        init_func_head = \
            gen_codes(["def __init__(self, {}):".format(attrs_str)], indent=1) + \
            gen_codes(["super({}, self).__init__()".format(sub_layers_name)], indent=2)
        # Generate the header of the forward function.
        input_data_name = ", ".join(inputs)
        forward_func_head = \
            gen_codes(["def forward(self, {}):".format(input_data_name)], indent=1)
        return head, init_func_head, forward_func_head

    init_func = []
    forward_func = []
    cur_outputs = list()
    inputs = list()
    outputs = list()
    param_prefix_list = list()  # NOTE(review): apparently unused below.
    input_id = 0  # NOTE(review): apparently unused below.
    # First pass: collect the subgraph's external outputs.
    for layer_id, layer in sub_layers.items():
        if layer_id not in graph.edges_out:
            # Terminal layer of the whole graph: all eligible outputs of
            # this layer are outputs of the subgraph.
            for index, output_name in enumerate(layer.outputs):
                if layer.kernel.startswith(
                        "paddle.nn"
                ) and index == 0 and "functional" not in layer.kernel:
                    # outputs[0] of a paddle.nn layer is the sublayer's
                    # attribute name, not a tensor.
                    continue
                if not output_name.startswith("x") or output_name in outputs \
                        or layer.kernel == "prim.assert":
                    continue
                elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                    if index != 0:
                        outputs.append(output_name)
                elif output_name not in outputs:
                    outputs.append(output_name)
            continue
        # Non-terminal layer: an output is external if any consumer lies
        # outside the subgraph.
        for out_layer_id in graph.edges_out[layer_id]:
            if out_layer_id not in sub_layers:
                for index, output_name in enumerate(layer.outputs):
                    if layer.kernel.startswith(
                            "paddle.nn"
                    ) and index == 0 and "functional" not in layer.kernel:
                        continue
                    if not output_name.startswith("x") or output_name in outputs \
                            or layer.kernel == "prim.assert":
                        continue
                    elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                        if index != 0:
                            outputs.append(output_name)
                    else:
                        outputs.append(output_name)
        if layer.kernel == "prim.dict":
            # A dict that is only ever mutated via prim.set_item is
            # exposed as an output as well.
            is_set_item = True
            for out_layer_id in graph.edges_out[layer_id]:
                # NOTE(review): assumes every consumer of a prim.dict is
                # inside sub_layers; a consumer outside it would raise
                # KeyError here -- confirm.
                out_layer = sub_layers[out_layer_id]
                if out_layer.kernel != "prim.set_item":
                    is_set_item = False
                    break
            if is_set_item:
                outputs.append(layer.outputs[0])
    no_output_count = 0  # NOTE(review): apparently unused below.
    # Second pass: emit __init__ and forward code per layer.
    for i, (layer_id, layer) in enumerate(sub_layers.items()):
        _update_attrs(layer, different_attrs)
        if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
                layer.kernel.startswith("custom_layer"):
            # Stateful sublayer: instantiate in __init__, call in forward.
            line = "self.{}".format(layer.outputs[0])
            if layer.kernel.startswith("custom_layer"):
                line += " = x2paddle_nn.{}(".format(layer.kernel.split(":")[-1])
            else:
                line += " = {}(".format(layer.kernel)
            for k, v in layer.attrs.items():
                key_name = "{}_{}".format(layer.outputs[0], k)
                if key_name in different_attrs:
                    # Per-call-site attribute: forward the ctor parameter.
                    line += "{}={}, ".format(k, key_name)
                else:
                    line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            init_func.extend(gen_codes([line], indent=2))
            if len(layer.outputs) == 1:
                line = layer.outputs[0]
            elif len(layer.outputs) == 2:
                line = layer.outputs[1]
            else:
                if layer.kernel == "paddle.nn.LSTM":
                    # LSTM returns (output, (h, c)).
                    line = "{}, ({})".format(layer.outputs[1],
                                             ', '.join(layer.outputs[-2:]))
                else:
                    line = ','.join(layer.outputs[1:])
            line += " = self.{}(".format(layer.outputs[0])
            for k, v in layer.inputs.items():
                if v not in cur_outputs and v not in inputs:
                    # Not produced inside the subgraph: becomes a forward arg.
                    inputs.append(v)
                line += "{}, ".format(v)
            line = line.strip(", ")
            line += ")"
            forward_func.extend(gen_codes([line], indent=2))
            if len(layer.outputs) == 1:
                cur_outputs.append(layer.outputs[0])
            else:
                cur_outputs.extend(layer.outputs[1:])
        elif "prim" in layer.kernel:
            # Primitive op: delegate code generation to prim2code helpers.
            func_name = layer.kernel.replace(".", "_")
            from x2paddle.op_mapper.pytorch2paddle import prim2code
            if hasattr(prim2code, func_name):
                for k, v in layer.inputs.items():
                    if v not in cur_outputs and v not in inputs:
                        inputs.append(v)
                func = getattr(prim2code, func_name)
                func(
                    layer,
                    indent=2,
                    init_func=init_func,
                    forward_func=forward_func,
                    layer_id=layer_id,
                    different_attrs=list(different_attrs.keys())
                    if isinstance(different_attrs, dict) else different_attrs)
                cur_outputs.extend(layer.outputs)
            else:
                raise Exception(
                    "The kind {} in paddle model is not supported yet.".format(
                        layer.kernel))
        elif layer.kernel == "module":
            # Nested generated module: instantiate in __init__, call in
            # forward, analogous to the paddle.nn branch.
            line = "self.{} = {}(".format(layer.outputs[0],
                                          layer.attrs["module"])
            layer.attrs.pop("module")
            for k, v in layer.attrs.items():
                key_name = "{}_{}".format(layer.outputs[0], k)
                if key_name in different_attrs:
                    line += "{}={}, ".format(k, key_name)
                else:
                    line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            init_func.extend(gen_codes([line], indent=2))
            if len(layer.outputs) == 2:
                line = layer.outputs[1]
            else:
                line = ','.join(layer.outputs[1:])
            line += " = self.{}(".format(layer.outputs[0])
            for k, v in layer.inputs.items():
                if v not in cur_outputs and v not in inputs:
                    inputs.append(v)
                line += "{}, ".format(v)
            line = line.strip(", ")
            line += ")"
            forward_func.extend(gen_codes([line], indent=2))
            cur_outputs.extend(layer.outputs[1:])
        else:
            # Plain functional call emitted directly into forward.
            if layer.kernel == "paddle.to_tensor":
                v = layer.attrs["data"]
                if v not in cur_outputs and v not in inputs:
                    inputs.append(v)
            if len(layer.outputs) == 1:
                line = layer.outputs[0]
            else:
                line = ','.join(layer.outputs)
            line += " = {}(".format(layer.kernel)
            for k, v in layer.inputs.items():
                if isinstance(v, list):
                    line += "{}=[{}], ".format(k, ", ".join(v))
                    for lv in v:
                        if lv not in cur_outputs and lv not in inputs:
                            inputs.append(lv)
                else:
                    if v not in cur_outputs and v not in inputs:
                        inputs.append(v)
                    if k == "args":
                        # Positional passthrough: emit verbatim.
                        line += v
                    else:
                        line += "{}={}, ".format(k, v)
            for k, v in layer.attrs.items():
                key_name = "{}_{}".format(layer.outputs[0], k)
                if key_name in different_attrs:
                    # Per-call-site attribute: stored on self in __init__.
                    line += "{}=self.{}, ".format(k, key_name)
                    init_func.extend(
                        gen_codes(
                            ["self.{} = {}".format(key_name, key_name)],
                            indent=2))
                else:
                    line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            if layer.kernel == "self.create_parameter":
                # Parameters are created in __init__ and read in forward.
                init_func.extend(gen_codes(["self." + line], indent=2))
                forward_func.extend(
                    gen_codes(
                        [
                            "{} = self.{}".format(layer.outputs[0],
                                                  layer.outputs[0])
                        ],
                        indent=2))
            else:
                forward_func.extend(gen_codes([line], indent=2))
            cur_outputs.extend(layer.outputs)
    head, init_func_head, forward_func_head = gen_head(inputs, different_attrs)
    output_data_name = ", ".join(outputs)
    # Remove to_tensor ops: their data is passed in as forward inputs.
    forward_func_new = list()
    for line in forward_func:
        if "paddle.to_tensor" in line:
            continue
        forward_func_new.append(line)
    code_list = head + init_func_head + init_func + \
        forward_func_head + forward_func_new + \
        gen_codes(["return {}".format(output_data_name)], indent=2)
    code_str = "".join(code_list)
    return code_str
def savetable_S4(filename, time, wavelength, bin_width, lcdata, lcerr):
    """Save Stage-4 light-curve data to disk as an ECSV table.

    The 2D light-curve arrays are flattened into long format (one row
    per time/wavelength pair) and written with astropy.

    Parameters
    ----------
    filename : str
        Output file path.
    time : ndarray
        Time stamps.
    wavelength, bin_width : ndarray
        Wavelength bin centres and widths.
    lcdata, lcerr : ndarray
        2D light-curve values and errors.

    Raises
    ------
    ValueError
        If the array shapes are mutually inconsistent; the message lists
        the original shapes.
    """
    # assumes lcdata.T is (integrations, wavelengths) -- TODO confirm
    d0, d1 = lcdata.T.shape
    shapes_before = [str(arr.shape)
                     for arr in (time, wavelength, bin_width, lcdata, lcerr)]
    # Build long-format columns matching lcdata.T.flatten() ordering.
    columns = [np.repeat(time, d1),
               np.tile(wavelength, d0),
               np.tile(bin_width, d0),
               lcdata.T.flatten(),
               lcerr.T.flatten()]
    try:
        table = QTable(columns,
                       names=('time', 'wavelength', 'bin_width',
                              'lcdata', 'lcerr'))
        ascii.write(table, filename, format='ecsv', overwrite=True,
                    fast_writer=True)
    except ValueError as e:
        raise ValueError("There was a shape mismatch between your arrays which had shapes:\n"+
                         "time, wavelength, bin_width, lcdata, lcerr\n"+
                         ",".join(shapes_before)) from e
def test_CreativeProject_integration_ask_tell_one_loop_kwarg_response_works(covars, model_type, train_X, train_Y,
                                                                            covars_proposed_iter, covars_sampled_iter,
                                                                            response_sampled_iter, kwarg_response, random_start,
                                                                            monkeypatch):
    """
    test that a single loop of ask/tell works when providing response as kwarg to tell: creates a candidate, creates a
    model, stores covariates and response. Monkeypatch "_read_response_manual_input" from ._observe.py to circumvent
    manual input via builtins.input and provides response via kwargs
    """
    # initialize the class
    cc = TuneSession(covars=covars, model=model_type, random_start=random_start)
    # set attributes on class (to simulate previous iterations of ask/tell functionality)
    cc.train_X = train_X
    cc.proposed_X = train_X
    cc.train_Y = train_Y
    cc.model["covars_proposed_iter"] = covars_proposed_iter
    cc.model["covars_sampled_iter"] = covars_sampled_iter
    cc.model["response_sampled_iter"] = response_sampled_iter
    if covars_proposed_iter > 0:
        # past the initial random phase: disable random sampling so ask()
        # uses the model-driven path
        cc.num_initial_random_points = 0
        cc.random_sampling_method = None
    # monkeypatch "_read_covars_manual_input" so no interactive input is needed;
    # the candidate is simply the covars' first (initial-guess) values
    candidate_tensor = torch.tensor([[tmp[0] for tmp in covars]], dtype=torch.double)

    def mock_read_covars_manual_input(additional_text):
        return candidate_tensor
    monkeypatch.setattr(cc, "_read_covars_manual_input", mock_read_covars_manual_input)
    # # monkeypatch "_read_response_manual_input"
    # resp_tensor = torch.tensor([[12]], dtype=torch.double)
    #
    # def mock_read_response_manual_input(additional_text):
    #     return resp_tensor
    # monkeypatch.setattr(cc, "_read_response_manual_input", mock_read_response_manual_input)
    # run the ask method
    cc.ask()
    # run the tell method, with the response supplied as a kwarg
    cc.tell(response_obs=kwarg_response)
    ### check for tell (no reason to assert for ask)###
    # assert that a new observation has been added for covariates
    if train_X is not None:
        assert cc.train_X.size()[0] == train_X.size()[0] + 1
    else:
        assert cc.train_X.size()[0] == 1
    # assert that the right elements have been added to the covariate observation
    for i in range(cc.train_X.size()[1]):
        assert cc.train_X[-1, i].item() == candidate_tensor[0, i].item()
    # assert that a new observation has been added for the response
    if train_Y is not None:
        assert cc.train_Y.size()[0] == train_Y.size()[0] + 1
    else:
        assert cc.train_Y.size()[1] == 1
    # assert that the right elements have been added to the response observation
    assert cc.train_Y[-1, 0].item() == kwarg_response[0, 0].item()  # resp_tensor[0, 0].item()
    ### check that acquisition function and model have been added
    # check that a model function has been assigned (should happen in all cases as part of tell)
    assert cc.model["model"] is not None
    # check that an acquisition function has been added (only if some data present in train_X, train_Y at first step)
    if train_X is not None:
        assert cc.acq_func["object"] is not None
def test_create_user_missing_role(mocker: Any, get_user_by_email_failure_fixture: Any, get_role_fixture: Any) -> None:
    """Check if create user method raises exception when role does not exists.

    The fixture parameters are not referenced directly; requesting them
    activates the patched user/role lookups for this test.
    """
    email = 'test@mail.com'
    password = 'medtagger'
    first_name = 'First'
    last_name = 'Last'
    # The role lookup (patched by the fixture) reports a missing role, so
    # user creation must fail with the expected message.
    with pytest.raises(InvalidArgumentsException) as exception:
        create_user(email, password, first_name, last_name)
    assert str(exception.value) == 'Role does not exist.'
def read_all_files(filenames):
    """Read all files into a single StringIO buffer.

    File contents are concatenated in the given order, separated by a
    newline between files.

    Parameters
    ----------
    filenames : iterable of str
        Paths of the files to read.

    Returns
    -------
    io.StringIO
        Buffer positioned at the start of the combined contents.
    """
    contents = []
    for name in filenames:
        # Use a context manager so each handle is closed promptly (the
        # original opened files without ever closing them).
        with open(name) as fh:
            contents.append(fh.read())
    return io.StringIO('\n'.join(contents))
def run(r_srffile, sim_id=0):
    """
    Creates a SRF plot from an SRF file.

    For every segment in the SRF file, slip and rupture-initiation-time
    (tinit) files are produced with the GP ``srf2xyz`` tool inside the
    simulation's temporary directory, then the plot is written to the
    simulation's output directory.

    Parameters
    ----------
    r_srffile : str
        Path of the SRF file to plot (must contain ".srf").
    sim_id : int
        Simulation id used to locate the per-simulation directories.
    """
    install = InstallCfg.getInstance()
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    srf2xyz_bin = os.path.join(install.A_GP_BIN_DIR, "srf2xyz")
    # Save current directory so it can be restored even on failure
    # (the original skipped the restore when a step raised).
    old_cwd = os.getcwd()
    os.chdir(a_tmpdir)
    try:
        # Get number of segments
        num_segments = get_srf_num_segments(r_srffile)
        srfbase = r_srffile[0:r_srffile.find(".srf")]
        # Write slip and tinit files for each segment
        for seg in range(num_segments):
            slipfile = "%s_seg%d.slip" % (srfbase, seg)
            cmd = ("%s calc_xy=0 type=slip nseg=%d < %s > %s" %
                   (srf2xyz_bin, seg, r_srffile, slipfile))
            bband_utils.runprog(cmd)
            tinitfile = "%s_seg%d.tinit" % (srfbase, seg)
            cmd = ("%s calc_xy=0 type=tinit nseg=%d < %s > %s" %
                   (srf2xyz_bin, seg, r_srffile, tinitfile))
            bband_utils.runprog(cmd)
        plottitle = 'Rupture Model for %s' % (r_srffile)
        plot(plottitle, r_srffile, a_outdir)
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_cwd)
def calculate_distance(geojson, unit: Unit = Unit.meters) -> Optional[float]:
    """
    Compute the length of a LineString or MultiLineString GeoJSON feature.

    Raises geojson_length.exc.GeojsonLengthException if the input GeoJSON
    is invalid (missing geometry, coordinates or type).

    :param geojson: GeoJSON feature of type LineString or MultiLineString
    :param unit: Unit of the result
    :return: distance in preferred units, or None for unsupported types
    """
    # Inputs without a .get method (non-mappings) are rejected up front.
    try:
        geometry = geojson.get("geometry", None)
    except AttributeError:
        raise GeojsonLengthException(
            "Invalid GeoJSON provided. Should be geojson.geometry.LineString,"
            " geojson.geometry.MultiLineString or dict"
        )
    if not geometry:
        raise GeojsonLengthException("Provided GeoJSON object has no geometry field")
    coordinates = geometry.get("coordinates", None)
    if not coordinates:
        raise GeojsonLengthException(
            "Provided GeoJSON object has no coordinates specified in geometry field"
        )
    geometry_type = geometry.get("type", None)
    if not geometry_type:
        raise GeojsonLengthException(
            "Provided GeoJSON object has no type specified in geometry field"
        )
    if geometry_type == "MultiLineString":
        # Total length is the sum of the per-line lengths.
        return sum(calculate_line_string(line, unit) for line in coordinates)
    if geometry_type == "LineString":
        return calculate_line_string(coordinates, unit)
    return None
def weighted_mse_loss(y_true, y_pred):
    """
    apply weights on heatmap mse loss to only pick valid keypoint heatmap

    since y_true would be gt_heatmap with shape
    (batch_size, heatmap_size[0], heatmap_size[1], num_keypoints)
    we sum up the heatmap for each keypoints and check. Sum for invalid
    keypoint would be 0, so we can get a keypoint weights tensor with shape
    (batch_size, 1, 1, num_keypoints)
    and multiply to loss

    NOTE(review): despite the name, the final K.sqrt makes this a *root*
    mean squared error -- confirm that is intended.
    """
    # Per-keypoint heatmap sums; a zero sum marks an invalid/missing keypoint.
    heatmap_sum = K.sum(K.sum(y_true, axis=1, keepdims=True), axis=2, keepdims=True)
    # keypoint_weights shape: (batch_size, 1, 1, num_keypoints), with
    # valid_keypoint = 1.0, invalid_keypoint = 0.0
    keypoint_weights = 1.0 - K.cast(K.equal(heatmap_sum, 0.0), 'float32')
    return K.sqrt(K.mean(K.square((y_true - y_pred) * keypoint_weights)))
def p_marketprices(
    i: pd.DatetimeIndex,
    avg: float = 100,
    year_amp: float = 0.30,
    week_amp: float = 0.05,
    peak_amp: float = 0.30,
    has_unit: bool = True,
) -> pd.Series:
    """Create a more or less realistic-looking forward price curve timeseries.

    Parameters
    ----------
    i : pd.DatetimeIndex
        Timestamps for which to create prices.
    avg : float, optional (default: 100)
        Average price in Eur/MWh.
    year_amp : float, optional (default: 0.3)
        Yearly amplitude as fraction of average. If positive: winter prices > summer prices.
    week_amp : float, optional (default: 0.05)
        Weekly amplitude as fraction of average. If positive: midweek prices > weekend prices.
    peak_amp : float, optional (default: 0.3)
        Peak-offpeak amplitude as fraction of average. If positive: peak prices > offpeak prices.
    has_unit : bool, optional (default: True)
        If True, return Series with pint unit in Eur/MWh.

    Returns
    -------
    pd.Series
        Price timeseries.

    Raises
    ------
    ValueError
        If the fractional amplitudes sum to more than 100% (which could
        produce negative prices).
    """
    if year_amp + week_amp + peak_amp > 1:
        raise ValueError(
            f"Sum of fractional amplitudes ({year_amp:.1%} and {week_amp:.1%} and {peak_amp:.1%}) should not exceed 100%."
        )
    # year angle: 1jan0:00..1jan0:00 -> 0..2pi. But: uniform within month
    ya = i.map(lambda ts: ts.month) / 12 * np.pi * 2
    # week angle: Sun0:00..Sun0:00 -> 0..2pi. But: uniform within day.
    wa = i.map(lambda ts: ts.weekday() + 1) / 7 * np.pi * 2
    # peak fraction: -1 (middle of offpeak hours) .. 1 (middle of peak hours)
    if i.freq in ["H", "15T"]:
        # Short smoothing kernel so the peak/offpeak edge is not a hard step.
        b = np.array([0.5, 0.8, 1, 0.8, 0.5])
        if i.freq == "15T":  # repeat every value 4 times
            b = np.array([[bb, bb, bb, bb] for bb in b]).flatten()
        b = b[: len(i)]  # slice in case i is very short
        pa = np.convolve(-1 + 2 * i.map(is_peak_hour), b / sum(b), mode="same")
    else:
        # No intra-day resolution: no peak/offpeak structure.
        pa = np.zeros(len(i))
    # Values
    yv = year_amp * np.cos(ya - 0.35)  # max in feb
    wv = week_amp * np.cos(wa - 1.07)  # max on tuesday
    pv = peak_amp * pa
    s = pd.Series(avg * (1 + yv + wv + pv), i, name="p")
    return s if not has_unit else s.astype("pint[Eur/MWh]")
def read_flow(fn):
    """ Read .flo file in Middlebury format.

    Returns an (h, w, 2) float32 array of flow vectors, or None when the
    magic-number check fails.
    """
    # Code adapted from:
    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
    # WARNING: this will work on little-endian architectures (eg Intel x86) only!
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print("Magic number incorrect. Invalid .flo file")
            return None
        width = np.fromfile(f, np.int32, count=1)[0]
        height = np.fromfile(f, np.int32, count=1)[0]
        # print 'Reading %d x %d flo file\n' % (width, height)
        raw = np.fromfile(f, np.float32, count=2 * width * height)
    # Reshape data into 3D array (columns, rows, bands)
    # The reshape here is for visualization, the original code is (w,h,2)
    return np.resize(raw, (int(height), int(width), 2))
def enable_debugging(enable: bool = True, enable_sql: bool = True) -> None:
    """
    enables Pony's debugging as well as other logging in this module

    NOTE(review): the ``enable`` flag currently has no effect (its branch
    is a placeholder ``pass``); only ``enable_sql`` does anything.
    """
    if enable:
        # Placeholder: module-level debug logging not implemented yet.
        pass
    if enable_sql:
        # Log generated SQL statements together with their bound values.
        orm.set_sql_debug(debug=True, show_values=True)
def run_rotors(run_tors_names, const_names):
    """Log the hindered-rotor torsions that are about to be scanned.

    Also reports when the constrained torsion names cover every torsion
    of the system, i.e. the user asked for all torsions to be fixed.
    """
    info_message(
        'Running hindered rotor scans for the following rotors...',
        newline=1)
    for torsion_names in run_tors_names:
        info_message(torsion_names)
    if const_names is not None:
        # Flatten the grouped torsion names and compare against the
        # constrained set.
        all_torsions = set(chain(*run_tors_names))
        if all_torsions == set(const_names):
            info_message(
                'User requested all torsions of system will be fixed.',
                newline=1)
def test_parse_VisitError_raises_original_exception(
    version_selector: VersionSelector, handled_exception
):
    """
    Ensures that custom errors such as ``ParseFailure`` and ``InvalidExpression`` are
    re-raised as the root exception from containing ``VisitErrors`` when applying the
    ``SemselTransformer``.
    """
    # Force the transformer to wrap the handled exception in a VisitError.
    with mock.patch.object(SemselTransformer, "transform") as mocked_transform:
        mocked_transform.side_effect = VisitError(
            "test", Tree("test", []), handled_exception("test")
        )
        # The parser must unwrap the VisitError and surface the original
        # exception (with its message intact).
        with pytest.raises(handled_exception) as exc:
            SemselParser().parse(str(version_selector), validate=False)
        assert "test" in str(exc.value)
def p_boolean_expression(p):
    """boolean_expression : expression"""
    # A boolean expression is just an expression; propagate its value.
    # Fixes: the original docstring read "boolean_expression: expression"
    # without the whitespace-separated colon that ply.yacc requires when
    # parsing grammar rules, and the rule never assigned p[0], so the
    # production's value was silently dropped.
    p[0] = p[1]
def sma(grp_df: pd.DataFrame, cols: List[str], windows: List[int]) -> pd.DataFrame:
    """
    Add simple-moving-average columns to a grouped dataframe.

    For every (column, window) pair a column named ``sma_<col>_<window>``
    is added in place, holding the rolling mean of that column.

    Parameters:
    -------
    grp_df: pd.DataFrame
        The grouped dataframe (modified in place and returned).
    cols: list
        Names of the columns to average.
    windows: list
        List of windows to take simple moving average.
    """
    for win in windows:
        for column in cols:
            rolling_mean = grp_df[column].rolling(window=win).mean()
            grp_df[f"sma_{column}_{win}"] = rolling_mean
    return grp_df
def add_anchor_tag(anchor_id, header):
    """
    Add anchor tag to header.

    The first space in *header* is replaced with an anchor element built
    from *anchor_id*, and a table-of-contents backlink is appended.

    Input and output will look like below.
    Input:
    ## Task 02 - Do something
    Output:
    ## <a id="task02"></a> Task 02 - Do something [^](#toc)
    """
    anchor_markup = ANCHOR.format(anchor_id)
    # Splice the anchor in place of the first space only.
    anchored_header = header.replace(' ', anchor_markup, 1)
    return f"{anchored_header.strip()} {TOC}"
def check_unused_samples(loaded_csv_dict, loaded_img_dict):
    """Warn about sample_info.csv entries with no matching project-folder sample.

    Compares the sample names loaded from a sample_info.csv file against
    the sample names loaded from a project folder and prints a warning
    listing any csv samples that match nothing.

    Parameters
    ----------
    loaded_csv_dict : dict{str: float}
        A dict with values from .csv {SAMPLE_NAME1: SCALE_FACTOR1, ...}.
    loaded_img_dict : dict
        A dict loaded from a project folder with format:
        {EACH_SAMPLE: {EACH_SPOT: {'img_file': FULL_IMG_PATH,
                                   'Align_file': FULL_ALIGN_PATH or '',
                                   'rel_file': IMG_FILENAME}, ...}, ...}.

    Returns
    -------
    None.
    """
    folder_samples = list(loaded_img_dict.keys())
    unused_samples_list = [name for name in loaded_csv_dict.keys()
                           if name not in folder_samples]
    if unused_samples_list:
        print('WARNING: sample(s) from sample_info.csv do not match',
              'sample(s) loaded from folder:', '\n',
              unused_samples_list, '\n',
              'Try checking names/capitalization in sample_info and try reloading')
def create_async_executor(query: Query) -> Callable:
    """Create async executor for query.

    Arguments:
        query: query for which executor should be created.

    Returns:
        Created async executor.
    """
    # Look up the executor registered for this operation type and bind
    # the query as its first argument.
    handler = _OPERATION_TO_EXECUTOR[query.operation_type]
    return partial(handler, query)
def batch_write_coverage(bed_fname, bam_fname, out_fname, by_count, processes):
    """Run coverage on one sample, write to file.

    Computes coverage of the BED regions in ``bed_fname`` over the sample
    BAM ``bam_fname`` (counting reads instead of depths when ``by_count``
    is true, using ``processes`` workers) and writes the result to
    ``out_fname``.  Returns ``out_fname`` so pooled callers can collect
    the output paths.
    """
    # 0 is the min_depth/threshold positional argument -- TODO confirm
    # against coverage.do_coverage's signature.
    cnarr = coverage.do_coverage(bed_fname, bam_fname, by_count, 0, processes)
    tabio.write(cnarr, out_fname)
    return out_fname
def splitTargets(targetStr):
    """ break cmdargs into parts consisting of:
    1) cmdargs are already stripped of their first arg
    2) list of targets, including their number.  Target examples:
    * staff
    * staff 2
    * staff #2
    * player
    * player #3
    """
    pending = ""
    targetList = []
    for word in targetStr.split(" "):
        if pending == "":
            # Start of a new target.
            pending = word
        elif isCountStr(word):
            # A count word completes the pending target.
            targetList.append(pending + " " + word)
            pending = ""
        else:
            # A new target begins; flush the previous one.
            targetList.append(pending)
            pending = word
    if pending != "":
        # Flush the last target if it has not been appended yet.
        targetList.append(pending)
    return targetList
def get_model():
    """Build and compile the convolutional Keras model (75x75x3 inputs,
    binary sigmoid output).

    Recorded training metrics for this architecture:

    Epoch 50/50
    3530/3530 [==============================] - 10s - loss: 8.5420e-04 - acc: 1.0000 - val_loss: 0.3877 - val_acc: 0.9083
    1471/1471 [==============================] - 1s
    Train score: 0.00226768349974
    Train accuracy: 1.0
    """
    model = Sequential()
    # Block 1: two 32-filter conv layers (conv -> batchnorm -> relu), then pool.
    model.add(Conv2D(32, kernel_size=(3, 3), activation=None, input_shape=(75, 75, 3)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Block 2: two 64-filter conv layers, then pool.
    model.add(Conv2D(64, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Block 3: three 128-filter conv layers, then pool.
    model.add(Conv2D(128, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # You must flatten the data for the dense layers
    model.add(Flatten())
    # Dense 1
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.2))
    # Dense 2
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))
    # Output: single sigmoid unit for binary classification.
    model.add(Dense(1, activation="sigmoid"))
    # NOTE(review): Adam(lr=...) is the legacy keyword; newer Keras
    # versions use learning_rate -- confirm the pinned TF/Keras version.
    optimizer = Adam(lr=0.0001, decay=0.0)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
def path_inside_dir(path, directory):
    """
    Returns True if the specified @path is inside @directory,
    performing component-wide comparison.  Otherwise returns False.

    An empty @directory means the root: every non-empty @path is inside
    it.  A path is not considered inside itself.
    """
    if directory == "":
        return path != ""
    # Component-wise prefix test: "/ab" is not inside "/a".
    prefix = directory.rstrip("/") + "/"
    return path.rstrip("/").startswith(prefix)
def register_routes(app: 'Application', routes_table):
    """Add routes handlers of the server.

    Each entry of *routes_table* is a (method, path, handler, name)
    tuple which is registered on the application's router.
    """
    for http_method, url_path, handler, route_name in routes_table:
        app.router.add_route(http_method, url_path, handler, name=route_name)
def tfi_chain(qubits, boundary_condition="closed", data_dir=None):
    """1D Transverse field Ising-model quantum data set.

    $$
    H = - \sum_{i} \sigma_i^z \sigma_{i+1}^z - g\sigma_i^x
    $$

    Contains 81 circuit parameterizations corresponding to the ground
    states of the 1D TFI chain for g in [0.2, 1.8].  Each datapoint is
    represented by a circuit (`cirq.Circuit`), a label, a Hamiltonian
    (`cirq.PauliSum`) and additional metadata.  The circuit in a
    datapoint prepares (an approximation to) the ground state of the
    Hamiltonian in the datapoint.

    Example usage:
    >>> qbs = cirq.GridQubit.rect(4, 1)
    >>> circuits, labels, pauli_sums, addinfo = \
    ...     tfq.datasets.tfi_chain(qbs, "closed")

    Args:
        qubits: Python `lst` of `cirq.GridQubit`s. Supported number of
            spins are [4, 8, 12, 16].
        boundary_condition: Python `str` indicating the boundary condition
            of the chain. Supported boundary conditions are ["closed"].
        data_dir: Optional Python `str` location where to store the data on
            disk. Defaults to `/tmp/.keras`.

    Returns:
        A Python `lst` of `cirq.Circuit`s of depth len(qubits) / 2 with
            resolved parameters.
        A Python `lst` of labels: 0 for the ferromagnetic phase (`g<1`),
            1 for the critical point (`g==1`) and 2 for the paramagnetic
            phase (`g>1`).
        A Python `lst` of `cirq.PauliSum`s.
        A Python `lst` of `SpinSystemInfo` namedtuples with fields `g`
            (order parameter), `gs` (exact ground state), `gs_energy`,
            `res_energy` (circuit-vs-exact energy residual), `fidelity`,
            `params` (dict of symbol name -> value) and `var_circuit`
            (the unresolved variational `cirq.Circuit`); see
            `tfq.datasets.spin_system.SpinSystem`.

    Raises:
        TypeError: If `qubits` is not a flat list of `cirq.GridQubit`s.
        ValueError: If the spin count or boundary condition is unsupported.
    """
    supported_n = [4, 8, 12, 16]
    supported_bc = ["closed"]
    if any(isinstance(q, list) for q in qubits):
        raise TypeError("qubits must be a one-dimensional list")
    if not all(isinstance(q, cirq.GridQubit) for q in qubits):
        raise TypeError("qubits must be a list of cirq.GridQubit objects.")
    nspins = len(qubits)
    depth = nspins // 2
    if nspins not in supported_n:
        raise ValueError("Supported number of spins are {}, received {}".format(
            supported_n, nspins))
    if boundary_condition not in supported_bc:
        raise ValueError(
            "Supported boundary conditions are {}, received {}".format(
                supported_bc, boundary_condition))
    data_path = _download_spin_data('TFI_chain', boundary_condition, nspins,
                                    data_dir)
    name_generator = unique_name()
    # 2 * N/2 parameters: one ZZ angle and one X angle per circuit layer.
    symbol_names = [next(name_generator) for _ in range(nspins)]
    symbols = [sympy.Symbol(name) for name in symbol_names]
    # Define the variational circuit: Hadamard wall, then alternating
    # ZZ-entangling and X layers, one shared symbol per layer.
    circuit = cirq.Circuit(cirq.H.on_each(qubits))
    for d in range(depth):
        circuit.append(
            cirq.ZZ(q1, q2)**(symbols[d]) for q1, q2 in zip(qubits, qubits[1:]))
        if boundary_condition == "closed":
            # Close the ring with a ZZ between last and first qubit.
            circuit.append(cirq.ZZ(qubits[nspins - 1], qubits[0])**(symbols[d]))
        circuit.append(cirq.X(q1)**(symbols[d + depth]) for q1 in qubits)
    # Initiate lists.
    resolved_circuits = []
    hamiltonians = []
    order_parameters = []
    additional_info = []
    labels = []
    # Load the data and append to the lists.  Each data folder is named
    # after the order-parameter value it contains.
    for i, directory in enumerate(x for x in os.listdir(data_path)):
        g = float(directory)
        with open(os.path.join(data_path, directory, "stats.txt"), "r") as file:
            lines = file.readlines()
            res_e = float(lines[0].split("=")[1].strip("\n"))
            fidelity = float(lines[2].split("=")[1].strip("\n"))
        order_parameters.append(g)
        params = np.load(os.path.join(data_path, directory, "params.npy")) \
            / np.pi
        # Parameters are stored as np.float32, but cirq expects np.float64
        # See https://github.com/quantumlib/Cirq/issues/3359
        # (Fixed: the deprecated np.float alias was removed in NumPy 1.20;
        # use the concrete np.float64 dtype instead.)
        params = params.astype(np.float64)
        additional_info.append(
            SpinSystemInfo(g=g,
                           gs=np.load(
                               os.path.join(data_path, directory,
                                            "groundstate.npy"))[:, 0],
                           gs_energy=np.load(
                               os.path.join(data_path, directory,
                                            "energy.npy"))[0],
                           res_energy=res_e,
                           fidelity=fidelity,
                           params=dict(zip(symbol_names, params.flatten())),
                           var_circuit=circuit))
        # Resolve the circuit parameters.
        resolved_circuit = cirq.resolve_parameters(circuit,
                                                   additional_info[i].params)
        resolved_circuits.append(resolved_circuit)
        # Make the PauliSum for this order parameter.
        paulisum = sum(
            -cirq.Z(q1) * cirq.Z(q2) for q1, q2 in zip(qubits, qubits[1:]))
        if boundary_condition == "closed":
            paulisum += -cirq.Z(qubits[0]) * cirq.Z(qubits[-1])
        paulisum += -order_parameters[i] * sum(cirq.X(q) for q in qubits)
        hamiltonians.append(paulisum)
        # Set labels for the different phases.
        if order_parameters[i] < 1.0:
            labels.append(0)
        elif order_parameters[i] == 1.0:
            labels.append(1)
        else:
            labels.append(2)
    # Make sure that the data is ordered from g=0.2 to g=1.8.
    _, resolved_circuits, labels, hamiltonians, additional_info = zip(*sorted(
        zip(order_parameters, resolved_circuits, labels, hamiltonians,
            additional_info)))
    return resolved_circuits, labels, hamiltonians, additional_info
def generateObjectsIntroductionInfo(typeMode):
    """
    Generates gaps between introduction days based on either a Pareto ('HPC')
    or an exponential (otherwise) distribution, then spreads the generated
    per-day object counts over full weeks (heaviest day Friday, lightest
    Tuesday) and draws per-object inter-arrival times for each day.

    Side effects on module globals: appends to ``dayGaps``,
    ``numOfObjectsIntroduced`` and ``interArrivals``; updates
    ``NUM_OF_OBJECTS`` to the rounded total actually generated.

    :param typeMode: 'HPC' selects the Pareto day-gap model; any other value
        selects the exponential day-gap model.
    """
    global NUM_OF_OBJECTS
    global numOfObjectsIntroduced
    tempNumOfObjectsIntroduced = []
    while sum(tempNumOfObjectsIntroduced) < NUM_OF_OBJECTS:
        # BUGFIX: was `typeMode is 'HPC'` -- identity comparison of strings
        # relies on interning and is a SyntaxWarning on modern Python.
        if typeMode == 'HPC':
            if WITH_DAY_GAPS_INTRODUCTION:
                pareto_alpha_objectIntro_hpc = 1.0164
                object_intro_days_gap = generateRandVariate('pareto', {'alpha': pareto_alpha_objectIntro_hpc}, 1)[0]
                # Cap extreme Pareto tail draws at 20 days.
                if object_intro_days_gap > 20:
                    object_intro_days_gap = 20
                dayGaps.append(object_intro_days_gap)
            else:
                dayGaps.append(1)
        else:
            exponential_mu_objectIntro_hpl = 4.2705
            object_intro_days_gap = generateRandVariate('exp', {'mu': exponential_mu_objectIntro_hpl}, 1)[0]
            dayGaps.append(object_intro_days_gap)
        # Number of new objects generated in each introduction day (scaled Pareto dist).
        pareto_alpha_numOfObjectsGeneration = 0.8
        pareto_beta_numOfObjectsGeneration = MIN_OBJ_INTRODCUED_PER_DAY_THRESHOLD
        numOfObjects_intro_in_day = generateRandVariate('paretoScaled', {'alpha': pareto_alpha_numOfObjectsGeneration,
                                                                        'beta': pareto_beta_numOfObjectsGeneration}, 1)[0]
        if numOfObjects_intro_in_day > MAX_OBJ_INTRODCUED_PER_DAY_THRESHOLD:
            numOfObjects_intro_in_day = MAX_OBJ_INTRODCUED_PER_DAY_THRESHOLD
        tempNumOfObjectsIntroduced.append(numOfObjects_intro_in_day)
    # Sort ascending so the weekday distribution below can slice by quantile.
    tempNumOfObjectsIntroduced.sort()
    # Fold any days beyond a whole number of weeks into later days.
    extra_days = len(tempNumOfObjectsIntroduced) % 7
    for i in range(extra_days):
        # Add these objects to other introduction days to keep only full weeks of data.
        # NOTE(review): this retries until a random target day has room below the
        # cap; it can spin for a long time if all days are near the threshold.
        added = False
        while not added:
            u = random.randint(extra_days + 1, len(tempNumOfObjectsIntroduced) - 1)
            if tempNumOfObjectsIntroduced[i] + tempNumOfObjectsIntroduced[u] < MAX_OBJ_INTRODCUED_PER_DAY_THRESHOLD:
                tempNumOfObjectsIntroduced[u] += tempNumOfObjectsIntroduced[i]
                added = True
    # Exclude the extra days after their objects were merged into other days.
    tempNumOfObjectsIntroduced = tempNumOfObjectsIntroduced[extra_days:]
    tempNumOfObjectsIntroduced.sort()
    # Fill in the days by dividing the sorted data as follows.
    # This induces that more objects are introduced on Friday than Saturday,
    # and so on; the fewest objects are introduced on Tuesday.
    # Fri 1, Sat 2, Sun 3, Thu 4, Wed 5, Mon 6, Tuesday 7
    weeks = len(tempNumOfObjectsIntroduced) // 7
    # Quantile offset (in weeks) of each weekday within the sorted list.
    weekday_offsets = (('Mon', 1), ('Tue', 0), ('Wed', 2), ('Thu', 3),
                       ('Fri', 6), ('Sat', 5), ('Sun', 4))
    for i in range(weeks):
        for _day, offset in weekday_offsets:
            numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[offset * weeks + i])
    # Inter-arrival times for object introductions within each day.
    pareto_alpha_interArrival = 1.0073
    # Hoisted: the original rounded the whole list once per loop iteration.
    rounded_counts = np.round(numOfObjectsIntroduced)
    for count in rounded_counts:
        objectsCountInDay = int(count)
        if WITH_INTRODUCTION:
            interArrivals.append(generateRandVariate('pareto', {'alpha': pareto_alpha_interArrival}, objectsCountInDay))
        else:
            interArrivals.append([0] * objectsCountInDay)
    NUM_OF_OBJECTS = int(sum(rounded_counts))
def _process(response):
"""Dummy post-processing after http request"""
pass | 5,323,951 |
def save_pca(result=None, sample_ids=None, output_fn=None, max_size=10):
    """Save PCA results as compact JSON.

    Args:
        result (dict): PCA results; must contain the keys "coordinates",
            "skipped_gene_labels", "all_components" and
            "all_explained_variance_ratios".
        sample_ids (list): sample identifiers matching the coordinates.
        output_fn (str): output file path; if falsy, the JSON is printed to
            stdout instead.
        max_size (int): number of leading components / variance ratios exposed
            under the truncated "components"/"explained_variance_ratios" keys.
    """
    # BUGFIX: the original used mutable default arguments ({} and []), which
    # are shared across calls; use None sentinels instead.
    if result is None:
        result = {}
    if sample_ids is None:
        sample_ids = []
    data = {
        "flot": {
            "data": result["coordinates"],
            "xlabel": "PC 1",
            "ylabel": "PC 2",
            "sample_ids": sample_ids,
        },
        "zero_gene_symbols": result["skipped_gene_labels"],
        "components": result["all_components"][:max_size],
        "all_components": result["all_components"],
        "explained_variance_ratios": result["all_explained_variance_ratios"][:max_size],
        "all_explained_variance_ratios": result["all_explained_variance_ratios"],
    }
    # allow_nan=False: fail loudly instead of emitting invalid JSON (NaN/Inf).
    if output_fn:
        with open(output_fn, "w") as outfile:
            json.dump(data, outfile, separators=(",", ":"), allow_nan=False)
    else:
        print(json.dumps(data, separators=(",", ":"), allow_nan=False))
def generate_sd_grid_mapping_traj(ipath_sd, n_top_grid, ipath_top_grid, ipath_grid_block_gps_range,
                                  odir_sd, mapping_rate=1, mapping_bais=None):
    """generate the gird-mapping traj for SD

    Reverse-maps grid-indexed synthetic-data (SD) trajectories back to GPS
    points: for every grid cell index, a point is sampled uniformly inside
    that cell's GPS range, the ``mapping_rate``/``mapping_bais`` transform is
    undone, and each trajectory is written to ``odir_sd`` as
    ``sd_traj<i>.txt`` with one "lat,lon" line per point.

    NOTE(review): ``n_top_grid`` and ``ipath_top_grid`` are currently unused
    (see commented-out code below); ``mapping_bais`` looks like a typo for
    "mapping_bias" but is kept for interface compatibility.
    SECURITY NOTE: input files are parsed with ``eval`` -- only run this on
    trusted data.
    """
    def random_sampling(grid_range):
        """generate a sample point within a grid range

        ``grid_range`` is expected as ((min_x, min_y), (max_x, max_y)).
        """
        x = np.random.uniform(grid_range[0][0], grid_range[1][0])
        y = np.random.uniform(grid_range[0][1], grid_range[1][1])
        return x, y
    # for pep8
    if mapping_bais is None:
        mapping_bais = {'lat': 0, 'lon': 0}
    # privacy budget
    # Each line of the SD file is one trajectory: a literal list of cell indices.
    with open(ipath_sd) as fr_sd:
        sd = [eval(point.replace('\n', '')) for point in fr_sd.readlines()]
    # C = n_top_grid ** 2
    # with open(ipath_top_grid) as fr_top_grid:
    #     M = eval(fr_top_grid.readline())
    with open(ipath_grid_block_gps_range) as fr_top_grid_block_gps_range:
        fstr = fr_top_grid_block_gps_range.readlines()
        grid_block_gps_range = eval(fstr[0])
        # top_grid_block_gps_range = eval(fstr[1])
    # Sample one GPS point per visited cell for every trajectory.
    reverse_mapped_trajs = []
    for traj in sd:
        reverse_mapped_trajs.append([random_sampling(grid_block_gps_range[i]) for i in traj])
    # write to files
    fcount = 0
    p = utils.ProgressBar(len(reverse_mapped_trajs), '生成脱敏数据集')
    for i in range(len(reverse_mapped_trajs)):
        p.update(i)
        with open(odir_sd + '/sd_traj' + str(fcount) + '.txt', 'w') as fw_traj:
            for point in reverse_mapped_trajs[i]:
                # mapping: undo the scale/offset applied when the grid was built
                point = [point[0]/mapping_rate+mapping_bais['lat'], point[1]/mapping_rate+mapping_bais['lon']]
                fw_traj.write(str(point[0])+','+str(point[1])+'\n')
        fcount += 1
def get_2bit_path(db_opt):
    """Check if alias and return a path to 2bit file.

    If ``db_opt`` is already an existing file path it is returned unchanged;
    otherwise it is treated as an alias and expanded via ``two_bit_templ``.
    Calls ``die`` (which is expected not to return) when the expanded path
    does not exist either.
    """
    if os.path.isfile(db_opt):  # not an alias, there is nothing to do
        return db_opt
    aliased = two_bit_templ.format(db_opt)
    # Idiom fix: the original used a conditional *expression* purely for its
    # side effect (`die(...) if cond else None`); a plain if-statement is clearer.
    if not os.path.isfile(aliased):
        die(f"Error! Cannot find {aliased} file", 1)
    return aliased
def test_save_load_from_video(test_video_file):
    """ Round-trip test: compute frame metrics for the first 20 seconds of
    test_video_file, save them to CSV, reload them into a fresh StatsManager,
    and verify the reloaded metrics match the originals.
    """
    stream = VideoStreamCv2(test_video_file)
    stats = StatsManager()
    manager = SceneManager(stats)
    manager.add_detector(ContentDetector())
    end_time = FrameTimecode('00:00:20', stream.frame_rate)
    manager.auto_downscale = True
    manager.detect_scenes(stream, duration=end_time)
    stats.save_to_csv(csv_file=TEST_STATS_FILES[0])
    reloaded = StatsManager()
    reloaded.load_from_csv(TEST_STATS_FILES[0])
    # Compare every registered metric for the first stored frame.
    first_frame = min(stats._frame_metrics.keys())
    metric_keys = list(stats._registered_metrics)
    assert stats.metrics_exist(first_frame, metric_keys)
    saved = stats.get_metrics(first_frame, metric_keys)
    loaded = reloaded.get_metrics(first_frame, metric_keys)
    for idx, value in enumerate(saved):
        assert value == pytest.approx(loaded[idx])
def butterworth_type_filter(frequency, highcut_frequency, order=2):
    """
    Butterworth-type low-pass filter response.

    Parameters
    ----------
    frequency: float or ndarray
        Frequencies at which the filter response is evaluated.
    highcut_frequency: float
        High-cut frequency of the order-``order`` low-pass stage.
    order: int
        The order of the butterworth filter.

    Returns
    -------
    h: complex or ndarray
        Complex filter response at `frequency`; includes an additional
        fixed first-order roll-off at 300 kHz.
    """
    response = 1.0 / (1 + 1j * (frequency / highcut_frequency)) ** order
    # Extra fixed first-order stage with a 300 kHz cutoff.
    fixed_cutoff = 300 * 1e3
    response *= 1.0 / (1 + 1j * (frequency / fixed_cutoff)) ** 1
    return response
def draw_mesh(
    # Main input
    edof,
    coord,
    dof,
    element_type,
    # Other parameters
    scale = 0.02,
    alpha = 1,
    render_nodes = True,
    color = 'yellow',
    offset = [0, 0, 0],
    # BC- & F-marking
    bcPrescr = None,
    bc = None,
    bc_color = 'red',
    fPrescr = None,
    f = None,
    f_color = 'blue6',
    eq_els = None,
    eq = None,
    # Element-specific input
    spring = True,
    nseg = 2
    ):
    """
    Routine for undisplaced mesh for spring, bar, flow, solid, beam or plate elements.

    :param array edof: Element topology by degrees of freedom [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array coord: Nodal coordinates [number of nodes x 3]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param int element_type: Element type [1-6]
    :param float scale: Element scale, nodes are scaled 50% larger than this value
    :param float alpha: Element and node transparency [0-1]
    :param bool render_nodes: If True, nodes are rendered
    :param str color: Element color
    :param list offset: Offset actors in 3D space [x, y, z]
    :param array bcPrescr: Degrees of freedom with boundary conditions [number of degrees of freedom with BCs x 1]
    :param array bc: Boundary conditions [number of degrees of freedom with BCs x 1]
    :param str bc_color: Color for nodes with boundary conditions applied
    :param array fPrescr: Degrees of freedom with forces [number of degrees of freedom with forces x 1]
    :param array f: Forces at degrees of freedom [number of degrees of freedom with forces x 1]
    :param str f_color: Color for nodes/elements with forces applied
    :param array eq_els: Element numbers where forces are applied [number of elements with forces x 1]
    :param array eq: Element force vector [number of elements with forces x 1 | number of elements with forces x 3]
    :param bool spring: If True, renders spring elements as coil springs
    :param int nseg: Number of points along beam elements for segmenting [number of segments + 1]
    :return array mesh: List of mesh actors
    """
    # Acquire the app and the shared plot-window singleton the actors are added to.
    app = init_app()
    plot_window = VedoPlotWindow.instance().plot_window
    # Pad 1-D/2-D nodal coordinates with zero columns so coord is always (n, 3).
    if np.size(coord, axis = 1) == 1:
        coord = np.append(coord, np.zeros((np.size(coord, axis = 0),1)), axis=1)
        coord = np.append(coord, np.zeros((np.size(coord, axis = 0),1)), axis=1)
    elif np.size(coord, axis = 1) == 2:
        coord = np.append(coord, np.zeros((np.size(coord, axis = 0),1)), axis=1)
    if 1 <= element_type <= 6:
        nel, ndof_per_el, nnode, ndim, ndof, ndof_per_n = vdu.check_input(edof,coord,dof,element_type,nseg=nseg)
    else:
        print("draw_mesh: Invalid element type, please declare 'element_type'. The element types are:\n    1 - Spring\n    2 - Bar\n    3 - Flow\n    4 - Solid\n    5 - Beam\n    6 - Plate")
        sys.exit()
    # OUTPUT FROM check_input
    # Number of elements:                       nel
    # Number of degrees of freedom per element: ndof_per_el
    # Number of nodes:                          nnode
    # Number of dimensions:                     ndim
    # Number of degrees of freedom:             ndof
    # Number of degrees of freedom per node:    ndof_per_n
    # Number of displacements:                  ndisp
    # Element/nodal values:                     val
    # Elements w/ a length (spring, bar & beam)
    if element_type == 1 or element_type == 2 or element_type == 5:
        ncoord = np.size(coord, axis = 0)
        nel = np.size(edof, axis = 0)
        elements = []
        # Translate all nodes by the requested offset (mutates the padded copy).
        coord[:] += offset
        if element_type == 5:
            # Build an element-index -> element-load lookup used for beam labels.
            # NOTE(review): eq_dict values are eq[indx][0]; the labels below index
            # them with [0..3], so each stored entry is expected to be a 4-vector
            # -- confirm against callers.
            for i in range(nel):
                eq_dict = {}
                indx = 0
                if isinstance(eq_els, np.ndarray):
                    for j in eq_els:
                        eq_dict[j[0]] = eq[indx][0]
                        indx += 1
        for i in range(nel):
            coord1,coord2 = vdu.get_coord_from_edof(edof[i,:],dof,element_type)
            if element_type == 1 and spring == True:
                element = v.Spring([coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]],r=1.5*scale,c=color).alpha(alpha)
                element.name = f"Spring element {i+1}"
                elements.append(element)
            elif element_type == 1 and spring == False:
                # Springs rendered as plain cylinders when coil rendering is disabled.
                element = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=color).alpha(alpha)
                element.name = f"Spring element {i+1}"
                elements.append(element)
            elif element_type == 2:
                bar = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=color).alpha(alpha)
                bar.name = f"Bar element {i+1}"
                elements.append(bar)
            # Segmented beam: split each beam into nseg-1 equal cylinders.
            elif element_type == 5 and nseg > 2:
                steps = np.float32(1/(nseg-1))
                dx = (coord[coord2,0]-coord[coord1,0])*steps
                dy = (coord[coord2,1]-coord[coord1,1])*steps
                dz = (coord[coord2,2]-coord[coord1,2])*steps
                for j in range(nseg-1):
                    x1 = coord[coord1,0]+dx*j
                    y1 = coord[coord1,1]+dy*j
                    z1 = coord[coord1,2]+dz*j
                    x2 = coord[coord1,0]+dx*(j+1)
                    y2 = coord[coord1,1]+dy*(j+1)
                    z2 = coord[coord1,2]+dz*(j+1)
                    # Highlight elements carrying distributed loads in f_color.
                    if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                        beam = v.Cylinder([[x1,y1,z1],[x2,y2,z2]],r=scale,res=4,c=f_color).alpha(alpha)
                    else:
                        beam = v.Cylinder([[x1,y1,z1],[x2,y2,z2]],r=scale,res=4,c=color).alpha(alpha)
                    if i in eq_dict:
                        beam.name = f"Beam element {i+1}, seg. {j+1}, Forces: [{eq_dict[i][0]}, {eq_dict[i][1]}, {eq_dict[i][2]}, {eq_dict[i][3]}]"
                    else:
                        beam.name = f"Beam element {i+1}, seg. {j+1}"
                    elements.append(beam)
            elif element_type == 5:
                if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                    beam = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=f_color).alpha(alpha)
                else:
                    beam = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=color).alpha(alpha)
                if i in eq_dict:
                    beam.name = f"Beam element {i+1}, Forces: [{eq_dict[i][0]}, {eq_dict[i][1]}, {eq_dict[i][2]}, {eq_dict[i][3]}]"
                else:
                    beam.name = f"Beam element {i+1}"
                elements.append(beam)
        if render_nodes == True:
            # Node glyph size/DOF count depend on the element family.
            if element_type == 1 and spring == False:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=1)
            elif element_type == 1:
                nodes = vdu.get_node_elements(coord,scale*0.5,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=1)
            elif element_type == 2:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=3)
            elif element_type == 5:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=6)
            plot_window.meshes[plot_window.fig].extend(elements)
            plot_window.nodes[plot_window.fig].extend(nodes)
        else:
            plot_window.meshes[plot_window.fig].extend(elements)
        return elements
    # Elements w/ a volume/surface (flow, solid & plate)
    elif element_type == 3 or element_type == 4 or element_type == 6:
        meshes = []
        nel = np.size(edof, axis = 0)
        coord[:] += offset
        for i in range(nel):
            # NOTE(review): eq_dict is rebuilt identically on every iteration;
            # it could be hoisted out of the loop.
            eq_dict = {}
            indx = 0
            if isinstance(eq_els, np.ndarray):
                for j in eq_els:
                    eq_dict[j[0]] = eq[indx][0]
                    indx += 1
            if element_type == 3:
                coords = vdu.get_coord_from_edof(edof[i,:],dof,3)
            elif element_type == 4:
                coords = vdu.get_coord_from_edof(edof[i,:],dof,4)
            elif element_type == 6:
                # Plates: duplicate the 4 corner nodes into an 8-point array
                # (z stays 0) so the same quad-face connectivity can be used.
                coords = vdu.get_coord_from_edof(edof[i,:],dof,6)
                new_coord = np.zeros([8,3])
                new_coord[0,0] = coord[coords[0],0]
                new_coord[1,0] = coord[coords[1],0]
                new_coord[2,0] = coord[coords[2],0]
                new_coord[3,0] = coord[coords[3],0]
                new_coord[4,0] = coord[coords[0],0]
                new_coord[5,0] = coord[coords[1],0]
                new_coord[6,0] = coord[coords[2],0]
                new_coord[7,0] = coord[coords[3],0]
                new_coord[0,1] = coord[coords[0],1]
                new_coord[1,1] = coord[coords[1],1]
                new_coord[2,1] = coord[coords[2],1]
                new_coord[3,1] = coord[coords[3],1]
                new_coord[4,1] = coord[coords[0],1]
                new_coord[5,1] = coord[coords[1],1]
                new_coord[6,1] = coord[coords[2],1]
                new_coord[7,1] = coord[coords[3],1]
            if element_type == 3 or element_type == 4:
                # Hexahedron rendered as its 6 quad faces.
                if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                    mesh = v.Mesh([coord[coords,:],[[0,1,2,3],[4,5,6,7],[0,3,7,4],[1,2,6,5],[0,1,5,4],[2,3,7,6]]],alpha=alpha,c=f_color).lw(1)
                else:
                    mesh = v.Mesh([coord[coords,:],[[0,1,2,3],[4,5,6,7],[0,3,7,4],[1,2,6,5],[0,1,5,4],[2,3,7,6]]],alpha=alpha).lw(1)
            elif element_type == 6:
                if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                    mesh = v.Mesh([new_coord,[[0,1,2,3]]],alpha=alpha,c=f_color).lw(1)
                else:
                    mesh = v.Mesh([new_coord,[[0,1,2,3]]],alpha=alpha).lw(1)
            if element_type == 3:
                if i in eq_dict:
                    mesh.name = f"Flow element {i+1}, Force: {eq_dict[i][0]}"
                else:
                    mesh.name = f"Flow element {i+1}"
            elif element_type == 4:
                if i in eq_dict:
                    mesh.name = f"Solid element {i+1}, Force: {eq_dict[i][0]}"
                else:
                    mesh.name = f"Solid element {i+1}"
            elif element_type == 6:
                if i in eq_dict:
                    mesh.name = f"Plate element {i+1}, Force: {eq_dict[i][0]}"
                else:
                    mesh.name = f"Plate element {i+1}"
            meshes.append(mesh)
        if render_nodes == True:
            if element_type == 3:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=1)
            elif element_type == 4 or element_type == 6:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=3)
            plot_window.meshes[plot_window.fig].extend(meshes)
            plot_window.nodes[plot_window.fig].extend(nodes)
            #print("Adding mesh to figure ",plot_window.fig+1)
        else:
            plot_window.meshes[plot_window.fig].extend(meshes)
            #print("Adding mesh to figure ",plot_window.fig+1)
        return meshes
def test_section_11():
    """Section 11: Precedence rules.
    Precedence MUST be calculated by separating the version
    into major, minor, patch, pre-release, and build
    identifiers in that order. Major, minor, and patch
    versions are always compared numerically. Pre-release
    and build version precedence MUST be determined by
    comparing each dot separated identifier as follows:
    identifiers consisting of only digits are compared
    numerically and identifiers with letters or dashes are
    compared lexically in ASCII sort order. Numeric
    identifiers always have lower precedence than
    non-numeric identifiers. Example: 1.0.0-alpha <
    1.0.0-alpha.1 < 1.0.0-beta.2 < 1.0.0-beta.11 <
    1.0.0-rc.1 < 1.0.0-rc.1+build.1 < 1.0.0 < 1.0.0+0.3.7 <
    1.3.7+build < 1.3.7+build.2.b8f12d7 <
    1.3.7+build.11.e0f985a.
    """
    presorted = [
        '1.0.0.0-alpha',
        '1.0.0.0-alpha.1',
        '1.0.0.0-beta.2',
        '1.0.0.0-beta.11',
        '1.0.0.0-rc.1',
        '1.0.0.0-rc.1+build.1',
        '1.0.0.0',
        '1.0.0.0+0.3.7',
        '1.3.7.0+build',
        '1.3.7.0+build.2.b8f12d7',
        '1.3.7.0+build.11.e0f985a',
    ]
    from random import shuffle
    # Scramble a copy, then check sorting restores the spec ordering.
    scrambled = presorted.copy()
    shuffle(scrambled)
    resorted = [str(version) for version in sorted(NonSemanticVersion(s) for s in scrambled)]
    assert resorted == presorted
def mypad(x, pad, mode='constant', value=0):
    """ Function to do numpy like padding on tensors. Only works for 2-D
    padding.

    Inputs:
        x (tensor): tensor to pad, indexed as (..., H, W)
        pad (tuple): tuple of (left, right, top, bottom) pad sizes
        mode (str): 'symmetric', 'periodic', 'constant', 'reflect',
            'replicate', or 'zero'. The padding technique.
            (Docstring fix: the code implements 'periodic', not 'wrap'.)
        value (float): fill value, used by 'constant' mode only.
    """
    if mode == 'symmetric':
        # Half-sample symmetric extension, built via index gathering.
        # Vertical only
        if pad[0] == 0 and pad[1] == 0:
            m1, m2 = pad[2], pad[3]
            l = x.shape[-2]  # noqa
            xe = reflect(np.arange(-m1, l+m2, dtype='int32'), -0.5, l-0.5)
            return x[:, :, xe]
        # horizontal only
        elif pad[2] == 0 and pad[3] == 0:
            m1, m2 = pad[0], pad[1]
            l = x.shape[-1]  # noqa
            xe = reflect(np.arange(-m1, l+m2, dtype='int32'), -0.5, l-0.5)
            return x[:, :, :, xe]
        # Both
        else:
            m1, m2 = pad[0], pad[1]
            l1 = x.shape[-1]
            xe_row = reflect(np.arange(-m1, l1+m2, dtype='int32'), -0.5, l1-0.5)
            m1, m2 = pad[2], pad[3]
            l2 = x.shape[-2]
            xe_col = reflect(np.arange(-m1, l2+m2, dtype='int32'), -0.5, l2-0.5)
            # Outer products build the full 2-D gather index grids.
            i = np.outer(xe_col, np.ones(xe_row.shape[0]))
            j = np.outer(np.ones(xe_col.shape[0]), xe_row)
            return x[:, :, i, j]
    elif mode == 'periodic':
        # Wrap-around extension via np.pad on the index vector.
        # Vertical only
        if pad[0] == 0 and pad[1] == 0:
            xe = np.arange(x.shape[-2])
            xe = np.pad(xe, (pad[2], pad[3]), mode='wrap')
            return x[:, :, xe]
        # Horizontal only
        elif pad[2] == 0 and pad[3] == 0:
            xe = np.arange(x.shape[-1])
            xe = np.pad(xe, (pad[0], pad[1]), mode='wrap')
            return x[:, :, :, xe]
        # Both
        else:
            xe_col = np.arange(x.shape[-2])
            xe_col = np.pad(xe_col, (pad[2], pad[3]), mode='wrap')
            xe_row = np.arange(x.shape[-1])
            xe_row = np.pad(xe_row, (pad[0], pad[1]), mode='wrap')
            i = np.outer(xe_col, np.ones(xe_row.shape[0]))
            j = np.outer(np.ones(xe_col.shape[0]), xe_row)
            return x[:, :, i, j]
    elif mode == 'constant' or mode == 'reflect' or mode == 'replicate':
        # Delegate directly to torch.nn.functional.pad for the native modes.
        return F.pad(x, pad, mode, value)
    elif mode == 'zero':
        return F.pad(x, pad)
    else:
        # Typo fix in error message: 'Unkown' -> 'Unknown'.
        raise ValueError('Unknown pad type: {}'.format(mode))
def molarity(compound, setting = None, moles = None, volume = None):
    """
    Calculations involving the molarity of a compound. Returns a value based on the setting.
    The compound must be the Compound class. The moles/volume setting will be gathered from the compound itself if defined.
    **Volume is assumed to be in milliliters.

    Setting --> Molarity: Returns the molarity of the compound from moles and volume.
    Setting --> Moles: Returns the moles of the compound from molarity and volume.
    Setting --> Volume: Returns the volume of the compound from moles and molarity.

    Raises ValueError for an unknown setting and AttributeError when a
    required quantity is missing from both the Compound and the arguments.
    """
    # Initialize settings:
    if setting not in ["molarity", "moles", "volume"]:
        # Message fix: the original read "molarity, moles volume." (missing comma).
        raise ValueError("You must choose a setting: molarity, moles, volume.")
    if not isinstance(compound, Compound):
        raise AttributeError("You must include a Compound class as the main argument")
    if compound.volume and not volume:
        volume = compound.volume
    if not compound.volume and not volume and setting in ["molarity", "moles"]:
        raise AttributeError("You must define volume either through the Compound class or through the method.")
    if compound.mole_amount and not moles:
        moles = compound.mole_amount
    if not compound.mole_amount and not moles and setting in ["molarity", "volume"]:
        raise AttributeError("You must define the mole amount either through the Compound class or through the method.")
    if not compound.molarity and setting in ["moles", "volume"]:
        # Message fix: molarity is an *input* for these settings, not the output.
        raise AttributeError("You must define the molarity of the solution to calculate moles or volume.")
    # Calculations (plain operators instead of operator.__truediv__/__mul__ dunder calls).
    # NOTE(review): volume is documented in mL while molarity is usually mol/L;
    # no unit conversion is performed here -- confirm intended units.
    if setting == "molarity":
        return moles / volume
    if setting == "moles":
        return volume * compound.molarity
    if setting == "volume":
        return moles / compound.molarity
def set_initial_det(noa, nob):
    """ Function
    Set the initial wave function to the RHF/ROHF determinant, returned as
    the integer bit string of the occupied spin-orbitals.

    Author(s): Takashi Tsuchimochi

    The low `2 * min(noa, nob)` bits are the doubly occupied (paired)
    orbitals, i.e. the pattern r'(11){min(a,b)}'. The unpaired electrons sit
    above them: with noa >= nob the pattern is r'(01){a-b}' whose value is
    the geometric sum (4**(a-b) - 1)/3; with noa < nob it is r'(10){b-a}'
    with value 2*(4**(b-a) - 1)/3.

    e.g.) a=1, b=1: |11>      = |3>
          a=3, b=1: |0101 11> = |23>
          a=1, b=3: |1010 11> = |43>

    noa: Number of Alpha spin electrons
    nob: Number of Beta spin electrons
    """
    paired_bits = 2 * min(noa, nob)
    if noa >= nob:
        # Alpha excess occupies every second bit starting at the low one.
        unpaired = (4 ** (noa - nob) - 1) // 3
    else:
        # Beta excess occupies every second bit starting one higher.
        unpaired = 2 * (4 ** (nob - noa) - 1) // 3
    paired = (1 << paired_bits) - 1
    return paired + (unpaired << paired_bits)
def face_detection(frame):
    """ Detect a face in `frame` using the module-level Haar cascade.

    :param frame: BGR image (or None)
    :return: (x, y, w, h) of the *last* detection reported by the cascade;
        all zeros when `frame` is None or no face is found
    """
    if frame is None:
        return 0, 0, 0, 0
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = faceCascade.detectMultiScale(
        grayscale,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Keep the last reported detection (zeros when nothing was found).
    pos_x = pos_y = width = height = 0
    for left, top, w, h in detections:
        pos_x, pos_y, width, height = left, top, w, h
    return pos_x, pos_y, width, height
def coro1():
    """Define a simple generator-based coroutine to act as a subgenerator."""
    word = yield 'hello'
    yield word
    return word  # A coroutine may return a value: it is stored on StopIteration's `value` attribute and becomes the result of the `yield from` expression
def _deduce_num_classes(params):
"""Set `num_classes` for `params`."""
if 'imagenet' in params.dataset_name.lower():
num_classes = 1000
elif 'cifar100' in params.dataset_name.lower():
num_classes = 100
else:
logging.info(
f'Cannot infer `num_classes` for dataset {params.dataset_name}. Use 10')
num_classes = 10
if 'num_classes' in params and num_classes != params.num_classes:
logging.info('Replace `params.num_classes` from {0} to {1}'.format(
params.num_classes, num_classes))
params.set_hparam('num_classes', num_classes) | 5,323,964 |
def plot_combinations_9array3x3_v2(coli_to_test, sorted_combinations, sorted_vals, comb_ind, renaming_fun):
    """Plot the nine best decompositions of a given set with variables
    outside the matrix for a decomposition of 3 variables

    NOTE(review): despite "nine best" above, the loop below breaks after the
    8th panel (ind == 7) -- confirm whether that is intended.

    Parameters
    ----------
    coli_to_test : Pandas dataframe
        cell cycle dataframe
    sorted_combinations : array of string lists
        each element of the array is a quadruplet of variable names
        corresponding to a decomposition. The list is sorted from
        best to worst decomposition
    sorted_vals : array of floats
        independence I value for sorted decompositions
    comb_ind : numpy array
        list of indices to plot
    renaming_fun : str
        name of function to use for renaming variables

    Returns
    -------
    fig : matplotlib handles
        matplotlib reference to plot
    """
    fig, axes = plt.subplots(figsize=(20,20))
    axes.set_axis_off()
    for ind, comb in enumerate(comb_ind):
        # All ordered pairs of this combination's variables -> flattened 3x3 grid.
        c = list(itertools.product(sorted_combinations[comb], sorted_combinations[comb]))
        c = [[str(x) for x in y] for y in c]
        # Pearson correlation for every variable pair, reshaped to a 3x3 matrix.
        pairwise = np.reshape([scipy.stats.pearsonr(coli_to_test[c[x][0]],coli_to_test[c[x][1]])[0]
                   for x in np.arange(len(c))],(3,3))
        names = np.array(np.split(np.array([renaming_fun(x) for x in c]),3))
        ax = fig.add_subplot(3, 3, ind+1)
        ax.imshow(pairwise,cmap = 'seismic',vmin=-1,vmax = 1)
        for i in range(names.shape[0]):
            for j in range(names.shape[0]):
                # White text on strongly saturated (|r| > 0.5) cells for contrast.
                if np.abs(pairwise[i,j])>0.5:
                    col = 'white'
                else:
                    col = 'black'
                #plt.text(x=i-0.1, y=j-0.2, s = names[i,j][0], color = col,size = 30)
                #plt.text(x=i-0.1, y=j+0.2, s = names[i,j][1], color = col, size = 30)
        # Row labels to the left of the matrix, column labels above it.
        for i in range(names.shape[0]):
            plt.text(x=-0.6, y=i+0.2, s = names[i,0][0], color = 'black',size = 35,
                     horizontalalignment = 'right')
            plt.text(x=i-0.0, y=-0.6, s = names[0,i][1], color = 'black',size = 35,
                     horizontalalignment = 'center')
        ax.set_axis_off()
        ax.set_title('I: '+str(np.around(sorted_vals[comb],3)),fontsize = 35, pad = 55)
        if ind==7:
            break
    fig.subplots_adjust(hspace = 0.4)
    #plt.show()
    return fig
def print_table_of_2_params(results, param_x, param_y, title="Some table"):
    """
    Prints table from the results dictionary, currently used only to print the tables for the heatmaps in the
    report.

    Rows are the sorted values of `param_x`, columns the sorted values of
    `param_y`; cells hold the mean test score in percent.
    """
    from numbers import Number
    cv_results = results['estimator'][0].cv_results_
    x_values, y_values = set(), set()
    for param_set in cv_results['params']:
        x_values.add(param_set[param_x])
        y_values.add(param_set[param_y])
    # Sort with non-numeric values first, mirroring the original key.
    def numeric_last(value):
        return (isinstance(value, Number), value)
    x_values = sorted(x_values, key=numeric_last)
    y_values = sorted(y_values, key=numeric_last)
    table = [[None] * len(y_values) for _ in x_values]
    for param_set, score in zip(cv_results["params"], cv_results["mean_test_score"]):
        row = x_values.index(param_set[param_x])
        col = y_values.index(param_set[param_y])
        table[row][col] = 100 * score
    print_table_title(title)
    print(tabulate.tabulate(table, headers=y_values, showindex=x_values))
def random_split(df: Union[DataFrame, Series], split_size: float,
                 shuffle: bool = True, random_state: int = None) -> Tuple[DataFrame, DataFrame]:
    """Optionally shuffles a DataFrame and splits it into 2 partitions according to split_size.
    Returns a tuple with the split first (partition corresponding to split_size, and remaining second).

    Args:
        df (DataFrame): A DataFrame to be split
        split_size (float): Fraction of the sample to be taken
        shuffle (bool): If True shuffles sample rows before splitting
        random_state (int): If an int is passed, the random process is reproducible using the provided seed

    Raises:
        AssertionError: if split_size is not in [0, 1] or random_state is invalid.
    """
    assert random_state is None or (isinstance(random_state, int) and random_state >=
                                    0), 'The random seed must be a non-negative integer or None.'
    assert 0 <= split_size <= 1, 'split_size must be a fraction, i.e. a float in the [0,1] interval.'
    if shuffle:  # Shuffle dataset rows
        sample = df.sample(frac=1, random_state=random_state)
    else:
        # BUGFIX: `sample` was previously unbound when shuffle=False,
        # raising NameError; split the original order instead.
        sample = df
    split_len = int(sample.shape[0] * split_size)
    split = sample.iloc[:split_len]
    remainder = sample.iloc[split_len:]
    return split, remainder
def create_discriminator_inputs(images, conditional_vectors):
    """
    Build discriminator inputs by concatenating images with condition images.

    Each condition vector is broadcast over the spatial dimensions of its
    image and appended along the channel axis.

    Args:
        images: image batch, shape (B, H, W, A)
        conditional_vectors: condition vectors, shape (B, C); may be a
            TensorFlow tensor or a numpy array

    Returns:
        Tensor combining images and condition images, shape (B, H, W, A + C)
            B: batch size, images.shape[0]
            H: image height, images.shape[1]
            W: image width, images.shape[2]
            A: number of image channels, images.shape[3]
            C: dimensionality of the condition vector
    """
    # Eager mode must be on; otherwise tensor.numpy() and slicing are hard to use.
    tf.config.experimental_run_functions_eagerly(True)
    conditional_images = np.zeros((images.shape[0], images.shape[1], images.shape[2], conditional_vectors.shape[-1]),
                                  dtype='float32')
    # NOTE(review): both branches perform the same reshape-and-broadcast; the
    # tensor branch only converts to numpy first. They could be merged.
    if tf.is_tensor(conditional_vectors):
        conditional_vectors = conditional_vectors.numpy()
        conditional_images[:, ] = conditional_vectors.reshape((images.shape[0], 1, 1, conditional_vectors.shape[-1]))
    else:
        conditional_images[:, ] = conditional_vectors.reshape((images.shape[0], 1, 1, conditional_vectors.shape[-1]))
    return tf.concat([images, conditional_images], axis=-1)
def gfs_scan_bands(filepath: str):
    """
    Helper function to scan through all bands in grib file and save their names into csv file
    ("bands.csv" in the current working directory).

    :param filepath: path to particular grib file.
    :raises ValueError: if `filepath` does not point to an existing file.
    """
    if not os.path.isfile(filepath):
        raise ValueError("Wrong filepath - file not found!")
    grib = gdal.Open(filepath)
    with open("bands.csv", 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        # BUGFIX: GDAL band indices are 1-based and inclusive of RasterCount;
        # range(1, RasterCount) silently skipped the last band.
        for i in range(1, grib.RasterCount + 1):
            band = grib.GetRasterBand(i)
            writer.writerow([str(i), band.GetMetadata()['GRIB_COMMENT'], band.GetDescription()])
        # Removed redundant csvfile.close(): the `with` block closes the file.
def add_to_master_list(single_list, master_list):
    """This function appends items in a list to the master list.

    :param single_list: List of dictionaries from the paginated query
    :type single_list: list
    :param master_list: Master list of dictionaries containing group information
    :type master_list: list
    :returns: The master list (mutated in place) with the appended data
    """
    # Idiom fix: list.extend replaces the manual append loop.
    master_list.extend(single_list)
    return master_list
def weight_by_attr(
    attr: str, prev_edge: Optional[models.Edge], edge: models.Edge
) -> float:
    """
    Generic weight function to retrieve a value from an edge.

    ``prev_edge`` is accepted (presumably to match a weight-function
    signature expected by callers -- confirm) but intentionally unused;
    the weight is simply ``edge.<attr>``.
    """
    return getattr(edge, attr)
def check_if_porous(structure: Structure, threshold: float = 2.4) -> Union[bool, None]:
    """Runs zeo++ to check if structure is porous according to the CoRE-MOF
    definition (PLD > 2.4, https://pubs.acs.org/doi/10.1021/acs.jced.9b00835)

    Args:
        structure (Structure): MOF structure to check
        threshold (float, optional): Threshold on the sphere diameter in Angstrom.
            Defaults to 2.4.

    Returns:
        bool: True if porous, False if not. None (with a warning) when the
        zeo++ "network" executable is not available.
    """
    if is_tool("network"):
        zeopp_results = run_zeopp(structure)
        # NOTE(review): the docstring mentions the PLD, but the value compared
        # here is the "lifs" key -- confirm which descriptor is intended.
        # bool(...) guards against returning a numpy bool to callers.
        return bool(zeopp_results["lifs"] >= threshold)
    warnings.warn(NO_ZEOPP_WARNING)
    return None
def load_vlay( #load a layer from a file
        fp,
        providerLib='ogr',
        logger=mod_logger):
    """Load a vector layer from file and sanity-check it.

    Returns the loaded QgsVectorLayer; raises Error when the layer is
    invalid or carries no geometry.
    """
    log = logger.getChild('load_vlay')
    assert os.path.exists(fp), 'requested file does not exist: %s'%fp
    layer_name = os.path.splitext(os.path.split(fp)[1])[0]
    # Import the vector layer; QGIS returns an object even on failure,
    # so validity must be checked explicitly.
    raw_layer = QgsVectorLayer(fp, layer_name, providerLib)
    if not raw_layer.isValid():
        log.error('loaded vlay \'%s\' is not valid. \n \n did you initilize?'%raw_layer.name())
        raise Error('vlay loading produced an invalid layer')
    # wkbType 100 == NoGeometry
    if raw_layer.wkbType() == 100:
        log.error('loaded vlay has NoGeometry')
        raise Error('no geo')
    #==========================================================================
    # report
    #==========================================================================
    provider = raw_layer.dataProvider()
    log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n     %s'
             %(raw_layer.name(), provider.storageType(), QgsWkbTypes().displayString(raw_layer.wkbType()), provider.featureCount(), fp))
    return raw_layer
def order_node_list(tree):
    """
    Sorts a list of node dicts from a LightGBM instance. Key `tree_structure` is
    specific for LightGBM.

    Parameters
    ----------
    tree : dict
        LightGBM tree dump containing a 'tree_structure' root node.

    Returns
    -------
    ordered_node_list : list
        Ordered list of node dicts compatible with `GbmModel`
    """
    # Seed the traversal with the root node; add_next_nodes fills the
    # ordered list breadth/depth-wise from there.
    root_nodes = [tree['tree_structure']]
    ordered_node_list = []
    add_next_nodes(ordered_node_list, root_nodes)
    return ordered_node_list
def test_build(topology):
    """
    Test automatic build and unbuild of the topology using pytest plugin.
    """
    assert config.pluginmanager.getplugin('topology')
    assert isinstance(topology, TopologyManager)
    # Every declared node must be retrievable after the build.
    for node_name in ('sw1', 'sw2', 'hs1', 'hs2'):
        assert topology.get(node_name) is not None
def teardown_request_wrap(exception):
    """
    Prints tracebacks and handles bugs
    """
    # Log the active traceback only when an exception was passed in.
    if exception:
        logging.error(traceback.format_exc())
    payload = {"result": None, 'error': {'message': 'Invalid request'}, 'id': 1}
    return json.dumps(payload)
def sanitize_parameters(func):
    """Sets any queryparams in the kwargs"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            logging.info(f'[middleware] [sanitizer] args: {args}')
            raw_args = dict(request.args)
            # Strip params that must not reach the wrapped view (e.g. loggedUser).
            kwargs['params'] = remove_keys(['loggedUser'], raw_args)
        except GeostoreNotFound:
            return error(status=404, detail='body params not found')
        return func(*args, **kwargs)
    return wrapper
def build_render_setup(cfg):
    """Build information struct about the rendering setup from a configuration.

    This performs type conversion to the expected types. Paths contained in
    cfg are expected to be already expanded. That is, it should not contain
    global variables or other system dependent abbreviations.

    Args:
        cfg (dict): dictionary with Dataset configuration

    Returns:
        dict: with key 'backend' and, for the 'blender-cycles' backend,
            'samples', 'integrator', 'denoising', 'allow_occlusions' and
            'motion_blur'.

    Raises:
        None
    """
    render_setup = dict()
    render_setup['backend'] = str(cfg['backend'])
    if render_setup['backend'] == 'blender-cycles':
        render_setup['samples'] = float(cfg['samples'])
        render_setup['integrator'] = str(cfg['integrator'])
        render_setup['denoising'] = bool(cfg['denoising'])
        if 'allow_occlusions' in cfg and 'motion_blur' in cfg:
            render_setup['allow_occlusions'] = bool(cfg['allow_occlusions'])
            render_setup['motion_blur'] = bool(cfg['motion_blur'])
        else:
            # Old dataset versions lack these keys. Set BOTH to a falsy
            # default so later lookups never KeyError (the previous code
            # could leave 'motion_blur' missing or clobber
            # 'allow_occlusions' with '' when only one key was absent).
            render_setup['allow_occlusions'] = False
            render_setup['motion_blur'] = False
            logger.warning('Dataset does not contain occlusions/blur info. It might be an old dataset version.')
    else:
        # warn() is a deprecated alias of warning()
        logger.warning('Loading dataset which have not been rendered with ABR')
    return render_setup
def generate_coupled_image_from_self(img, out_img, noise_amp=10):
    """
    Generates an input image for siam by concatenating an image with a transformed version of itself

    Args:
        img (str): path to the input TIFF image (the "current" frame).
        out_img (str): path where the concatenated 8-bit output is written.
        noise_amp (int, optional): std-dev of the Gaussian noise added to
            the synthesized frame. Defaults to 10.
    """
    def __synthesize_prev_img(in_img, noise_amp=10):
        """Synthesizes previous frame by transforming the input image
        Args:
            in_img (str): input image path
            noise_amp (int, optional): Defaults to 10.
        Returns:
            2-D ndarray: the synthesized previous image
        """
        data = tifffile.imread(in_img)
        image = data
        # Number of sinusoidal deformation modes along x and y.
        modes_x, modes_y = 10, 4
        amp = 1
        # Random per-mode amplitudes in [0, amp) for the warping field.
        amps_x, amps_y = np.random.random_sample(modes_x)*amp, np.random.random_sample(modes_y)*amp
        def func(xy):
            # Sinusoidal displacement of each output coordinate; emulates
            # small non-rigid motion between consecutive frames.
            return (xy[0]+ np.sum(amps_y * np.sin(modes_y*2*np.pi*xy[0]/image.shape[0])), xy[1] + np.sum(amps_x * np.sin(modes_x*2*np.pi*xy[1]/image.shape[1])))
        out = geometric_transform(image, func)
        noise = np.random.normal(0, noise_amp, size=image.shape)
        out = out +noise
        # Clamp to the valid 8-bit intensity range.
        out[out<0] = 0
        out[out>255] = 255
        return out
    # NOTE(review): `img` is read twice (here and inside the helper); could
    # be hoisted into a single read — confirm before changing.
    curr_frame = tifffile.imread(img)
    synthesized_previous_frame = __synthesize_prev_img(img, noise_amp)
    if curr_frame is None:
        # NOTE(review): tifffile.imread raises on a missing file rather than
        # returning None, so this guard is likely dead code — confirm.
        raise IOError # tiff file not found
    # Side-by-side concatenation: [synthesized previous | current] frame.
    out = np.concatenate((synthesized_previous_frame, curr_frame), axis=1).astype(np.uint8)
    cv2.imwrite(filename=out_img, img=out, ) | 5,323,979 |
def pca_preprocess(df, pca_components):
    """Preprocess the given dataframe using PCA.

    Drops rows with missing values, standardizes the features, projects
    them onto `pca_components` principal components and re-attaches the
    'ASPFWR5' target column (original index preserved).
    """
    # Drop incomplete rows in place, mirroring the original behaviour.
    df.dropna(axis=0, inplace=True)
    # Split into features and target.
    features = df.drop('ASPFWR5', axis=1)
    target = df['ASPFWR5']
    # Standardize, then reduce dimensionality with PCA.
    scaled = StandardScaler().fit_transform(features)
    reduced = PCA(n_components=pca_components).fit_transform(scaled)
    # Rebuild a frame keyed by the original (datetime) index and merge back.
    out = pd.DataFrame(reduced, index=target.index)
    out['ASPFWR5'] = target
    return out
def add_nonce(func):
    """Helper function which adds a nonce to the kwargs dict"""
    @wraps(func)
    def inner(*args, **kwargs):
        # Millisecond-resolution UTC timestamp; caller-supplied nonce wins.
        if "nonce" not in kwargs:
            stamp = datetime.datetime.utcnow().timestamp()
            kwargs["nonce"] = int(stamp * 1000)
        return func(*args, **kwargs)
    return inner
def build_train_dict(config_file: str, task: str) -> Dict[str, Any]:
    """
    Read the configuration file given by the user.
    If it is a TOML file, ensures that the format corresponds to the one in resources.

    Args:
        config_file: path to a configuration file (JSON of TOML).
        task: task learnt by the network (example: classification, regression, reconstruction...).

    Returns:
        dictionary of values ready to use for the MapsManager

    Raises:
        ClinicaDLConfigurationError: if the extension is neither TOML nor
            JSON, or if the TOML file has unknown sections/options.
    """
    if config_file is None:
        # No user file: use the shipped defaults for this task.
        config_dict = remove_unused_tasks(_load_default_config(), task)
        return _flatten_config(config_dict)

    if config_file.endswith(".toml"):
        user_dict = toml.load(config_file)
        if "Random_Search" in user_dict:
            del user_dict["Random_Search"]
        config_dict = _load_default_config()
        # Check that the TOML file has the same format as the one in
        # clinicadl/resources/config/train_config.toml, overriding defaults
        # with validated user values.
        if user_dict is not None:
            for section_name in user_dict:
                if section_name not in config_dict:
                    raise ClinicaDLConfigurationError(
                        f"{section_name} section is not valid in TOML configuration file. "
                        f"Please see the documentation to see the list of option in TOML configuration file."
                    )
                for key in user_dict[section_name]:
                    if key not in config_dict[section_name]:
                        raise ClinicaDLConfigurationError(
                            f"{key} option in {section_name} is not valid in TOML configuration file. "
                            f"Please see the documentation to see the list of option in TOML configuration file."
                        )
                    config_dict[section_name][key] = user_dict[section_name][key]
        config_dict = remove_unused_tasks(config_dict, task)
        return _flatten_config(config_dict)

    if config_file.endswith(".json"):
        return read_json(config_file)

    raise ClinicaDLConfigurationError(
        f"config_file {config_file} should be a TOML or a JSON file."
    )


def _load_default_config() -> Dict[str, Any]:
    """Load the default TOML training configuration shipped in resources."""
    clinicadl_root_dir = os.path.abspath(os.path.join(__file__, "../.."))
    config_path = os.path.join(
        clinicadl_root_dir,
        "resources",
        "config",
        "train_config.toml",
    )
    return toml.load(config_path)


def _flatten_config(config_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Flatten a sectioned config dict into a single-level train dict."""
    train_dict = dict()
    for config_section in config_dict:
        for key in config_dict[config_section]:
            train_dict[key] = config_dict[config_section][key]
    return train_dict
def lint() -> None:
    """Run linter checks.

    Delegates to the project ``execute`` helper to run ``flake8``; the last
    argument is the remediation hint shown when the linter reports issues.
    """
    execute("lint", ["flake8"], "Address any remaining flake8 issues manually.") | 5,323,983 |
def morse_encode(string):
    """Converts a string to morse code"""
    # Encode word-by-word, then rejoin the encoded words with single spaces.
    return ' '.join(morse_encode_word(word) for word in string.split(' '))
def examples():
    """Load example paths."""
    # Pair each loaded example JSON with its originating path.
    pattern = os.path.join(RESOURCE_DIR, "examples", "*.json")
    loaded = []
    for path in glob.glob(pattern):
        loaded.append((loader(path), path))
    return loaded
def TIMES_cleanup (file, Model_Module):
    """Clean data generated by Oasis TIMES.

    Returns a dataframe with the DTXSID of the parent compound and the InChI
    key of each metabolite. ``Model_Module`` should be a string designating
    the model used for metabolism (e.g. TIMES_RatLiverS9, TIMES_RatInVivo);
    it becomes an indicator column (value 1) in the returned frame, whose
    columns are ['DTXSID', 'Metabolite_INCHIKEY', 'Clean_SMILES', Model_Module].
    """
    """The Model_Module argument should be a string to designate the model used for metabolism (e.g., TIMES_RatLiver S9, TIMES_RatInVivo"""
    df = []
    df = pd.read_csv(file, delimiter = "\t", usecols = ['Chem. Name', 'Smiles']) #Reads 'Chem. Name' and 'Smiles' columns from the tab-delimited TIMES file
    df = df.rename(columns={'Chem. Name':'DTXSID'}) #Renames 'Chem. Name' to 'DTXSID'
    df = df[:-1] #Remove empty bottom row
    df = df[1:] #Remove empty top row
    df['Smiles'] = df['Smiles'].str.replace('{','[').str.replace('}',']') #TIMES exports SMILES with curly brackets; convert them back to square brackets
    df[Model_Module] = 1 #Indicator column flagging the model that generated each metabolite
    df['DTXSID'].replace({' ': num.NaN}, inplace = True) #Normalize single-space DTXSID cells to NaN so they can be forward-filled below (assumes blanks are exactly one space — TODO confirm)
    df['Metabolite_INCHIKEY'] = num.NaN #Initialize column for the metabolite InChI key
    metabList = df.Smiles[df['DTXSID'].isnull()] #SMILES of metabolite rows (rows without a DTXSID of their own)
    df['Metabolite_INCHIKEY'] = SMILES_to_InchiKey(metabList) #Converts metabolite SMILES to InChI keys; parent rows stay NaN
    df['DTXSID'] = df['DTXSID'].fillna(method = 'ffill') #Forward-fill so each metabolite inherits the preceding parent's DTXSID
    df = df[df['Metabolite_INCHIKEY'].notnull()] #Drop parent entries, which remain NaN in the metabolite InChI key column
    df = df.drop_duplicates()
    df['Clean_SMILES'] = clean_SMILES(df['Smiles'])
    return df[['DTXSID','Metabolite_INCHIKEY','Clean_SMILES', Model_Module]]; | 5,323,986 |
def generate_key():
    """Generate a key for our cipher.

    Returns:
        dict: mapping from each character in the module-level ``chars``
        alphabet to a distinct character of the same alphabet (a random
        permutation, i.e. a substitution-cipher key).
    """
    # random.sample draws a full-length random permutation directly;
    # clearer than the old sorted(..., key=lambda k: random.random()) trick.
    shuffled = random.sample(list(chars), k=len(chars))
    return dict(zip(chars, shuffled))
def get_sdkconfig_value(sdkconfig_file, key):
"""
Return the value of given key from sdkconfig_file.
If sdkconfig_file does not exist or the option is not present, returns None.
"""
assert key.startswith('CONFIG_')
if not os.path.exists(sdkconfig_file):
return None
# keep track of the last seen value for the given key
value = None
# if the value is quoted, this excludes the quotes from the value
pattern = re.compile(r"^{}=\"?([^\"]*)\"?$".format(key))
with open(sdkconfig_file, 'r') as f:
for line in f:
match = re.match(pattern, line)
if match:
value = match.group(1)
return value | 5,323,988 |
def unmatched(match):
    """Return unmatched part of re.Match object."""
    # Everything before the match plus everything after it.
    begin, finish = match.span(0)
    subject = match.string
    return subject[:begin] + subject[finish:]
def cubicgw(ipparams, width, etc=None):
    """
    This function fits the variation in Gaussian-measured PRF half-widths using a 2D cubic.

    Parameters
    ----------
    ipparams : sequence
        [x1, x2, x3, y1, y2, y3, c, s0]:
        x1/x2/x3: linear, quadratic and cubic coefficients in x
        y1/y2/y3: linear, quadratic and cubic coefficients in y
        c : constant offset
        s0: reference half-width both cubics are evaluated about
    width : sequence
        (sy, sx) Gaussian half-widths in y and x.
    etc : optional
        Unused; kept for interface compatibility. (Previously a mutable
        `[]` default — replaced by None to avoid the shared-default pitfall.)

    Returns
    -------
    The flux values for the intra-pixel model.

    Revisions
    ---------
    2018-11-16  Kevin Stevenson, STScI
                kbs@stsci.edu
                Original version
    """
    x1, x2, x3, y1, y2, y3, c, s0 = ipparams[:8]
    sy, sx = width
    # Hoist the deltas: both cubics are polynomials in (width - s0).
    dx = sx - s0
    dy = sy - s0
    return (x1*dx + x2*dx**2 + x3*dx**3
            + y1*dy + y2*dy**2 + y3*dy**3 + c)
def _make_parser():
""" Generates argument parser with all necessarry parameters.
:returns script's arguments (host, port, index, type, id,
searchserver, server, stdin, pipeline)
:rtype argparse.ArgumentParser
"""
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter) # noqa
inputgroup = p.add_mutually_exclusive_group(required=True)
inputgroup.add_argument('-server', type=str, # noqa
help="use http://host:port/index/type. "
"Defines the Elasticsearch node and its index "
"for the input data")
inputgroup.add_argument('-stdin', action="store_true", # noqa
help="get data from stdin. Might be used with -pipeline.")
p.add_argument('-pipeline', action="store_true",
help="output every record (even if not enriched) "
"to put this script into a pipeline")
p.add_argument('-searchserver', type=str,
help="use http://host:port/index/type "
"to provide a local Elasticsearch instance "
"with entityfacts in the specified index")
p.add_argument('-ignhub', action="store_true",
help="ignore hub.culturegraph.org. Here a local "
"searchserver must be provided.")
return p | 5,323,991 |
def main():
    """
    This function displays all the gui elements of the music recommender system

    Parameters:
        -

    Returns:
        df_list (DataFrame): the list of input audio entered by the user
    """
    # Session-persistent dict of selected audio files (keys == values == paths).
    fileslist = get_static_store()
    folderPath = col1.text_input('Enter folder path:')
    # Seaborn colormap used to style the dataframe; reassigned to green below
    # when the list is actually displayed.
    cm = sns.light_palette("blue", as_cmap=True)
    if folderPath:
        filename = file_selector(folderPath)
        # Only register files that are not already in the session store.
        if not filename in fileslist.values():
            fileslist[filename] = filename
    else:
        fileslist.clear() # Hack to clear list if the user clears the cache and reloads the page
        col1.info("Select an audio file")
        df_list = pd.DataFrame(columns = ['Title'])
    # clear list on demand
    if col1.button("Clear music list"):
        fileslist.clear()
        df_list = list(fileslist.keys())
    # show list
    if col1.checkbox("Show music list?", True):
        # transform list into dataframe for ease of use
        df_list = pd.DataFrame(columns = ['Title'])
        df_list['Title'] = list(fileslist.keys())
        # color palette from seaborn
        cm = sns.light_palette("green", as_cmap=True)
        col1.dataframe(df_list.style.background_gradient(cmap=cm).set_precision(2))
    # NOTE(review): if a folder path is given but "Show music list?" is
    # unchecked and the button not pressed, df_list is never assigned on this
    # run — confirm Streamlit rerun semantics make this unreachable.
    return df_list | 5,323,992 |
def get_columns_by_type(df, req_type):
    """
    get columns by type of data frame

    Parameters:
        df : data frame
        req_type : dtype name, e.g. 'int64', 'float64', 'object'

    Returns:
        Index of matching column names, or None when no column has that dtype
    """
    # Group column names by their dtype, then key the result by dtype name.
    grouped = df.columns.to_series().groupby(df.dtypes).groups
    by_name = {dtype.name: cols for dtype, cols in grouped.items()}
    return by_name.get(req_type)
def print_atoms(inp_file, atoms, fixed_atom_idxs):
    """Print the atoms to the input file depending on whether they are fixed"""
    for idx, atom in enumerate(atoms):
        x, y, z = atom.coord
        # Fixed atoms get flag 0 (frozen coordinate); free atoms get flag 1.
        flag = 0 if idx in fixed_atom_idxs else 1
        print(f'{atom.label:<3}{x:^10.5f} {flag} {y:^10.5f} {flag} {z:^10.5f} {flag}',
              file=inp_file)
    return
def get_total(lines):
    """
    This function takes in a list of lines and returns
    a single float value that is the total of a particular
    variable for a given year and tech.

    Parameters:
    -----------
    lines : list
        This is a list of datalines that we want to total.

    Returns:
    --------
    total : float
        This is the sum total from the data lines.
    """
    # Each line's first whitespace-separated token is the numeric value.
    return sum((float(entry.split()[0]) for entry in lines), 0.0)
def get_ipv6_by_ids(ip_ids):
    """Get Many Ipv6.

    Args:
        ip_ids: iterable of IPv6 record ids.

    Returns:
        list: one `get_ipv6_by_id` result per id, in input order.
    """
    # Comprehension instead of the manual append loop (same calls, same order).
    return [get_ipv6_by_id(ip_id) for ip_id in ip_ids]
def box(t, t_start, t_stop):
    """Box-shape (Theta-function)

    The shape is 0 before `t_start` and after `t_stop` and 1 elsewhere
    (both endpoints inclusive).

    Args:
        t (float): Time point or time grid
        t_start (float): First value of `t` for which the box has value 1
        t_stop (float): Last value of `t` for which the box has value 1

    Note:
        You may use :class:`numpy.vectorize`, :func:`functools.partial`, or
        :func:`qutip_callback`, cf. :func:`flattop`.
    """
    return 1.0 if t_start <= t <= t_stop else 0.0
def main():
    """
    The main function, where the program starts.

    Creates the player via ``start_story``, runs every team's adventure in a
    random order, then closes with ``end_story``.

    :return: None
    """
    user = start_story()
    # NOTE(review): `places_to_go()` is CALLED here while every other entry is
    # a bare function reference; `paths[i]()` below will then invoke whatever
    # it returned. Confirm places_to_go() returns a callable — otherwise the
    # parentheses are a bug.
    paths = [scott_adventure, places_to_go(), team_2_adv,
             team_3_adv, team_4_adv, team_5_adv,
             team_6_adv, team_7_adv, team_8_adv,
             team_9_adv, team_10_adv, cullomn_whitfordr,
             westth_benningfield, team_13_adv, team_14_adv,
             team_15_prattw_vankirkj, dovranovs_adventure, team_17_adv,
             team_18_adv, team_19_adv, team_20_adv]
    random.shuffle(paths) # Shuffles the order of paths, so each adventure is different
    for i in range(len(paths)):
        paths[i]() # Runs each function in the paths list
    end_story(user) | 5,323,998 |
def updateBaselines(product, date:datetime, n_workers=20, block_scale_factor= 1, time=False) -> dict:
    """Updates anomaly baselines
    ***

    Parameters
    ----------
    product:str
        Product name (used for folder layout and output filenames)
    date:datetime
        Observation date whose matching baseline slot is updated
    n_workers:int
        Size of the multiprocessing pool for the windowed computation
    block_scale_factor:int
        Multiplier on the raster's native block size for processing windows
    time:bool
        When True, log the total elapsed runtime

    Returns
    -------
    Dictionary with the following key/value pairs:
        product:str
            Product name
        paths:tuple
            Tuple of filepaths of the anomalybaseline
            files that were updated
    """
    startTime = datetime.now()
    # create dict of anomaly baseline folders for each baseline type
    baseline_locations = {anomaly_type:os.path.join(BASELINE_DIR,product,anomaly_type) for anomaly_type in ["mean_5year","median_5year",'mean_10year','median_10year']}
    # get list of input data files
    input_paths = _listFiles(product,date)
    # check to make sure we got at least 10; fewer means the baselines
    # would be statistically meaningless
    if len(input_paths) < 10:
        raise UnavailableError(f"Only {len(input_paths)} input image paths found")
    # get raster metadata and dimensions from the first input (all inputs
    # are presumably on the same grid — TODO confirm)
    with rasterio.open(input_paths[0]) as tempmeta:
        metaprofile = tempmeta.profile
        width = tempmeta.width
        height = tempmeta.height
    # add BIGTIFF where necessary
    if product in NDVI_PRODUCTS:
        metaprofile['BIGTIFF'] = 'YES'
    # set output filenames
    output_date = _getMatchingBaselineDate(product,date)
    mean_5yr_name = os.path.join(baseline_locations["mean_5year"], f"{product}.{output_date}.anomaly_mean_5year.tif")
    median_5yr_name = os.path.join(baseline_locations["median_5year"], f"{product}.{output_date}.anomaly_median_5year.tif")
    mean_10yr_name = os.path.join(baseline_locations["mean_10year"], f"{product}.{output_date}.anomaly_mean_10year.tif")
    median_10yr_name = os.path.join(baseline_locations["median_10year"],f"{product}.{output_date}.anomaly_median_10year.tif")
    # open output handles
    # NOTE(review): these handles are not closed if an exception occurs
    # below; consider a with-block or contextlib.ExitStack — confirm.
    log.debug("Opening handles")
    mean_5yr_handle = rasterio.open(mean_5yr_name, 'w', **metaprofile)
    median_5yr_handle = rasterio.open(median_5yr_name, 'w', **metaprofile)
    mean_10yr_handle = rasterio.open(mean_10yr_name, 'w', **metaprofile)
    median_10yr_handle = rasterio.open(median_10yr_name, 'w', **metaprofile)
    # set block size and get windows
    blocksize = metaprofile['blockxsize'] * int(block_scale_factor)
    windows = getWindows(width,height,blocksize)
    # use windows to create parallel args
    parallel_args = [(w, input_paths, metaprofile['dtype']) for w in windows]
    # do multiprocessing; each worker returns per-window mean/median bands,
    # written here sequentially into the four outputs
    p = multiprocessing.Pool(n_workers)
    for win, values in p.imap(_mp_worker, parallel_args):
        mean_5yr_handle.write(values['mean_5year'], window=win, indexes=1)
        median_5yr_handle.write(values['median_5year'], window=win, indexes=1)
        mean_10yr_handle.write(values['mean_10year'], window=win, indexes=1)
        median_10yr_handle.write(values['median_10year'], window=win, indexes=1)
    ## close pool
    p.close()
    p.join()
    ## close handles
    mean_5yr_handle.close()
    median_5yr_handle.close()
    mean_10yr_handle.close()
    median_10yr_handle.close()
    # cloud-optimize new anomalies
    log.debug("Converting baselines to cloud-optimized geotiffs and ingesting to S3")
    # cloud-optimize outputs
    output_paths = (mean_5yr_name, median_5yr_name, mean_10yr_name, median_10yr_name)
    p = multiprocessing.Pool(len(output_paths))
    # NOTE(review): imap is lazy and its results are never consumed, so
    # errors from cloud_optimize_inPlace are silently dropped; p.map would
    # block until done and surface exceptions — confirm intent.
    p.imap(cloud_optimize_inPlace,output_paths)
    ## close pool
    p.close()
    p.join()
    # if time==True, log total time for anomaly generation
    endTime = datetime.now()
    if time:
        log.info(f"Finished in {endTime-startTime}")
    # return dict
    return {'product':product, 'paths':output_paths} | 5,323,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.