| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: a289d9f60ab55aac5358d7c9a198ff79b014f668 | size: 2,590 | ext: py | lang: Python
repo_path: sd_material_ui/CircularProgress.py | repo_name: Michael-fore/sd-material-ui | repo_head_hexsha: 9496d02821c7a25c71fc9d03729f551432bf55f5 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class CircularProgress(Component):
"""A CircularProgress component.
Material UI CircularProgress component
Keyword arguments:
- classes (dict; optional): The classes to be applied to this component. This keys in this object must be valid CSS rule
names, and the values must be strings for the classnames to be assigned to each rule name
Valid rule names are:
root
static
indeterminate
colorPrimary
colorSecondary
circle
circleStatic
circleIndeterminate
circleDisableShrink. classes has the following type: dict containing keys 'root', 'static', 'indeterminate', 'colorPrimary', 'colorSecondary', 'circle', 'circleStatic', 'circleIndeterminate', 'circleDisableShrink'.
Those keys have the following types:
- root (string; optional)
- static (string; optional)
- indeterminate (string; optional)
- colorPrimary (string; optional)
- colorSecondary (string; optional)
- circle (string; optional)
- circleStatic (string; optional)
- circleIndeterminate (string; optional)
- circleDisableShrink (string; optional)
- color (string; default 'inherit'): Override the progress's color
- mode (default 'indeterminate'): The mode of show your progress, for now, will always be indeterminate
- size (number; default 40): The diameter of the progress in pixels
- style (dict; optional): Override the inline-style of the root element
- thickness (number; default 3.5): Stroke width in pixels"""
@_explicitize_args
def __init__(self, classes=Component.UNDEFINED, color=Component.UNDEFINED, mode=Component.UNDEFINED, size=Component.UNDEFINED, style=Component.UNDEFINED, thickness=Component.UNDEFINED, innerStyle=Component.UNDEFINED, **kwargs):
self._prop_names = ['classes', 'color', 'mode', 'size', 'style', 'thickness']
self._type = 'CircularProgress'
self._namespace = 'sd_material_ui'
self._valid_wildcard_attributes = []
self.available_properties = ['classes', 'color', 'mode', 'size', 'style', 'thickness']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(CircularProgress, self).__init__(**args)
avg_line_length: 45.438596 | max_line_length: 231 | alphanum_fraction: 0.708494
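The docstring above already spells out the component's props (classes, color, size, style, thickness). As a rough illustration of how such a Dash component wrapper is typically mounted, here is a minimal sketch; it assumes dash and the sd_material_ui package are installed, and the layout text, size, thickness, and debug flag are arbitrary choices rather than anything from the repository.

```python
# Minimal sketch: mounting the CircularProgress wrapper in a Dash layout.
# Assumes `dash` and `sd_material_ui` are installed; values are illustrative.
import dash
from dash import html
import sd_material_ui

app = dash.Dash(__name__)

app.layout = html.Div([
    html.P("Loading..."),
    # Props documented in the docstring above: color, size (px), thickness (px).
    sd_material_ui.CircularProgress(color="inherit", size=60, thickness=5),
])

if __name__ == "__main__":
    app.run_server(debug=True)
```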
hexsha: ed6ed1e9012a0538265e7fc65dda0cba3b6c04d3 | size: 895 | ext: py | lang: Python
repo_path: Example/Python/Subscribe.py | repo_name: irkasper/MQL-CopyTrade | repo_head_hexsha: 0f09794632319a9a755616ebcdf5c876cc1eb8c4 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 67 (events 2019-04-26T07:17:55.000Z to 2022-03-16T07:57:28.000Z) | max_issues_count: 11 (events 2019-08-13T14:41:43.000Z to 2021-12-20T09:05:21.000Z) | max_forks_count: 52 (events 2019-04-22T02:00:57.000Z to 2022-01-21T15:38:01.000Z)
content:
#!/usr/bin/env python
import sys
import time
import zmq
import numpy
def main():
connect_to = "tcp://127.0.0.1:5559"
topics = ""
ctx = zmq.Context()
s = ctx.socket(zmq.SUB)
s.connect(connect_to)
s.setsockopt(zmq.SUBSCRIBE, b'')
try:
while True:
recv = s.recv_multipart()
recvMsg = recv[0].decode("utf-8")
message = recvMsg.split(" ")
order = message[1].split("|")
v_action = order[0]
v_symbol = order[1]
v_ticket = order[2]
v_type = order[3]
v_openprice = order[4]
v_closeprice = order[5]
v_lots = order[6]
v_sl = order[7]
v_tp = order[8]
print("Action: ", v_action, ", Symbol: ", v_symbol)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
avg_line_length: 22.375 | max_line_length: 63 | alphanum_fraction: 0.507263
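Subscribe.py parses each incoming frame as "&lt;topic&gt; &lt;order&gt;", where the order part is a `|`-joined list of action, symbol, ticket, type, open price, close price, lots, SL, and TP. A minimal publisher sketch that emits a message in that shape follows; the bind address mirrors the subscriber's connect endpoint, while the topic name and order values are invented placeholders, not taken from MQL-CopyTrade.

```python
# Minimal publisher sketch matching the subscriber's parsing logic: one frame of
# "<topic> <action>|<symbol>|<ticket>|<type>|<openprice>|<closeprice>|<lots>|<sl>|<tp>".
# Topic and order values are invented placeholders.
import time
import zmq

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind("tcp://127.0.0.1:5559")

time.sleep(1)  # give the subscriber a moment to connect before publishing

order = "|".join(["OPEN", "EURUSD", "123456", "0",
                  "1.1000", "0.0", "0.10", "1.0950", "1.1100"])
pub.send_multipart([("orders " + order).encode("utf-8")])
```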
hexsha: 593b6b72e619ee7bf1098ad9979a637b7b236ade | size: 4,648 | ext: py | lang: Python
repo_path: repocracy/repo/migrations/0005_auto__add_unique_repository_slug.py | repo_name: repocracy/repocracy | repo_head_hexsha: 5ca22ae23ad9a2eae4bb4c712886aff634b8ad0e | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (events 2015-08-10T02:33:49.000Z to 2015-08-10T02:33:49.000Z) | max_issues_count: null | max_forks_count: null
content:
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Repository', fields ['slug']
db.create_unique('repo_repository', ['slug'])
def backwards(self, orm):
# Removing unique constraint on 'Repository', fields ['slug']
db.delete_unique('repo_repository', ['slug'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'repo.repository': {
'Meta': {'object_name': 'Repository'},
'claim_hash': ('django.db.models.fields.CharField', [], {'default': "'da39a3ee5e6b4b0d3255bfef95601890afd80709'", 'max_length': '40'}),
'fs_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['repo']
avg_line_length: 63.671233 | max_line_length: 182 | alphanum_fraction: 0.567771
hexsha: 9125087e915f447445b00bd2c2db21f1ae06b671 | size: 21,040 | ext: py | lang: Python
repo_path: tools/accuracy_checker/accuracy_checker/metrics/reid.py | repo_name: gnomonsis/open_model_zoo | repo_head_hexsha: 72dbeb263cab19e7e232793246521e96f3821959 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (events 2020-08-06T23:08:32.000Z to 2020-08-06T23:08:32.000Z) | max_issues_count: 4 (events 2021-04-30T21:21:07.000Z to 2022-01-13T03:12:46.000Z) | max_forks_count: null
content:
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
from collections import defaultdict, namedtuple
import numpy as np
from ..representation import (
ReIdentificationClassificationAnnotation,
ReIdentificationAnnotation,
ReIdentificationPrediction
)
from ..config import BaseField, BoolField, NumberField
from .metric import FullDatasetEvaluationMetric
try:
from sklearn.metrics import auc, precision_recall_curve
except ImportError:
auc, precision_recall_curve = None, None
PairDesc = namedtuple('PairDesc', 'image1 image2 same')
def _average_binary_score(binary_metric, y_true, y_score):
def binary_target(y):
return not (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1)
if binary_target(y_true):
return binary_metric(y_true, y_score)
y_true = y_true.ravel()
y_score = y_score.ravel()
n_classes = y_score.shape[1]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=1).ravel()
y_score_c = y_score.take([c], axis=1).ravel()
score[c] = binary_metric(y_true_c, y_score_c)
return score
class CMCScore(FullDatasetEvaluationMetric):
"""
Cumulative Matching Characteristics (CMC) score.
Config:
annotation: reid annotation.
prediction: predicted embeddings.
top_k: number of k highest ranked samples to consider when matching.
separate_camera_set: should identities from the same camera view be filtered out.
single_gallery_shot: each identity has only one instance in the gallery.
number_single_shot_repeats: number of repeats for single_gallery_shot setting.
first_match_break: break on first matched gallery sample.
"""
__provider__ = 'cmc'
annotation_types = (ReIdentificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'top_k': NumberField(
value_type=int, min_value=1, default=1, optional=True,
description="Number of k highest ranked samples to consider when matching."
),
'separate_camera_set': BoolField(
optional=True, default=False, description="Should identities from the same camera view be filtered out."
),
'single_gallery_shot': BoolField(
optional=True, default=False, description="Each identity has only one instance in the gallery."
),
'first_match_break': BoolField(
optional=True, default=True, description="Break on first matched gallery sample."
),
'number_single_shot_repeats': NumberField(
value_type=int, optional=True, default=10,
description="Number of repeats for single_gallery_shot setting (required for CUHK)."
)
})
return parameters
def configure(self):
self.top_k = self.get_value_from_config('top_k')
self.separate_camera_set = self.get_value_from_config('separate_camera_set')
self.single_gallery_shot = self.get_value_from_config('single_gallery_shot')
self.first_match_break = self.get_value_from_config('first_match_break')
self.number_single_shot_repeats = self.get_value_from_config('number_single_shot_repeats')
def evaluate(self, annotations, predictions):
dist_matrix = distance_matrix(annotations, predictions)
if np.size(dist_matrix) == 0:
warnings.warn('Gallery and query ids are not matched. CMC score can not be calculated.')
return 0
gallery_cameras, gallery_pids, query_cameras, query_pids = get_gallery_query_pids(annotations)
_cmc_score = eval_cmc(
dist_matrix, query_pids, gallery_pids, query_cameras, gallery_cameras, self.separate_camera_set,
self.single_gallery_shot, self.first_match_break, self.number_single_shot_repeats
)
return _cmc_score[self.top_k - 1]
class ReidMAP(FullDatasetEvaluationMetric):
"""
Mean Average Precision score.
Config:
annotation: reid annotation.
prediction: predicted embeddings.
interpolated_auc: should area under precision recall curve be computed using trapezoidal rule or directly.
"""
__provider__ = 'reid_map'
annotation_types = (ReIdentificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'interpolated_auc': BoolField(
optional=True, default=True, description="Should area under precision recall"
" curve be computed using trapezoidal rule or directly."
)
})
return parameters
def configure(self):
self.interpolated_auc = self.get_value_from_config('interpolated_auc')
def evaluate(self, annotations, predictions):
dist_matrix = distance_matrix(annotations, predictions)
if np.size(dist_matrix) == 0:
warnings.warn('Gallery and query ids are not matched. ReID mAP can not be calculated.')
return 0
gallery_cameras, gallery_pids, query_cameras, query_pids = get_gallery_query_pids(annotations)
return eval_map(
dist_matrix, query_pids, gallery_pids, query_cameras, gallery_cameras, self.interpolated_auc
)
class PairwiseAccuracy(FullDatasetEvaluationMetric):
__provider__ = 'pairwise_accuracy'
annotation_types = (ReIdentificationClassificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'min_score': BaseField(
optional=True, default='train_median',
description="Min score for determining that objects are different. "
"You can provide value or use train_median value which will be calculated "
"if annotations has training subset."
)
})
return parameters
def configure(self):
self.min_score = self.get_value_from_config('min_score')
def evaluate(self, annotations, predictions):
embed_distances, pairs = get_embedding_distances(annotations, predictions)
if not pairs:
return np.nan
min_score = self.min_score
if min_score == 'train_median':
train_distances, _train_pairs = get_embedding_distances(annotations, predictions, train=True)
min_score = np.median(train_distances)
embed_same_class = embed_distances < min_score
accuracy = 0
for i, pair in enumerate(pairs):
same_label = pair.same
out_same = embed_same_class[i]
correct_prediction = same_label and out_same or (not same_label and not out_same)
if correct_prediction:
accuracy += 1
return float(accuracy) / len(pairs)
class PairwiseAccuracySubsets(FullDatasetEvaluationMetric):
__provider__ = 'pairwise_accuracy_subsets'
annotation_types = (ReIdentificationClassificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
params = super().parameters()
params.update({
'subset_number': NumberField(
optional=True, min_value=1, value_type=int, default=10, description="Number of subsets for separating."
)
})
return params
def configure(self):
self.subset_num = self.get_value_from_config('subset_number')
config_copy = self.config.copy()
if 'subset_number' in config_copy:
config_copy.pop('subset_number')
self.accuracy_metric = PairwiseAccuracy(config_copy, self.dataset)
def evaluate(self, annotations, predictions):
subset_results = []
first_images_annotations = list(filter(
lambda annotation: (len(annotation.negative_pairs) > 0 or len(annotation.positive_pairs) > 0), annotations
))
idx_subsets = self.make_subsets(self.subset_num, len(first_images_annotations))
for subset in range(self.subset_num):
test_subset = self.get_subset(first_images_annotations, idx_subsets[subset]['test'])
test_subset = self.mark_subset(test_subset, False)
train_subset = self.get_subset(first_images_annotations, idx_subsets[subset]['train'])
train_subset = self.mark_subset(train_subset)
subset_result = self.accuracy_metric.evaluate(test_subset+train_subset, predictions)
if not np.isnan(subset_result):
subset_results.append(subset_result)
return np.mean(subset_results) if subset_results else 0
@staticmethod
def make_subsets(subset_num, dataset_size):
subsets = []
if subset_num > dataset_size:
raise ValueError('It is impossible to divide dataset on more than number of annotations subsets.')
for subset in range(subset_num):
lower_bnd = subset * dataset_size // subset_num
upper_bnd = (subset + 1) * dataset_size // subset_num
subset_test = [(lower_bnd, upper_bnd)]
subset_train = [(0, lower_bnd), (upper_bnd, dataset_size)]
subsets.append({'test': subset_test, 'train': subset_train})
return subsets
@staticmethod
def mark_subset(subset_annotations, train=True):
for annotation in subset_annotations:
annotation.metadata['train'] = train
return subset_annotations
@staticmethod
def get_subset(container, subset_bounds):
subset = []
for bound in subset_bounds:
subset += container[bound[0]: bound[1]]
return subset
class FaceRecognitionTAFAPairMetric(FullDatasetEvaluationMetric):
__provider__ = 'face_recognition_tafa_pair_metric'
annotation_types = (ReIdentificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'threshold': NumberField(
value_type=float,
min_value=0,
optional=False,
description='Threshold value to identify pair of faces as matched'
)
})
return parameters
def configure(self):
self.threshold = self.get_value_from_config('threshold')
def submit_all(self, annotations, predictions):
return self.evaluate(annotations, predictions)
def evaluate(self, annotations, predictions):
tp = fp = tn = fn = 0
pairs = regroup_pairs(annotations, predictions)
for pair in pairs:
# Dot product of embeddings
prediction = np.dot(predictions[pair.image1].embedding, predictions[pair.image2].embedding)
# Similarity scale-shift
prediction = (prediction + 1) / 2
# Calculate metrics
if pair.same: # Pairs that match
if prediction > self.threshold:
tp += 1
else:
fp += 1
else:
if prediction < self.threshold:
tn += 1
else:
fn += 1
return [(tp+tn) / (tp+fp+tn+fn)]
class NormalizedEmbeddingAccuracy(FullDatasetEvaluationMetric):
"""
Accuracy score calculated with normalized embedding dot products
"""
__provider__ = 'normalized_embedding_accuracy'
annotation_types = (ReIdentificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
def submit_all(self, annotations, predictions):
return self.evaluate(annotations, predictions)
def evaluate(self, annotations, predictions):
true_positive = false_positive = 0
gallery_embeddings = extract_embeddings(annotations, predictions, query=False)
query_start_idx = len(gallery_embeddings)
for ann, pred in zip(annotations[query_start_idx:], predictions[query_start_idx:]):
best_sim = 0
pred_person_id = -1
person_id = ann.person_id
camera_id = ann.camera_id
for j, gallery_embedding in enumerate(gallery_embeddings):
gallery_person_id = annotations[j].person_id
gallery_camera_id = annotations[j].camera_id
if person_id == gallery_person_id and camera_id == gallery_camera_id:
continue
normalized_dot = np.linalg.norm(gallery_embedding) * np.linalg.norm(pred.embedding)
sim = np.dot(gallery_embedding, pred.embedding) / normalized_dot
if best_sim < sim:
best_sim = sim
pred_person_id = gallery_person_id
if pred_person_id == ann.person_id:
true_positive += 1
else:
false_positive += 1
if (true_positive + false_positive) == 0:
return [0]
accuracy = true_positive / (true_positive + false_positive)
return [accuracy]
def regroup_pairs(annotations, predictions):
image_indexes = {}
for i, pred in enumerate(predictions):
image_indexes[pred.identifier] = i
pairs = []
for image1 in annotations:
for image2 in image1.positive_pairs:
if image2 in image_indexes:
pairs.append(PairDesc(image_indexes[image1.identifier], image_indexes[image2], True))
for image2 in image1.negative_pairs:
if image2 in image_indexes:
pairs.append(PairDesc(image_indexes[image1.identifier], image_indexes[image2], False))
return pairs
def extract_embeddings(annotation, prediction, query):
embeddings = [pred.embedding for pred, ann in zip(prediction, annotation) if ann.query == query]
return np.stack(embeddings) if embeddings else embeddings
def get_gallery_query_pids(annotation):
gallery_pids = np.asarray([ann.person_id for ann in annotation if not ann.query])
query_pids = np.asarray([ann.person_id for ann in annotation if ann.query])
gallery_cameras = np.asarray([ann.camera_id for ann in annotation if not ann.query])
query_cameras = np.asarray([ann.camera_id for ann in annotation if ann.query])
return gallery_cameras, gallery_pids, query_cameras, query_pids
def distance_matrix(annotation, prediction):
gallery_embeddings = extract_embeddings(annotation, prediction, query=False)
query_embeddings = extract_embeddings(annotation, prediction, query=True)
not_empty = np.size(gallery_embeddings) > 0 and np.size(query_embeddings) > 0
return 1. - np.matmul(gallery_embeddings, np.transpose(query_embeddings)).T if not_empty else []
def unique_sample(ids_dict, num):
mask = np.zeros(num, dtype=np.bool)
for indices in ids_dict.values():
mask[np.random.choice(indices)] = True
return mask
def eval_map(distance_mat, query_ids, gallery_ids, query_cams, gallery_cams, interpolated_auc=False):
number_queries, _number_gallery = distance_mat.shape
# Sort and find correct matches
indices = np.argsort(distance_mat, axis=1)
matches = (gallery_ids[indices] == query_ids[:, np.newaxis]) # type: np.ndarray
# Compute AP for each query
average_precisions = []
for query in range(number_queries):
# Filter out the same id and same camera
valid = (gallery_ids[indices[query]] != query_ids[query]) | (gallery_cams[indices[query]] != query_cams[query])
y_true = matches[query, valid]
y_score = -distance_mat[query][indices[query]][valid]
if not np.any(y_true):
continue
average_precisions.append(binary_average_precision(y_true, y_score, interpolated_auc=interpolated_auc))
if not average_precisions:
raise RuntimeError("No valid query")
return np.mean(average_precisions)
def eval_cmc(distance_mat, query_ids, gallery_ids, query_cams, gallery_cams, separate_camera_set=False,
single_gallery_shot=False, first_match_break=False, number_single_shot_repeats=10, top_k=100):
number_queries, _number_gallery = distance_mat.shape
if not single_gallery_shot:
number_single_shot_repeats = 1
# Sort and find correct matches
indices = np.argsort(distance_mat, axis=1)
matches = gallery_ids[indices] == query_ids[:, np.newaxis] # type: np.ndarray
# Compute CMC for each query
ret = np.zeros(top_k)
num_valid_queries = 0
for query in range(number_queries):
valid = get_valid_subset(
gallery_cams, gallery_ids, query, indices, query_cams, query_ids, separate_camera_set
) # type: np.ndarray
if not np.any(matches[query, valid]):
continue
ids_dict = defaultdict(list)
if single_gallery_shot:
gallery_indexes = gallery_ids[indices[query][valid]]
for j, x in zip(np.where(valid)[0], gallery_indexes):
ids_dict[x].append(j)
for _ in range(number_single_shot_repeats):
if single_gallery_shot:
# Randomly choose one instance for each id
# required for correct validation on CUHK datasets
# http://www.ee.cuhk.edu.hk/~xgwang/CUHK_identification.html
sampled = (valid & unique_sample(ids_dict, len(valid)))
index = np.nonzero(matches[query, sampled])[0]
else:
index = np.nonzero(matches[query, valid])[0]
delta = 1. / (len(index) * number_single_shot_repeats)
for j, k in enumerate(index):
if k - j >= top_k:
break
if first_match_break:
ret[k - j] += 1
break
ret[k - j] += delta
num_valid_queries += 1
if num_valid_queries == 0:
raise RuntimeError("No valid query")
return ret.cumsum() / num_valid_queries
def get_valid_subset(gallery_cams, gallery_ids, query_index, indices, query_cams, query_ids, separate_camera_set):
# Filter out the same id and same camera
valid = (
(gallery_ids[indices[query_index]] != query_ids[query_index]) |
(gallery_cams[indices[query_index]] != query_cams[query_index])
)
if separate_camera_set:
# Filter out samples from same camera
valid &= (gallery_cams[indices[query_index]] != query_cams[query_index])
return valid
def get_embedding_distances(annotation, prediction, train=False):
image_indexes = {}
for i, pred in enumerate(prediction):
image_indexes[pred.identifier] = i
pairs = []
for image1 in annotation:
if train != image1.metadata.get("train", False):
continue
if image1.identifier not in image_indexes:
continue
for image2 in image1.positive_pairs:
if image2 in image_indexes:
pairs.append(PairDesc(image_indexes[image1.identifier], image_indexes[image2], True))
for image2 in image1.negative_pairs:
if image2 in image_indexes:
pairs.append(PairDesc(image_indexes[image1.identifier], image_indexes[image2], False))
if pairs:
embed1 = np.asarray([prediction[idx].embedding for idx, _, _ in pairs])
embed2 = np.asarray([prediction[idx].embedding for _, idx, _ in pairs])
return 0.5 * (1 - np.sum(embed1 * embed2, axis=1)), pairs
return None, pairs
def binary_average_precision(y_true, y_score, interpolated_auc=True):
if auc is None:
raise ValueError('please install sklearn')
def _average_precision(y_true_, y_score_):
precision, recall, _ = precision_recall_curve(y_true_, y_score_)
if not interpolated_auc:
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
return -1 * np.sum(np.diff(recall) * np.array(precision)[:-1])
return auc(recall, precision)
return _average_binary_score(_average_precision, y_true, y_score)
avg_line_length: 37.437722 | max_line_length: 120 | alphanum_fraction: 0.661454
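Both eval_map and eval_cmc above consume a query-by-gallery distance matrix (built by distance_matrix() as 1 minus the embedding dot product) together with person and camera id arrays. The self-contained toy sketch below mirrors just the rank-1 matching idea on invented embeddings; it does not import the module above and only illustrates the distance-matrix convention.

```python
# Toy sketch of rank-1 matching on a query-by-gallery distance matrix.
# Embeddings and person ids are invented; no dependency on the module above.
import numpy as np

rng = np.random.default_rng(0)
gallery = rng.normal(size=(4, 8))
query = gallery + 0.05 * rng.normal(size=(4, 8))           # perturbed copies of the gallery
gallery /= np.linalg.norm(gallery, axis=1, keepdims=True)  # L2-normalize rows
query /= np.linalg.norm(query, axis=1, keepdims=True)

gallery_pids = np.array([0, 1, 2, 3])
query_pids = np.array([0, 1, 2, 3])

# Same convention as distance_matrix() above: 1 - dot product, queries as rows.
dist = 1.0 - query @ gallery.T

# Rank-1: does the closest gallery sample share the query's person id?
best = np.argmin(dist, axis=1)
print("rank-1 accuracy:", np.mean(gallery_pids[best] == query_pids))
```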
hexsha: 9733f996b779be2a85f947ae984f165778789c59 | size: 9,371 | ext: py | lang: Python
repo_path: tests/asyncio/test_h2.py | repo_name: ampstat/hypercorn | repo_head_hexsha: 6b1e1689cddfcf9e19a63bb701d6b37a5e9f9acd | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import asyncio
import json
from typing import AsyncGenerator, Optional
from unittest.mock import Mock
import h2
import h11
import pytest
from hypercorn.asyncio.h2 import H2Server
from hypercorn.config import Config
from hypercorn.typing import ASGIFramework
from .helpers import MockTransport
from ..helpers import chunked_response_framework, echo_framework, push_framework
BASIC_HEADERS = [(":authority", "hypercorn"), (":scheme", "https")]
BASIC_DATA = "index"
FLOW_WINDOW_SIZE = 1
class MockConnection:
def __init__(
self,
event_loop: asyncio.AbstractEventLoop,
*,
config: Config = Config(),
framework: ASGIFramework = echo_framework,
upgrade_request: Optional[h11.Request] = None,
) -> None:
self.transport = MockTransport()
self.server = H2Server( # type: ignore
framework, event_loop, config, self.transport, upgrade_request=upgrade_request
)
self.connection = h2.connection.H2Connection()
if upgrade_request is not None:
self.connection.initiate_upgrade_connection()
else:
self.connection.initiate_connection()
def send_request(self, headers: list, settings: dict) -> int:
self.connection.update_settings(settings)
self.server.data_received(self.connection.data_to_send())
stream_id = self.connection.get_next_available_stream_id()
self.connection.send_headers(stream_id, headers)
self.server.data_received(self.connection.data_to_send())
return stream_id
async def send_data(self, stream_id: int, data: bytes) -> None:
self.connection.send_data(stream_id, data)
self.server.data_received(self.connection.data_to_send())
await asyncio.sleep(0) # Yield to allow the server to process
async def end_stream(self, stream_id: int) -> None:
self.connection.end_stream(stream_id)
self.server.data_received(self.connection.data_to_send())
await asyncio.sleep(0) # Yield to allow the server to process
def close(self) -> None:
self.connection.close_connection()
self.server.data_received(self.connection.data_to_send())
async def get_events(self) -> AsyncGenerator[h2.events.Event, None]:
while True:
await self.transport.updated.wait()
events = self.connection.receive_data(self.transport.data)
self.transport.clear()
for event in events:
if isinstance(event, h2.events.ConnectionTerminated):
self.transport.close()
elif isinstance(event, h2.events.DataReceived):
self.connection.acknowledge_received_data(
event.flow_controlled_length, event.stream_id
)
self.server.data_received(self.connection.data_to_send())
yield event
if self.transport.closed.is_set():
break
@pytest.mark.asyncio
@pytest.mark.parametrize(
"headers, body",
[
(BASIC_HEADERS + [(":method", "GET"), (":path", "/")], ""),
(
BASIC_HEADERS
+ [
(":method", "POST"),
(":path", "/"),
("content-length", str(len(BASIC_DATA.encode()))),
],
BASIC_DATA,
),
],
)
async def test_request(headers: list, body: str, event_loop: asyncio.AbstractEventLoop) -> None:
connection = MockConnection(event_loop)
stream_id = connection.send_request(headers, {})
if body != "":
await connection.send_data(stream_id, body.encode())
await connection.end_stream(stream_id)
response_data = b""
async for event in connection.get_events():
if isinstance(event, h2.events.ResponseReceived):
assert (b":status", b"200") in event.headers
assert (b"server", b"hypercorn-h2") in event.headers
assert b"date" in (header[0] for header in event.headers)
elif isinstance(event, h2.events.DataReceived):
response_data += event.data
elif isinstance(event, h2.events.StreamEnded):
connection.close()
data = json.loads(response_data.decode())
assert data["request_body"] == body
@pytest.mark.asyncio
async def test_protocol_error(event_loop: asyncio.AbstractEventLoop) -> None:
connection = MockConnection(event_loop)
connection.server.data_received(b"broken nonsense\r\n\r\n")
assert connection.transport.closed.is_set() # H2 just closes on error
@pytest.mark.asyncio
async def test_pipelining(event_loop: asyncio.AbstractEventLoop) -> None:
connection = MockConnection(event_loop)
streams = [
connection.send_request(BASIC_HEADERS + [(":method", "GET"), (":path", "/1")], {}),
connection.send_request(BASIC_HEADERS + [(":method", "GET"), (":path", "/1")], {}),
]
for stream_id in streams:
await connection.end_stream(stream_id)
responses = 0
async for event in connection.get_events():
if isinstance(event, h2.events.ResponseReceived):
responses += 1
elif isinstance(event, h2.events.StreamEnded) and responses == 2:
connection.close()
assert responses == len(streams)
@pytest.mark.asyncio
async def test_server_sends_chunked(event_loop: asyncio.AbstractEventLoop) -> None:
connection = MockConnection(event_loop, framework=chunked_response_framework)
stream_id = connection.send_request(BASIC_HEADERS + [(":method", "GET"), (":path", "/")], {})
await connection.end_stream(stream_id)
response_data = b""
async for event in connection.get_events():
if isinstance(event, h2.events.DataReceived):
response_data += event.data
elif isinstance(event, h2.events.StreamEnded):
connection.close()
assert response_data == b"chunked data"
@pytest.mark.asyncio
async def test_initial_keep_alive_timeout(event_loop: asyncio.AbstractEventLoop) -> None:
config = Config()
config.keep_alive_timeout = 0.01
server = H2Server(echo_framework, event_loop, config, Mock())
await asyncio.sleep(2 * config.keep_alive_timeout)
server.transport.close.assert_called() # type: ignore
@pytest.mark.asyncio
async def test_post_response_keep_alive_timeout(event_loop: asyncio.AbstractEventLoop) -> None:
config = Config()
config.keep_alive_timeout = 0.01
connection = MockConnection(event_loop, config=config)
stream_id = connection.send_request(BASIC_HEADERS + [(":method", "GET"), (":path", "/1")], {})
connection.server.pause_writing()
await connection.end_stream(stream_id)
await asyncio.sleep(2 * config.keep_alive_timeout)
assert not connection.transport.closed.is_set()
connection.server.resume_writing()
await asyncio.sleep(2 * config.keep_alive_timeout)
assert connection.transport.closed.is_set()
events = [event async for event in connection.get_events()]
assert isinstance(events[-1], h2.events.ConnectionTerminated)
@pytest.mark.asyncio
async def test_h2server_upgrade(event_loop: asyncio.AbstractEventLoop) -> None:
upgrade_request = h11.Request(method="GET", target="/", headers=[("Host", "hypercorn")])
connection = MockConnection(event_loop, upgrade_request=upgrade_request)
response_data = b""
async for event in connection.get_events():
if isinstance(event, h2.events.ResponseReceived):
assert (b":status", b"200") in event.headers
assert (b"server", b"hypercorn-h2") in event.headers
assert b"date" in (header[0] for header in event.headers)
elif isinstance(event, h2.events.DataReceived):
response_data += event.data
elif isinstance(event, h2.events.StreamEnded):
connection.close()
@pytest.mark.asyncio
async def test_h2_flow_control(event_loop: asyncio.AbstractEventLoop) -> None:
connection = MockConnection(event_loop)
stream_id = connection.send_request(
BASIC_HEADERS + [(":method", "GET"), (":path", "/")],
{h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: FLOW_WINDOW_SIZE},
)
await connection.end_stream(stream_id)
async for event in connection.get_events():
if isinstance(event, h2.events.DataReceived):
assert len(event.data) <= FLOW_WINDOW_SIZE
elif isinstance(event, h2.events.StreamEnded):
connection.close()
@pytest.mark.asyncio
async def test_h2_push(event_loop: asyncio.AbstractEventLoop) -> None:
connection = MockConnection(event_loop, framework=push_framework)
stream_id = connection.send_request(BASIC_HEADERS + [(":method", "GET"), (":path", "/")], {})
await connection.end_stream(stream_id)
push_received = False
streams_received = 0
async for event in connection.get_events():
if isinstance(event, h2.events.PushedStreamReceived):
assert (b":path", b"/") in event.headers
assert (b":method", b"GET") in event.headers
assert (b":scheme", b"http") in event.headers
assert (b":authority", b"hypercorn") in event.headers
push_received = True
elif isinstance(event, h2.events.StreamEnded):
streams_received += 1
if streams_received == 2:
connection.close()
assert push_received
avg_line_length: 40.5671 | max_line_length: 98 | alphanum_fraction: 0.669406
hexsha: 0e5edf3efeaca6210cbe54edd1f5982a25922c14 | size: 3,615 | ext: py | lang: Python
repo_path: jia/jia/query.py | repo_name: joshblum/chronology | repo_head_hexsha: d730355a5686ebba9f1c8e369ba26a3080d9fa26 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import copy
import json
import metis.core.query.aggregate
import metis.core.query.value
from flask import current_app
from metis.core.query.aggregate import GroupBy
from metis.core.query.condition import Condition
from metis.core.query.operator import Aggregate
from metis.core.query.operator import DataAccess
from metis.core.query.operator import Filter
from metis.core.query.operator import Limit
from metis.core.query.operator import OrderBy
from metis.core.query.operator import Project
from metis.core.query.value import Constant
from metis.core.query.value import Property
def cpf(args, alias=None):
if args['cpf_type'] == 'constant':
try:
constant = float(args['constant_value'])
except:
constant = args['constant_value']
return Constant(constant, alias=alias)
elif args['cpf_type'] == 'property':
return Property(args['property_name'], alias=alias)
elif args['cpf_type'] == 'function':
for i in range(len(args['function_args'])):
args['function_args'][i] = cpf(args['function_args'][i])
module = metis.core.query.value
func = args['function_name']
func_args = args['function_args']
return getattr(module, func)(func_args, alias=alias)
else:
raise ValueError("cpf_type must be constant, property, or function")
def transform(query_plan, operands):
fields = [cpf(operands['value'], alias=operands['newProperty'])]
return Project(query_plan, fields, merge=True)
def filter(query_plan, operands):
condition = Condition(operands['op'], cpf(operands['lhs']),
cpf(operands['rhs']))
return Filter(query_plan, condition)
def agg_op(agg_type, agg_on, store_in):
module = metis.core.query.aggregate
op = agg_type
agg_ons = []
if agg_on:
agg_ons.append(agg_on)
return getattr(module, op)(agg_ons, alias=store_in)
def aggregate(query_plan, operands):
aggregates = []
for agg in operands['aggregates']:
cpf_type = agg['agg_on']['cpf_type']
property_name = agg['agg_on'].get('property_name')
constant_value = agg['agg_on'].get('constant_value')
empty = (cpf_type == 'property' and not property_name or
cpf_type == 'constant' and not constant_value)
if empty:
agg_on_cpf = None
else:
agg_on_cpf = cpf(agg['agg_on'])
aggregates.append(agg_op(agg['agg_type'], agg_on_cpf, agg['alias']))
groups = []
for group in operands['groups']:
groups.append(cpf(group['field'], group['alias']))
group_by = GroupBy(groups)
return Aggregate(query_plan, group_by, aggregates)
def orderby(query_plan, operands):
fields = []
for field in operands['fields']:
fields.append(cpf(field['name']))
if operands['order']['type'] == 'asc':
order = OrderBy.ResultOrder.ASCENDING
else:
order = OrderBy.ResultOrder.DESCENDING
return OrderBy(query_plan, fields, order=order)
def limit(query_plan, operands):
return Limit(query_plan, int(operands['count']))
def create_metis_query_plan(query, start_time, end_time):
query = copy.deepcopy(query)
query_plan = {
'type': 'data_access',
'source': current_app.config['DATA_SOURCE_NAME'],
'stream': query['stream'],
'start_time': start_time,
'end_time': end_time,
}
operators = {
'transform': transform,
'filter': filter,
'aggregate': aggregate,
'orderby': orderby,
'limit': limit,
}
for step in query['steps']:
operation = step['operation']
operator = operation['operator']
operands = operation['operands']
query_plan = operators[operator](query_plan, operands)
return json.dumps({'plan': query_plan.to_dict()})
avg_line_length: 29.631148 | max_line_length: 72 | alphanum_fraction: 0.698755
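create_metis_query_plan() walks query['steps'], dispatching each step's operator to the functions above, and cpf() turns operand descriptors (a cpf_type of constant, property, or function plus the matching fields) into Metis values. A hedged sketch of that input shape follows; the key names come from the code above, but the stream name, property names, values, and the condition operator string are invented examples.

```python
# Sketch of the query structure consumed by create_metis_query_plan().
# Keys (stream, steps, operation, operator, operands, cpf_type, ...) come from
# the code above; concrete names and values are invented examples.
example_query = {
    "stream": "clicks",
    "steps": [
        {
            "operation": {
                "operator": "filter",
                "operands": {
                    "op": "gt",  # placeholder; actual operator names live in metis
                    "lhs": {"cpf_type": "property", "property_name": "latency_ms"},
                    "rhs": {"cpf_type": "constant", "constant_value": "100"},
                },
            }
        },
        {
            "operation": {
                "operator": "limit",
                "operands": {"count": "50"},
            }
        },
    ],
}

# plan_json = create_metis_query_plan(example_query, start_time, end_time)
```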
hexsha: dea180e2c17aa0b8b1f43cce86919aa559182d5f | size: 6,888 | ext: py | lang: Python
repo_path: edb/edgeql/compiler/typegen.py | repo_name: kafein/edgedb | repo_head_hexsha: 3cc4c5e6e11a6f25a82b061d7ba294deeb9ccb80 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""EdgeQL compiler type-related helpers."""
from __future__ import annotations
from typing import *
from edb import errors
from edb.ir import ast as irast
from edb.ir import typeutils as irtyputils
from edb.ir import utils as irutils
from edb.schema import abc as s_abc
from edb.schema import pointers as s_pointers
from edb.schema import types as s_types
from edb.schema import utils as s_utils
from edb.edgeql import ast as qlast
from . import context
from . import dispatch
from . import schemactx
from . import setgen
def type_to_ql_typeref(
t: s_types.Type,
*,
_name: Optional[str] = None,
ctx: context.ContextLevel,
) -> qlast.TypeExpr:
return s_utils.typeref_to_ast(
ctx.env.schema,
t,
disambiguate_std='std' in ctx.modaliases,
)
def ql_typeexpr_to_ir_typeref(
ql_t: qlast.TypeExpr, *,
ctx: context.ContextLevel) -> irast.TypeRef:
stype = ql_typeexpr_to_type(ql_t, ctx=ctx)
return irtyputils.type_to_typeref(
ctx.env.schema, stype, cache=ctx.env.type_ref_cache
)
def ql_typeexpr_to_type(
ql_t: qlast.TypeExpr, *,
ctx: context.ContextLevel) -> s_types.Type:
types = _ql_typeexpr_to_type(ql_t, ctx=ctx)
if len(types) > 1:
return schemactx.get_union_type(types, ctx=ctx)
else:
return types[0]
def _ql_typeexpr_to_type(
ql_t: qlast.TypeExpr, *,
ctx: context.ContextLevel) -> List[s_types.Type]:
if isinstance(ql_t, qlast.TypeOf):
with ctx.new() as subctx:
# Use an empty scope tree, to avoid polluting things pointlessly
subctx.path_scope = irast.ScopeTreeNode()
ir_set = setgen.ensure_set(dispatch.compile(ql_t.expr, ctx=subctx),
ctx=subctx)
stype = setgen.get_set_type(ir_set, ctx=subctx)
return [stype]
elif isinstance(ql_t, qlast.TypeOp):
if ql_t.op == '|':
return (_ql_typeexpr_to_type(ql_t.left, ctx=ctx) +
_ql_typeexpr_to_type(ql_t.right, ctx=ctx))
raise errors.UnsupportedFeatureError(
f'type operator {ql_t.op!r} is not implemented',
context=ql_t.context)
elif isinstance(ql_t, qlast.TypeName):
return [_ql_typename_to_type(ql_t, ctx=ctx)]
else:
raise errors.EdgeQLSyntaxError("Unexpected type expression",
context=ql_t.context)
def _ql_typename_to_type(
ql_t: qlast.TypeName, *,
ctx: context.ContextLevel) -> s_types.Type:
if ql_t.subtypes:
assert isinstance(ql_t.maintype, qlast.ObjectRef)
coll = s_types.Collection.get_class(ql_t.maintype.name)
ct: s_types.Type
if issubclass(coll, s_abc.Tuple):
t_subtypes = {}
named = False
for si, st in enumerate(ql_t.subtypes):
if st.name:
named = True
type_name = st.name
else:
type_name = str(si)
t_subtypes[type_name] = ql_typeexpr_to_type(st, ctx=ctx)
ctx.env.schema, ct = coll.from_subtypes(
ctx.env.schema, t_subtypes, {'named': named})
return ct
else:
a_subtypes = []
for st in ql_t.subtypes:
a_subtypes.append(ql_typeexpr_to_type(st, ctx=ctx))
ctx.env.schema, ct = coll.from_subtypes(ctx.env.schema, a_subtypes)
return ct
else:
return schemactx.get_schema_type(ql_t.maintype, ctx=ctx)
@overload
def ptrcls_from_ptrref( # NoQA: F811
ptrref: irast.PointerRef, *,
ctx: context.ContextLevel,
) -> s_pointers.Pointer:
...
@overload
def ptrcls_from_ptrref( # NoQA: F811
ptrref: irast.TupleIndirectionPointerRef, *,
ctx: context.ContextLevel,
) -> irast.TupleIndirectionLink:
...
@overload
def ptrcls_from_ptrref( # NoQA: F811
ptrref: irast.TypeIntersectionPointerRef, *,
ctx: context.ContextLevel,
) -> irast.TypeIntersectionLink:
...
@overload
def ptrcls_from_ptrref( # NoQA: F811
ptrref: irast.BasePointerRef, *,
ctx: context.ContextLevel,
) -> s_pointers.PointerLike:
...
def ptrcls_from_ptrref( # NoQA: F811
ptrref: irast.BasePointerRef, *,
ctx: context.ContextLevel,
) -> s_pointers.PointerLike:
cached = ctx.env.ptr_ref_cache.get_ptrcls_for_ref(ptrref)
if cached is not None:
return cached
ctx.env.schema, ptr = irtyputils.ptrcls_from_ptrref(
ptrref, schema=ctx.env.schema)
return ptr
def collapse_type_intersection_rptr(
ir_set: irast.Set, *,
ctx: context.ContextLevel,
) -> Tuple[irast.Set, List[s_pointers.Pointer]]:
ind_prefix, ind_ptrs = irutils.collapse_type_intersection(ir_set)
if not ind_ptrs:
return ir_set, []
rptr_specialization: Set[irast.PointerRef] = set()
for ind_ptr in ind_ptrs:
if ind_ptr.ptrref.rptr_specialization:
rptr_specialization.update(
ind_ptr.ptrref.rptr_specialization)
elif (
not ind_ptr.ptrref.is_empty
and ind_ptr.source.rptr is not None
):
assert isinstance(ind_ptr.source.rptr.ptrref, irast.PointerRef)
rptr_specialization.add(ind_ptr.source.rptr.ptrref)
ptrs = [ptrcls_from_ptrref(ptrref, ctx=ctx)
for ptrref in rptr_specialization]
return ind_prefix, ptrs
def type_to_typeref(
t: s_types.Type,
env: context.Environment,
) -> irast.TypeRef:
schema = env.schema
cache = env.type_ref_cache
expr_type = t.get_expr_type(env.schema)
include_descendants = (
expr_type is s_types.ExprType.Update
or expr_type is s_types.ExprType.Delete
)
include_ancestors = (
expr_type is s_types.ExprType.Insert
or expr_type is s_types.ExprType.Update
or expr_type is s_types.ExprType.Delete
)
return irtyputils.type_to_typeref(
schema,
t,
include_descendants=include_descendants,
include_ancestors=include_ancestors,
cache=cache,
)
avg_line_length: 28.345679 | max_line_length: 79 | alphanum_fraction: 0.651278
hexsha: 8c0fb8afa81907744cc59732a7aedac83c6f4745 | size: 342 | ext: py | lang: Python
repo_path: tests/test_demo.py | repo_name: norlyakov/gino-aiohttp | repo_head_hexsha: 2730bf2122dfceb61efddd6e00746fb1452ba905 | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
max_stars_count: 80 (events 2020-02-09T13:13:56.000Z to 2022-03-30T06:53:13.000Z) | max_issues_count: 32 (events 2020-05-11T20:35:06.000Z to 2021-07-14T07:37:53.000Z) | max_forks_count: 24 (events 2020-02-04T09:28:23.000Z to 2022-03-11T21:01:36.000Z)
content:
import uuid
def test(venv_client):
assert venv_client.get("/users/1").status_code == 404
nickname = str(uuid.uuid4())
r = venv_client.post("/users", json=dict(name=nickname))
r.raise_for_status()
r = r.json()
assert (
venv_client.get("/users/{}".format(r["id"])).json()["nickname"]
== nickname
)
avg_line_length: 24.428571 | max_line_length: 71 | alphanum_fraction: 0.602339
hexsha: 00e50c30ac915696007dd10a30f564e71d3d1f62 | size: 2,292 | ext: py | lang: Python
repo_path: src/musictag/mutagentagloader.py | repo_name: lmdslyngl/lmdsmusic | repo_head_hexsha: 376849d278374ffee0caf56073001cd2ba8ae463 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: 3 (events 2021-06-08T21:20:20.000Z to 2022-03-12T00:24:37.000Z) | max_forks_count: null
content:
import os
from typing import Dict
from logging import getLogger
import mutagen
from mutagen.id3 import ID3
from mutagen._vorbis import VCommentDict
from mutagen.mp4 import MP4Tags
from .util import TagLoaderException, str2int, get_or_default
def load(file_path) -> Dict[str, str]:
music_file = mutagen.File(file_path)
if music_file is None:
raise TagLoaderException("Unknown format media file.")
tags = music_file.tags
if isinstance(tags, ID3):
tag_dict = _load_id3(tags)
elif isinstance(tags, VCommentDict):
tag_dict = _load_vcomment(tags)
elif isinstance(tags, MP4Tags):
tag_dict = _load_mp4_tags(tags)
else:
# When no tags could be detected, fall back to empty values
tag_dict = {
"title": "",
"artist": "",
"album": "",
"year": 0
}
tag_dict["duration"] = music_file.info.length
if len(tag_dict["title"]) <= 0:
# When the tag has no title, substitute the filename
filename = os.path.splitext(os.path.basename(file_path))[0]
tag_dict["title"] = filename
info_message = (
"Fallbacking to title is filename, " +
"because failed to load tag in soundfile \"{}\"")
info_message = info_message.format(filename)
getLogger(__name__).info(info_message)
return tag_dict
def _load_id3(tag: ID3) -> Dict[str, str]:
return {
"title": str(get_or_default(tag, "TIT2")),
"artist": str(get_or_default(tag, "TPE1")),
"album": str(get_or_default(tag, "TALB")),
"year": str2int(str(get_or_default(tag, "TDRC")))
}
def _load_vcomment(tag: VCommentDict) -> Dict[str, str]:
return {
"title": _join_tag_list(tag, "title"),
"artist": _join_tag_list(tag, "artist"),
"album": _join_tag_list(tag, "album"),
"year": str2int(get_or_default(tag, "date", ["0"])[0])
}
def _load_mp4_tags(tag: MP4Tags) -> Dict[str, str]:
return {
"title": _join_tag_list(tag, "\xa9nam"),
"artist": _join_tag_list(tag, "\xa9ART"),
"album": _join_tag_list(tag, "\xa9alb"),
"year": str2int(get_or_default(tag, "\xa9day", ["0"])[0])
}
def _join_tag_list(tag_dict, tag_name):
tag = get_or_default(tag_dict, tag_name, [])
return "; ".join(tag)
avg_line_length: 28.65 | max_line_length: 67 | alphanum_fraction: 0.617801
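load() above normalizes ID3, Vorbis-comment, and MP4 tags into one dict with title, artist, album, year, and duration, falling back to the filename when no title is found. A minimal usage sketch, assuming mutagen is installed, the musictag package from src/ is importable, and "example.mp3" is just a placeholder path:

```python
# Minimal sketch: read tags through the loader above.
# Assumes mutagen is installed, the musictag package from src/ is on sys.path,
# and "example.mp3" is a placeholder path.
from musictag import mutagentagloader

tags = mutagentagloader.load("example.mp3")
print(tags["title"], tags["artist"], tags["album"], tags["year"])
print("duration: %.1fs" % tags["duration"])
```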
hexsha: 088b9fd37731b8ca69a5941d5ef4527257401c91 | size: 5,233 | ext: py | lang: Python
repo_path: gym/envs/classic_control/mountain_car.py | repo_name: caffett/gym | repo_head_hexsha: a9126ace2488acfaa63544ad14859d530ee4ac76 | licenses: ["Python-2.0", "OLDAP-2.7"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
"""
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from os import path
import tensorflow as tf
from tensorflow.keras.models import load_model
ROOT = path.dirname(path.abspath(gym.__file__))+"/envs/env_approx/"
from tensorflow.keras import backend as K
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.low = np.array([self.min_position, -self.max_speed])
self.high = np.array([self.max_position, self.max_speed])
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self.initial_space = spaces.Box(low=np.array([-0.6, 0]), high=np.array([-0.4, 0]), dtype=np.float32)
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action-1)*0.001 + math.cos(3*position)*(-0.0025)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position==self.min_position and velocity<0): velocity = 0
done = bool(position >= self.goal_position)
reward = -1.0/200
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def reset(self, x0=None):
if x0 is None:
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
else:
self.state = x0
return np.array(self.state)
def approximator(self, x0, step, algo, reward_approx=True):
model_name = "MountainCar-v0"
if reward_approx:
self.approx = load_model(ROOT+model_name+"/"+algo+"_ra_approx_1e+03_approx"+str(step)+".model")
else:
assert False
new_model = tf.keras.Sequential()
new_input = tf.keras.Input(tensor=tf.reshape(x0, (-1, len(self.state))))
new_model.add(new_input)
for layer in self.approx.layers:
new_model.add(layer)
sess = K.get_session()
return tf.reshape(new_model.output, (-1, 1)), sess
def _height(self, xs):
return np.sin(3 * xs)*.45+.55
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width/world_width
carwidth=40
carheight=20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs-self.min_position)*scale, ys*scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l,r,t,b = -carwidth/2, carwidth/2, carheight, 0
car = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight/2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(rendering.Transform(translation=(carwidth/4,clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight/2.5)
backwheel.add_attr(rendering.Transform(translation=(-carwidth/4,clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position-self.min_position)*scale
flagy1 = self._height(self.goal_position)*scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2-10), (flagx+25, flagy2-5)])
flag.set_color(.8,.8,0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation((pos-self.min_position)*scale, self._height(pos)*scale)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
avg_line_length: 34.655629 | max_line_length: 108 | alphanum_fraction: 0.618001
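Outside of the approximator() hook (which loads a Keras model from the env_approx directory), this modified MountainCarEnv keeps the classic gym interface, so a rollout is the usual reset/step loop. A short sketch with a random policy, purely for illustration and assuming the fork's module path shown in the metadata above:

```python
# Short rollout sketch against the modified MountainCar env above, using only
# the standard gym Env API (reset/step/close); the random policy and the
# 200-step cap are arbitrary illustration choices.
from gym.envs.classic_control.mountain_car import MountainCarEnv  # module path as in the repo above

env = MountainCarEnv()
state = env.reset()
total_reward = 0.0

for t in range(200):
    action = env.action_space.sample()        # random policy, illustration only
    state, reward, done, info = env.step(action)
    total_reward += reward                    # reward here is -1/200 per step
    if done:
        break

print("steps:", t + 1, "return:", round(total_reward, 3))
env.close()
```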
hexsha: 74362fe7e7f65405a3a77334ddf3398ed0dd9ec8 | size: 18,802 | ext: py | lang: Python
repo_path: scraper/unique_scraper.py | repo_name: uvacw/avca | repo_head_hexsha: fa07fa6d05fc3b058c75278db1d984abac0e331c | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (events 2021-03-09T10:47:53.000Z to 2021-03-09T10:47:53.000Z) | max_issues_count: null | max_forks_count: null
content:
# coding: utf-8
## NOTE: This scraper illustrates how to collect images from websites for academic research
## THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pickle
import requests
from bs4 import BeautifulSoup
import time
import random
import datetime
import logging
import warnings
import logging as logger
import os
from sqlalchemy import text
def get_links(project, company, brand, main_url_id, url, domain, main_url, collect_links = 0, collect_images = 0, store_html = 0, level=0, links_collected = []):
print(url)
logger.info(str(url) + ' collected')
headers={'User-Agent' : "Mozilla/5.0"}
try:
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.text, 'html.parser')
if store_html == 1:
try:
os.mkdir(company+'_'+brand)
except:
pass
path = company+'_'+brand+'/'
filename = url.replace('/','_').replace(':', '_')
if len(filename) > 200:
filename = filename[:200]
with open(path+'html_'+filename+'.html', 'w') as f:
f.write(page.text)
base = soup.find_all('base', href=True)
if len(base) > 0:
base = base[0]['href']
else:
base = None
if collect_links == 1:
links = []
if url[-1] == '/':
url = url[:-1]
for link in soup.find_all('a', href=True):
sql = '''INSERT INTO 02_links_unique(project, company, brand, main_url_id, domain, level, link_full, link_source, link_text, link_url, status_followed, from_company) VALUES('''
link_source = url
try:
link_text = link.contents[0]
except:
link_text = ''
link_url = link['href']
link_url = link_url.replace("javascript:window.open('","").replace("','_self')","").replace("')","").replace("'","")
if link_url.startswith('http'):
link_full = link_url
elif link_url.startswith('/'):
link_full = main_url + link_url
elif link_url.startswith('./'):
if link_source.endswith('/'):
if base:
link_full = base + link_url[2:]
else:
link_full = link_source + link_url[2:]
else:
if base:
link_full = base + link_url[2:]
else:
new_source = link_source.split('/')[:-1]
new_source = '/'.join(new_source) + '/'
link_full = new_source + link_url[2:]
elif link_url.startswith('#') == False:
if link_url.startswith('javascript') == False:
if link_url.startswith('whatsapp:') == False:
if link_source.endswith('/'):
if base:
link_full = base + link_url
else:
link_full = link_source + link_url
else:
if base:
link_full = base + link_url
else:
new_source = link_source.split('/')[:-1]
new_source = '/'.join(new_source) + '/'
link_full = new_source + link_url
if link_full in links_collected:
# print(link_full, 'collected already - skipping')
pass
else:
if domain in link_full:
from_company = '1'
else:
from_company = '0'
status_followed = '0'
sql += '''"'''+ project + '''", '''
sql += '''"'''+ company + '''", '''
sql += '''"'''+ brand + '''", '''
sql += '''"'''+ str(main_url_id) + '''", '''
sql += '''"'''+ domain + '''", '''
sql += '''"'''+ str(level) + '''", '''
sql += '''"'''+ link_full + '''", '''
sql += '''"'''+ link_source + '''", '''
sql += '''"'''+ str(link_text) + '''", '''
sql += '''"'''+ link_url + '''", '''
sql += '''"'''+ status_followed + '''", '''
sql += '''"'''+ from_company + '''")'''
try:
con.execute(sql)
except Exception as e:
try:
sql = '''INSERT INTO 02_links(project, company, brand, main_url_id, domain, level, link_full, link_source, link_text, link_url, status_followed, from_company) VALUES('''
sql += '''"'''+ project + '''", '''
sql += '''"'''+ company + '''", '''
sql += '''"'''+ brand + '''", '''
sql += '''"'''+ str(main_url_id) + '''", '''
sql += '''"'''+ domain + '''", '''
sql += '''"'''+ str(level) + '''", '''
sql += '''"'''+ link_full + '''", '''
sql += '''"'''+ link_source + '''", '''
sql += '''"'''+ 'error_link_text' + '''", '''
sql += '''"'''+ link_url + '''", '''
sql += '''"'''+ status_followed + '''", '''
sql += '''"'''+ from_company + '''")'''
logger.info(str(e))
logger.info(sql)
except Exception as e:
logger.info(str(e))
logger.info(sql)
if collect_images == 1:
pics = soup.find_all('img')
for pic in pics:
width = pic.get('width', 0)
height = pic.get('height', 0)
alt_text = pic.get('alt', '')
link_url = pic.get('src', '')
link_source = url
if link_url.startswith('/'):
img_link_full = main_url + link_url
else:
img_link_full = link_url
status_downloaded = '0'
sql = '''INSERT INTO 03_images(project, company, brand, main_url_id, domain, level, link_full, link_source, link_url, status_downloaded, image_height, image_width, image_alt) VALUES('''
sql += '''"'''+ project + '''", '''
sql += '''"'''+ company + '''", '''
sql += '''"'''+ brand + '''", '''
sql += '''"'''+ str(main_url_id) + '''", '''
sql += '''"'''+ domain + '''", '''
sql += '''"'''+ str(level) + '''", '''
sql += '''"'''+ img_link_full + '''", '''
sql += '''"'''+ link_source + '''", '''
sql += '''"'''+ link_url + '''", '''
sql += '''"'''+ status_downloaded + '''", '''
                # columns are (..., image_height, image_width, ...): append height first
                sql += str(height) + ''', '''
                sql += str(width) + ''', '''
sql += '''"'''+ str(alt_text) + '''")'''
# print(sql)
try:
con.execute(sql)
except Exception as e:
try:
sql = '''INSERT INTO 03_images(project, company, brand, main_url_id, domain, level, link_full, link_source, link_url, status_downloaded, image_height, image_width, image_alt) VALUES('''
sql += '''"'''+ project + '''", '''
sql += '''"'''+ company + '''", '''
sql += '''"'''+ brand + '''", '''
sql += '''"'''+ str(main_url_id) + '''", '''
sql += '''"'''+ domain + '''", '''
sql += '''"'''+ str(level) + '''", '''
sql += '''"'''+ link_full + '''", '''
sql += '''"'''+ link_source + '''", '''
sql += '''"'''+ img_link_full + '''", '''
sql += '''"'''+ status_downloaded + '''", '''
sql += '''"'''+ str(width) + '''", '''
sql += '''"'''+ str(height) + '''", '''
sql += '''"'''+ str('error') + '''")'''
con.execute(sql)
except Exception as e:
logger.info(str(e))
logger.info(sql)
time.sleep(random.uniform(0.5,5))
except Exception as e:
logger.info('error retrieving URL')
logger.info(str(url))
logger.info(str(e))
return
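
# Illustration of the relative-link resolution in get_links() above (hypothetical
# URLs, added here for clarity only): with
#   main_url    = "https://example.com"
#   link_source = "https://example.com/products/index.html"
# an href of "/contact" resolves to "https://example.com/contact", while
# "./detail.html" resolves to "https://example.com/products/detail.html" when no
# <base> tag is present (if a <base> href exists, it takes precedence).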
def run_scraper(project, company, brand, main_url_id, url, domain, main_url, collect_links = 0, collect_images = 0, store_html = 0, levels = 1, skip_level0=False):
if skip_level0 == False:
links_collected = get_links_collected(project, company, brand, status_followed = None, from_company = None)
level = 0
get_links(project, company, brand, main_url_id, url, domain, main_url, collect_links = collect_links, collect_images = collect_images, store_html = store_html, level = level)
sql = '''UPDATE 02_links SET status_followed = 1 WHERE link_full = "''' + url + '''"'''
con.execute(sql)
else:
sql = '''SELECT level FROM 02_links WHERE project ="''' + project + '''" AND company = "'''+ company + '''" AND brand = "'''+ brand + '''" ORDER BY level DESC limit 1'''
        res_levels = con.execute(sql).cursor.fetchall()
level = 0
if res_levels[0][0] > 0:
print('resuming at level', res_levels[0][0])
level = res_levels[0][0] -1
links_to_collect = get_links_collected(project, company, brand, status_followed = 0, from_company = 1)
links_collected = get_links_collected(project, company, brand, status_followed = None, from_company = None)
# In[ ]:
while level < levels:
links_to_collect = get_links_collected(project, company, brand, status_followed = 0, from_company = 1)
logger.info(str('links_to_collect: ' + str(links_to_collect)))
for link_full in links_to_collect:
links_collected = get_links_collected(project, company, brand, status_followed = 1, from_company = None)
logger.info(str('links_collected: ' + str(links_collected)))
try:
if link_full not in links_collected:
if link_full.endswith('.pdf'):
logger.info(str(link_full + ' skipped: PDF'))
elif 'mailto:' in link_full:
logger.info(str(link_full + ' skipped: email'))
elif link_full.endswith('.exe'):
logger.info(str(link_full + ' skipped: EXE'))
else:
get_links(project, company, brand, main_url_id, link_full, domain, main_url, collect_links = collect_links, collect_images = collect_images, store_html = store_html, level = level + 1)
sql = '''UPDATE 02_links SET status_followed = 1 WHERE link_full = "''' + link_full + '''"'''
con.execute(sql)
else:
logger.info(str(link_full + ' skipped: already collected'))
except Exception as e:
log_error(link_full, str(e))
logger.info(str(link_full + ' error'))
level += 1
print('level', level, 'completed')
def get_pending_company(project = None):
if project:
sql = '''SELECT id, project, company, brand, status, store_html, collect_links, collect_images, link_full, main_url, domain, levels, last_level_complete FROM 01_to_get WHERE status = "pending" AND levels - last_level_complete > 0 AND project = "'''+ project+'''" LIMIT 1'''
else:
sql = '''SELECT id, project, company, brand, status, store_html, collect_links, collect_images, link_full, main_url, domain, levels, last_level_complete FROM 01_to_get WHERE status = "pending" AND levels - last_level_complete > 0 LIMIT 1'''
res = con.execute(sql)
return res.cursor.fetchall()
def update_status_url(id, status):
sql = '''UPDATE 01_to_get SET status = "'''+ status+'''" WHERE id = ''' + str(id)
con.execute(sql)
def get_links_collected(project, company, brand, status_followed = None, from_company = None):
if status_followed == None:
if from_company == None:
sql = '''SELECT link_full FROM 02_links WHERE project = "''' + project + '''" AND company = "''' + company + '''" AND brand = "'''+brand+'''"'''
else:
sql = '''SELECT link_full FROM 02_links WHERE project = "''' + project + '''" AND company = "''' + company + '''" AND brand = "'''+brand+'''" AND from_company = ''' + str(from_company)
else:
if from_company == None:
sql = '''SELECT link_full FROM 02_links WHERE status_followed = ''' + str(status_followed) + ''' AND project = "''' + project + '''" AND company = "''' + company + '''" AND brand = "'''+brand+'''"'''
else:
sql = '''SELECT link_full FROM 02_links WHERE status_followed = ''' + str(status_followed) + ''' AND project = "''' + project + '''" AND company = "''' + company + '''" AND brand = "'''+brand+'''" AND from_company = ''' + str(from_company)
res = con.execute(sql)
res = [item[0] for item in res.cursor.fetchall()]
return res
def get_pending_links(project, company, brand, level):
sql = '''SELECT link_full FROM 02_links_unique WHERE project = '{project}' AND company = '{company}' AND brand = '{brand}' AND level = {level} AND status_followed = 0 AND from_company = 1'''.format(**locals())
res = con.execute(sql)
res = [item[0] for item in res.cursor.fetchall()]
return res
def get_collected_links(project, company, brand):
sql = '''SELECT link_source FROM 02_links_unique WHERE project = '{project}' AND company = '{company}' AND brand = '{brand}' '''.format(**locals())
res = con.execute(sql)
res = [item[0] for item in res.cursor.fetchall()]
sql = '''SELECT link_full FROM 02_links_unique WHERE project = '{project}' AND company = '{company}' AND brand = '{brand}' '''.format(**locals())
res2 = con.execute(sql)
res2 = [item[0] for item in res2.cursor.fetchall()]
return list(set(res + res2))
def log_error(link_full, error):
pass
def process_scraper():
    # the global declaration must come before the import that binds `con`,
    # otherwise Python rejects it ("name 'con' is assigned to before global declaration")
    global con
    from db_alchemy_scraper import con
try:
logger.basicConfig(filename=str('log_' + str(datetime.datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S")))+'.log', level=logger.INFO)
pending = get_pending_company()
if len(pending) > 0:
id, project, company, brand, status, store_html, collect_links, collect_images, link_full, main_url, domain, levels, last_level_complete = pending[0]
logger.info('{id}, {project}, {company} levels {levels} last_level_complete: {last_level_complete}'.format(**locals()))
links_to_collect = get_pending_links(project, company, brand, last_level_complete)
if len(links_to_collect) == 0:
print('no links to collect, adding main link to be sure')
links_to_collect.append(link_full)
update_status_url(id, 'ongoing')
# print(links_collected)
levelnew= last_level_complete + 1
            for link_to_collect in list(links_to_collect):  # iterate over a copy; the list is mutated below
if link_to_collect.endswith('.pdf'):
logger.info(str(link_full + ' skipped: PDF'))
elif 'mailto:' in link_to_collect:
logger.info(str(link_to_collect + ' skipped: email'))
elif link_to_collect.endswith('.exe'):
logger.info(str(link_to_collect + ' skipped: EXE'))
elif link_to_collect.startswith('javascript'):
logger.info(str(link_to_collect + ' skipped: java'))
else:
link_to_collect = link_to_collect.replace("'","")
links_collected = get_collected_links(project, company, brand)
# print(link_to_collect)
get_links(project, company, brand, id, link_to_collect, domain, main_url, collect_links = collect_links, collect_images = collect_images, store_html = store_html, level=levelnew, links_collected = links_collected)
con.execute('''UPDATE 02_links_unique SET status_followed = 1 WHERE link_full = '{link_to_collect}' '''.format(**locals()))
print('{link_to_collect} completed'.format(**locals()))
links_collected.append(link_to_collect)
links_to_collect.remove(link_to_collect)
total_collected = len(links_collected)
total_to_collect = len(links_to_collect)
print('completed {link_to_collect} - total links collected = {total_collected}, total links to be collected at this level = {total_to_collect}'.format(**locals()))
logger.info('''{company} {brand} level {levelnew} completed'''.format(**locals()))
con.execute('''UPDATE 01_to_get SET last_level_complete = {levelnew} WHERE project = "{project}" AND company = "{company}" AND brand = "{brand}" '''.format(**locals()))
update_status_url(id, 'pending')
else:
print('nothing to do?')
logger.info('nothing to do?')
except Exception as e:
print(link_to_collect)
print(e)
logger.info('failed')
logger.info(str(e))
update_status_url(id, 'failed')
return
if __name__ == "__main__":
process_scraper()
| 45.635922
| 463
| 0.49633
|
b02c9b2b34bb131d2b0e883df9cf5b8f5dffea42
| 4,194
|
py
|
Python
|
devserver.py
|
OpenCIOC/offlinetools
|
d73b151b1b17029b3d61fea23f3d09455fb04901
|
[
"Apache-2.0"
] | 1
|
2018-01-31T04:39:30.000Z
|
2018-01-31T04:39:30.000Z
|
devserver.py
|
OpenCIOC/offlinetools
|
d73b151b1b17029b3d61fea23f3d09455fb04901
|
[
"Apache-2.0"
] | null | null | null |
devserver.py
|
OpenCIOC/offlinetools
|
d73b151b1b17029b3d61fea23f3d09455fb04901
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import sys
import os
import io
import logging.config
# ==========================================================================
# start of inlined methods from site.py to bootstrap site-packages directory
# covered by PSF license from Python distribution
# ==========================================================================
def makepath(*paths):
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
def _init_pathinfo():
"""Return a set containing all existing file system items from sys.path."""
d = set()
for item in sys.path:
try:
if os.path.exists(item):
_, itemcase = makepath(item)
d.add(itemcase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = True
else:
reset = False
fullname = os.path.join(sitedir, name)
try:
f = io.TextIOWrapper(io.open_code(fullname))
except OSError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception:
print("Error processing line {:d} of {}:\n".format(n+1, fullname),
file=sys.stderr)
import traceback
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print(' '+line, file=sys.stderr)
print("\nRemainder of file ignored", file=sys.stderr)
break
if reset:
known_paths = None
return known_paths
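# Illustrative (hypothetical) .pth file handled by addpackage() above: a file
# named "extras.pth" inside site-packages could contain
#
#     # lines starting with '#' are treated as comments and skipped
#     ../shared-libs
#     import sys; sys.dont_write_bytecode = True
#
# The plain path line is joined with sitedir and appended to sys.path if it
# exists; the line starting with "import " is executed as-is.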
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = True
else:
reset = False
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
known_paths.add(sitedircase)
try:
names = os.listdir(sitedir)
except OSError:
return
names = [name for name in names if name.endswith(".pth")]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
# ==========================================================================
# end of inlined methods from site.py to bootstrap site-packages directory
# covered by PSF license from Python distribution
# ==========================================================================
def main():
print(sys.path)
app_dir = os.path.dirname(sys.executable)
print(app_dir)
paths = [app_dir]
print(paths)
addsitedir(os.path.join(app_dir, 'site-packages'))
print(sys.path)
sys.path[0:0] = paths
print(sys.path)
from paste.deploy import loadapp
from paste.httpserver import serve
import cryptography.hazmat.primitives.asymmetric.rsa # noqa
import cryptography.hazmat.bindings.openssl.binding # noqa
logging.config.fileConfig('development.ini')
app = loadapp('config:development.ini', 'main', relative_to=os.getcwd(), global_conf={})
server = serve(app, port=8765, start_loop=False)
print('starting server')
server.serve_forever()
if __name__ == '__main__':
main()
| 32.015267
| 92
| 0.564855
|
45d84dbb971ec3a65164bb7cc53a658e5187e1c5
| 1,651
|
py
|
Python
|
google/cloud/identity_toolkit_v2/types/__init__.py
|
renovate-bot/python-identity-toolkit
|
ccf9b1bc538d1a3de90fcefd66465b947198a1a7
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/identity_toolkit_v2/types/__init__.py
|
renovate-bot/python-identity-toolkit
|
ccf9b1bc538d1a3de90fcefd66465b947198a1a7
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/identity_toolkit_v2/types/__init__.py
|
renovate-bot/python-identity-toolkit
|
ccf9b1bc538d1a3de90fcefd66465b947198a1a7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .account_management_service import (
FinalizeMfaEnrollmentRequest,
FinalizeMfaEnrollmentResponse,
StartMfaEnrollmentRequest,
StartMfaEnrollmentResponse,
WithdrawMfaRequest,
WithdrawMfaResponse,
)
from .authentication_service import (
FinalizeMfaSignInRequest,
FinalizeMfaSignInResponse,
StartMfaSignInRequest,
StartMfaSignInResponse,
)
from .mfa_info import (
AutoRetrievalInfo,
FinalizeMfaPhoneRequestInfo,
FinalizeMfaPhoneResponseInfo,
StartMfaPhoneRequestInfo,
StartMfaPhoneResponseInfo,
)
__all__ = (
"FinalizeMfaEnrollmentRequest",
"FinalizeMfaEnrollmentResponse",
"StartMfaEnrollmentRequest",
"StartMfaEnrollmentResponse",
"WithdrawMfaRequest",
"WithdrawMfaResponse",
"FinalizeMfaSignInRequest",
"FinalizeMfaSignInResponse",
"StartMfaSignInRequest",
"StartMfaSignInResponse",
"AutoRetrievalInfo",
"FinalizeMfaPhoneRequestInfo",
"FinalizeMfaPhoneResponseInfo",
"StartMfaPhoneRequestInfo",
"StartMfaPhoneResponseInfo",
)
| 30.018182
| 74
| 0.761357
|
c9d8e0f0274078183967b830fafe0657c684473c
| 2,029
|
py
|
Python
|
02_primes.py
|
deshmukhmayur/tcs-codevita6-round1
|
975bd639b187fa6897d2acf58611a7e793b4072d
|
[
"MIT"
] | null | null | null |
02_primes.py
|
deshmukhmayur/tcs-codevita6-round1
|
975bd639b187fa6897d2acf58611a7e793b4072d
|
[
"MIT"
] | null | null | null |
02_primes.py
|
deshmukhmayur/tcs-codevita6-round1
|
975bd639b187fa6897d2acf58611a7e793b4072d
|
[
"MIT"
] | 1
|
2020-06-11T17:44:22.000Z
|
2020-06-11T17:44:22.000Z
|
'''
Prime Numbers spelt with prime number of letters
'''
WORDS = {
1: 'ONE',
2: 'TWO',
3: 'THREE',
4: 'FOUR',
5: 'FIVE',
6: 'SIX',
7: 'SEVEN',
8: 'EIGHT',
9: 'NINE',
0: '',
10: 'TEN',
11: 'ELEVEN',
12: 'TWELVE',
13: 'THIRTEEN',
14: 'FOURTEEN',
15: 'FIFTEEN',
16: 'SIXTEEN',
17: 'SEVENTEEN',
18: 'EIGHTEEN',
19: 'NINETEEN'
}
TENS = {
1: 'TEN',
2: 'TWENTY',
3: 'THIRTY',
4: 'FORTY',
5: 'FIFTY',
6: 'SIXTY',
7: 'SEVENTY',
8: 'EIGHTY',
9: 'NINETY'
}
def get_primes(n):
numbers = set(range(n, 1, -1))
primes = []
while numbers:
p = numbers.pop()
primes.append(p)
numbers.difference_update(set(range(p * 2, n + 1, p)))
return primes
def isPrimeWords(n):
    '''
    Spell out the given number in words and check whether its letter count is prime
    '''
    ans = ''
def twoDigitNums(n):
if n != '':
# print('n', n)
if int(n) > 19:
return TENS[int(n[0])] + WORDS[int(n[1])]
else:
return WORDS[int(n)]
return ''
if len(n) < 5:
n = '0' * (5 - len(n)) + n
tens, h, th = n[-2:], n[-3], n[:-3]
thousands = twoDigitNums(th)
if thousands != '':
ans += thousands + 'THOUSAND'
hundreds = WORDS[int(h)]
if hundreds != '':
ans += hundreds + 'HUNDRED'
units = twoDigitNums(tens)
if units != '':
if ans != '':
ans += 'AND' + units
else:
ans += units
# print(ans, len(ans))
if len(ans) in PRIMES:
return True
return False
if __name__ == '__main__':
N1, N2 = [int(x.strip()) for x in input().strip().split()]
PRIMES = get_primes(N2)
count = 0
for num in range(N1, N2 + 1):
if num in PRIMES:
if isPrimeWords(str(num)):
count += 1
print(count)
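
# Worked example for the logic above (added for illustration): for the input
# "1 20" the primes up to 20 are 2, 3, 5, 7, 11, 13, 17 and 19; spelt out they
# have 3, 5, 4, 5, 6, 8, 9 and 8 letters respectively, and only the counts 3, 5
# and 5 are themselves prime, so the program prints 3.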
| 19.509615
| 63
| 0.435683
|
33a22b0475647395bac9a2cb91e61be39872df70
| 6,073
|
py
|
Python
|
leapfrog/poll/vimeo.py
|
markpasc/leapfrog
|
fde06e10cb896936f3f6535bd3f31c64700a3c8d
|
[
"MIT"
] | 3
|
2015-11-05T07:19:49.000Z
|
2020-10-07T08:50:28.000Z
|
leapfrog/poll/vimeo.py
|
markpasc/leapfrog
|
fde06e10cb896936f3f6535bd3f31c64700a3c8d
|
[
"MIT"
] | null | null | null |
leapfrog/poll/vimeo.py
|
markpasc/leapfrog
|
fde06e10cb896936f3f6535bd3f31c64700a3c8d
|
[
"MIT"
] | null | null | null |
from __future__ import division
from datetime import datetime
import json
import logging
import re
import socket
from urllib import urlencode
from django.conf import settings
import httplib2
import oauth2 as oauth
from leapfrog.models import Account, Media, Person, Object, UserStream
import leapfrog.poll.embedlam
log = logging.getLogger(__name__)
def call_vimeo(method, token=None, **kwargs):
csr = oauth.Consumer(*settings.VIMEO_CONSUMER)
http_url = 'http://vimeo.com/api/rest/v2?format=json&method=%s' % method
if kwargs:
http_url = '&'.join((http_url, urlencode(kwargs)))
oauth_request = oauth.Request.from_consumer_and_token(csr, token,
http_method='GET', http_url=http_url)
oauth_sign_method = oauth.SignatureMethod_HMAC_SHA1()
oauth_request.sign_request(oauth_sign_method, csr, token)
oauth_signing_base = oauth_sign_method.signing_base(oauth_request, csr, token)
oauth_header = oauth_request.to_header()
h = httplib2.Http()
h.follow_redirects = 0
normal_url = oauth_request.to_url()
log.debug('Making request to URL %r', normal_url)
try:
resp, content = h.request(normal_url, method=oauth_request.method,
headers=oauth_header)
except socket.error, exc:
raise leapfrog.poll.embedlam.RequestError("Request to %s could not complete: %s" % (uri, str(exc)))
if resp.status == 502:
raise leapfrog.poll.embedlam.RequestError("502 Bad Gateway making Vimeo request %s" % normal_url)
if resp.status == 503:
raise leapfrog.poll.embedlam.RequestError("503 Service Unavailable making Vimeo request %s" % normal_url)
if resp.status == 500:
raise leapfrog.poll.embedlam.RequestError("500 Server Error making Vimeo request %s" % normal_url)
if resp.status == 404:
raise leapfrog.poll.embedlam.RequestError("404 Not Found making Vimeo request %s, wtf" % normal_url)
if resp.status != 200:
raise ValueError("Unexpected response making Vimeo request %s: %d %s" % (normal_url, resp.status, resp.reason))
data = json.loads(content)
if data['stat'] != 'fail':
return data
err = data['err']
if method == 'vimeo.videos.getSubscriptions' and err['msg'] == 'Internal error.':
raise leapfrog.poll.embedlam.RequestError("Internal error getting Vimeo subscriptions (try again later?)")
raise ValueError("Error retrieving data for %s call: %s: %s" % (method, err['msg'], err['expl']))
def account_for_vimeo_id(user_id, person=None):
try:
# TODO: update vimeo avatar pictures (but that requires fetching their people info speculatively)
return Account.objects.get(service='vimeo.com', ident=user_id)
except Account.DoesNotExist:
pass
# get vimeo data
log.debug('Getting info on user %r', user_id)
userdata = call_vimeo('vimeo.people.getInfo', user_id=user_id)
persondata = userdata['person']
if person is None:
portraits = persondata.get('portraits', {}).get('portrait')
avatar = None
if portraits is not None:
portraits = sorted([portrait for portrait in portraits if int(portrait['height']) >= 75], key=lambda x: int(x['height']))
if portraits:
portrait = portraits[0]
avatar = Media(
image_url=portrait['_content'],
width=int(portrait['width']),
height=int(portrait['height']),
)
avatar.save()
person = Person(
display_name=persondata['display_name'],
permalink_url=persondata['profileurl'],
avatar=avatar,
)
person.save()
acc = Account(
service='vimeo.com',
ident=user_id,
display_name=persondata.get('display_name', persondata.get('username', user_id)),
person=person,
)
acc.save()
return acc
def object_from_video_data(videodata):
video_id = videodata['id']
try:
return Object.objects.get(service='vimeo.com', foreign_id=video_id)
except Object.DoesNotExist:
pass
author = account_for_vimeo_id(videodata['owner']['id'])
permalink_url = [urldata['_content'] for urldata in videodata['urls']['url'] if urldata['type'] == 'video'][0]
width, height = [int(videodata[key]) for key in ('width', 'height')]
if width > 660:
height = 660 * height / width
width = 660
body = ("""<iframe src="http://player.vimeo.com/video/%s" width="%d" height="%d"></iframe>"""
% (video_id, width, height))
obj = Object(
service='vimeo.com',
foreign_id=video_id,
render_mode='mixed',
title=videodata['title'],
body=body,
time=datetime.strptime(videodata['upload_date'], '%Y-%m-%d %H:%M:%S'),
permalink_url=permalink_url,
author=author,
)
obj.save()
return obj
def object_from_url(url):
mo = re.match(r'http://vimeo\.com/ (\d+)', url, re.MULTILINE | re.DOTALL | re.VERBOSE)
if mo is None:
return
video_id = mo.group(1)
videoresp = call_vimeo('vimeo.videos.getInfo', video_id=video_id)
videodata = videoresp['video'][0]
return object_from_video_data(videodata)
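# Note on the pattern above: because re.VERBOSE is set, the literal space in the
# regex is ignored, so a URL such as "http://vimeo.com/12345" (placeholder id)
# matches and mo.group(1) yields "12345".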
def poll_vimeo(account):
user = account.person.user
if user is None:
return
token = oauth.Token(*account.authinfo.split(':'))
try:
subdata = call_vimeo('vimeo.videos.getSubscriptions', token=token, full_response='true')
except leapfrog.poll.embedlam.RequestError:
log.debug("An expected error occurred getting Vimeo subscriptions, tsk", exc_data=True)
return
for videodata in subdata['videos']['video']:
try:
obj = object_from_video_data(videodata)
# TODO: save videos from "like" subscriptions as shares
UserStream.objects.get_or_create(user=user, obj=obj,
defaults={'time': obj.time, 'why_account': obj.author, 'why_verb': 'post'})
except Exception, exc:
log.exception(exc)
| 35.30814
| 133
| 0.649597
|
ddd343eb59d86eeaf9845bf198051a9a5bf6b7e6
| 6,109
|
py
|
Python
|
nova/virt/hyperv/pathutils.py
|
nelsnelson/nova
|
826fe1cc6af2df291d5aaafdc5d498d626475d19
|
[
"Apache-2.0"
] | 1
|
2015-02-26T03:23:49.000Z
|
2015-02-26T03:23:49.000Z
|
nova/virt/hyperv/pathutils.py
|
nelsnelson/nova
|
826fe1cc6af2df291d5aaafdc5d498d626475d19
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/hyperv/pathutils.py
|
nelsnelson/nova
|
826fe1cc6af2df291d5aaafdc5d498d626475d19
|
[
"Apache-2.0"
] | 2
|
2015-06-17T13:24:55.000Z
|
2015-10-27T05:28:38.000Z
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from os_win.utils import pathutils
from oslo_config import cfg
from nova import exception
from nova.i18n import _
from nova.virt.hyperv import constants
hyperv_opts = [
cfg.StrOpt('instances_path_share',
default="",
help='The name of a Windows share name mapped to the '
'"instances_path" dir and used by the resize feature '
'to copy files to the target host. If left blank, an '
'administrative share will be used, looking for the same '
'"instances_path" used locally'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')
ERROR_INVALID_NAME = 123
# NOTE(claudiub): part of the pre-existing PathUtils is nova-specific and
# it does not belong in the os-win library. In order to ensure the same
# functionality with the least amount of changes necessary, adding as a mixin
# the os_win.pathutils.PathUtils class into this PathUtils.
class PathUtils(pathutils.PathUtils):
def get_instances_dir(self, remote_server=None):
local_instance_path = os.path.normpath(CONF.instances_path)
if remote_server:
if CONF.hyperv.instances_path_share:
path = CONF.hyperv.instances_path_share
else:
# Use an administrative share
path = local_instance_path.replace(':', '$')
return ('\\\\%(remote_server)s\\%(path)s' %
{'remote_server': remote_server, 'path': path})
else:
return local_instance_path
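
    # Illustration of the administrative-share behaviour above (hypothetical
    # values): with CONF.instances_path = "C:\OpenStack\Instances" and no
    # instances_path_share configured, get_instances_dir("compute2") returns
    # "\\compute2\C$\OpenStack\Instances"; get_instances_dir() with no remote
    # server returns the local path unchanged.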
def _get_instances_sub_dir(self, dir_name, remote_server=None,
create_dir=True, remove_dir=False):
instances_path = self.get_instances_dir(remote_server)
path = os.path.join(instances_path, dir_name)
try:
if remove_dir:
self.check_remove_dir(path)
if create_dir:
self.check_create_dir(path)
return path
except WindowsError as ex:
if ex.winerror == ERROR_INVALID_NAME:
raise exception.AdminRequired(_(
"Cannot access \"%(instances_path)s\", make sure the "
"path exists and that you have the proper permissions. "
"In particular Nova-Compute must not be executed with the "
"builtin SYSTEM account or other accounts unable to "
"authenticate on a remote host.") %
{'instances_path': instances_path})
raise
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
dir_name = '%s_revert' % instance_name
return self._get_instances_sub_dir(dir_name, None, create_dir,
remove_dir)
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return self._get_instances_sub_dir(instance_name, remote_server,
create_dir, remove_dir)
def _lookup_vhd_path(self, instance_name, vhd_path_func):
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = vhd_path_func(instance_name, format_ext)
if self.exists(test_path):
vhd_path = test_path
break
return vhd_path
def lookup_root_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name, self.get_root_vhd_path)
def lookup_configdrive_path(self, instance_name):
configdrive_path = None
for format_ext in constants.DISK_FORMAT_MAP:
test_path = self.get_configdrive_path(instance_name, format_ext)
if self.exists(test_path):
configdrive_path = test_path
break
return configdrive_path
def lookup_ephemeral_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name,
self.get_ephemeral_vhd_path)
def get_root_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.' + format_ext.lower())
def get_configdrive_path(self, instance_name, format_ext,
remote_server=None):
instance_path = self.get_instance_dir(instance_name, remote_server)
return os.path.join(instance_path, 'configdrive.' + format_ext.lower())
def get_ephemeral_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
def get_base_vhd_dir(self):
return self._get_instances_sub_dir('_base')
def get_export_dir(self, instance_name):
dir_name = os.path.join('export', instance_name)
return self._get_instances_sub_dir(dir_name, create_dir=True,
remove_dir=True)
def get_vm_console_log_paths(self, vm_name, remote_server=None):
instance_dir = self.get_instance_dir(vm_name,
remote_server)
console_log_path = os.path.join(instance_dir, 'console.log')
return console_log_path, console_log_path + '.1'
| 41.842466
| 79
| 0.641021
|
b4fe361be2385e11ed63fe65057ec4424631dcd4
| 6,116
|
py
|
Python
|
spektral/layers/pooling/mincut_pool.py
|
herman-nside/spektral
|
58bb524ec783f187145c3afe53db491dbc1f0ba0
|
[
"MIT"
] | 2
|
2021-02-21T10:02:38.000Z
|
2021-02-21T10:02:43.000Z
|
spektral/layers/pooling/mincut_pool.py
|
herman-nside/spektral
|
58bb524ec783f187145c3afe53db491dbc1f0ba0
|
[
"MIT"
] | null | null | null |
spektral/layers/pooling/mincut_pool.py
|
herman-nside/spektral
|
58bb524ec783f187145c3afe53db491dbc1f0ba0
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.pool import Pool
class MinCutPool(Pool):
r"""
A MinCut pooling layer from the paper
> [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481)<br>
> Filippo Maria Bianchi et al.
**Mode**: batch.
This layer computes a soft clustering \(\S\) of the input graphs using a MLP,
and reduces graphs as follows:
$$
\S = \textrm{MLP}(\X); \\
\A' = \S^\top \A \S; \X' = \S^\top \X;
$$
where MLP is a multi-layer perceptron with softmax output.
Two auxiliary loss terms are also added to the model: the _minCUT loss_
$$
- \frac{ \mathrm{Tr}(\S^\top \A \S) }{ \mathrm{Tr}(\S^\top \D \S) }
$$
and the _orthogonality loss_
$$
\left\|
\frac{\S^\top \S}{\| \S^\top \S \|_F}
- \frac{\I_K}{\sqrt{K}}
\right\|_F.
$$
The layer can be used without a supervised loss, to compute node clustering
simply by minimizing the two auxiliary losses.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Symmetrically normalized adjacency matrix of shape `([batch], n_nodes, n_nodes)`;
**Output**
- Reduced node features of shape `([batch], K, n_node_features)`;
- Reduced adjacency matrix of shape `([batch], K, K)`;
- If `return_mask=True`, the soft clustering matrix of shape `([batch], n_nodes, K)`.
**Arguments**
- `k`: number of nodes to keep;
- `mlp_hidden`: list of integers, number of hidden units for each hidden
layer in the MLP used to compute cluster assignments (if None, the MLP has
only the output layer);
- `mlp_activation`: activation for the MLP layers;
- `return_mask`: boolean, whether to return the cluster assignment matrix;
- `use_bias`: use bias in the MLP;
- `kernel_initializer`: initializer for the weights of the MLP;
- `bias_initializer`: initializer for the bias of the MLP;
- `kernel_regularizer`: regularization applied to the weights of the MLP;
- `bias_regularizer`: regularization applied to the bias of the MLP;
- `kernel_constraint`: constraint applied to the weights of the MLP;
- `bias_constraint`: constraint applied to the bias of the MLP;
"""
def __init__(
self,
k,
mlp_hidden=None,
mlp_activation="relu",
return_mask=False,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
self.k = k
self.mlp_hidden = mlp_hidden if mlp_hidden else []
self.mlp_activation = mlp_activation
self.return_mask = return_mask
def build(self, input_shape):
assert isinstance(input_shape, list)
layer_kwargs = dict(
kernel_initializer=self.kernel_initializer,
bias_initializer=self.bias_initializer,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
kernel_constraint=self.kernel_constraint,
bias_constraint=self.bias_constraint,
)
mlp_layers = []
for i, channels in enumerate(self.mlp_hidden):
mlp_layers.append(Dense(channels, self.mlp_activation, **layer_kwargs))
mlp_layers.append(Dense(self.k, "softmax", **layer_kwargs))
self.mlp = Sequential(mlp_layers)
super().build(input_shape)
def call(self, inputs):
if len(inputs) == 3:
X, A, I = inputs
if K.ndim(I) == 2:
I = I[:, 0]
else:
X, A = inputs
I = None
# Check if the layer is operating in batch mode (X and A have rank 3)
batch_mode = K.ndim(X) == 3
# Compute cluster assignment matrix
S = self.mlp(X)
# MinCut regularization
A_pooled = ops.matmul_at_b_a(S, A)
num = tf.linalg.trace(A_pooled)
D = ops.degree_matrix(A)
den = tf.linalg.trace(ops.matmul_at_b_a(S, D)) + K.epsilon()
cut_loss = -(num / den)
if batch_mode:
cut_loss = K.mean(cut_loss)
self.add_loss(cut_loss)
# Orthogonality regularization
SS = ops.modal_dot(S, S, transpose_a=True)
I_S = tf.eye(self.k, dtype=SS.dtype)
ortho_loss = tf.norm(
SS / tf.norm(SS, axis=(-1, -2), keepdims=True) - I_S / tf.norm(I_S),
axis=(-1, -2),
)
if batch_mode:
ortho_loss = K.mean(ortho_loss)
self.add_loss(ortho_loss)
# Pooling
X_pooled = ops.modal_dot(S, X, transpose_a=True)
A_pooled = tf.linalg.set_diag(
A_pooled, tf.zeros(K.shape(A_pooled)[:-1], dtype=A_pooled.dtype)
) # Remove diagonal
A_pooled = ops.normalize_A(A_pooled)
output = [X_pooled, A_pooled]
if I is not None:
I_mean = tf.math.segment_mean(I, I)
I_pooled = ops.repeat(I_mean, tf.ones_like(I_mean) * self.k)
output.append(I_pooled)
if self.return_mask:
output.append(S)
return output
@property
def config(self):
return {
"k": self.k,
"mlp_hidden": self.mlp_hidden,
"mlp_activation": self.mlp_activation,
"return_mask": self.return_mask,
}
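

# --- Usage sketch appended to this listing for illustration; it is not part of
# the original file. It builds a tiny batch-mode model with MinCutPool; the
# shapes and the number of clusters below are arbitrary example values.
if __name__ == "__main__":
    n_nodes, n_feat, n_clusters = 30, 8, 5
    X_in = tf.keras.Input(shape=(n_nodes, n_feat))
    A_in = tf.keras.Input(shape=(n_nodes, n_nodes))
    # Returns pooled features, pooled adjacency and the soft assignment matrix S
    X_pool, A_pool, S = MinCutPool(k=n_clusters, return_mask=True)([X_in, A_in])
    model = tf.keras.Model(inputs=[X_in, A_in], outputs=[X_pool, A_pool, S])
    # The minCUT and orthogonality terms registered via add_loss() appear here
    print(model.losses)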
| 33.420765
| 110
| 0.609222
|
b3982753210db31ccd9d9ee5312c5065fa012c4b
| 22,065
|
py
|
Python
|
huobi/client/market.py
|
Reactive-Capital/huobi_Python
|
1ba61f50be514b42a9fa3c2fe5d894e496b042e5
|
[
"Apache-2.0"
] | null | null | null |
huobi/client/market.py
|
Reactive-Capital/huobi_Python
|
1ba61f50be514b42a9fa3c2fe5d894e496b042e5
|
[
"Apache-2.0"
] | null | null | null |
huobi/client/market.py
|
Reactive-Capital/huobi_Python
|
1ba61f50be514b42a9fa3c2fe5d894e496b042e5
|
[
"Apache-2.0"
] | null | null | null |
from huobi.constant import *
from huobi.model.market import *
from huobi.utils import *
from huobi.utils.input_checker import check_in_list
class MarketClient(object):
def __init__(self, **kwargs):
"""
Create the request client instance.
:param kwargs: The option of request connection.
api_key: The public key applied from Huobi.
secret_key: The private key applied from Huobi.
url: The URL name like "https://api.huobi.pro".
init_log: to init logger
"""
self.__kwargs = kwargs
def get_candlestick(self, symbol, period, size=200):
"""
        Get the candlestick/kline data for the specified symbol. The number of data points returned is 200 by default.
:param symbol: The symbol, like "btcusdt". To query hb10, put "hb10" at here. (mandatory)
:param period: The candlestick/kline interval, MIN1, MIN5, DAY1 etc. (mandatory)
        :param size: The number of candlestick/kline data points requested, range [1 - 2000]. (optional)
:return: The list of candlestick/kline data.
"""
check_symbol(symbol)
check_should_not_none(period, "period")
check_range(size, 1, 2000, "size")
params = {
"symbol": symbol,
"period": period,
"size": size
}
from huobi.service.market.get_candlestick import GetCandleStickService
return GetCandleStickService(params).request(**self.__kwargs)
def sub_candlestick(self, symbols: 'str', interval: 'CandlestickInterval', callback, error_handler):
"""
Subscribe candlestick/kline event. If the candlestick/kline is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param interval: The candlestick/kline interval, MIN1, MIN5, DAY1 etc.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(candlestick_event: 'CandlestickEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(interval, "interval")
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
"interval" : interval,
}
from huobi.service.market.sub_candlestick import SubCandleStickService
SubCandleStickService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_candlestick(self, symbols: 'str', interval: 'CandlestickInterval', callback,
from_ts_second = None, end_ts_second = None, error_handler=None):
"""
Subscribe candlestick/kline event. If the candlestick/kline is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param interval: The candlestick/kline interval, MIN1, MIN5, DAY1 etc.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(candlestick_event: 'CandlestickEvent'):
pass
:param from_ts_second : data from timestamp [it's second]
:param end_ts_second : data util timestamp [it's second]
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(interval, "interval")
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
"interval" : interval,
"from_ts_second" : from_ts_second,
"end_ts_second" : end_ts_second
}
from huobi.service.market.req_candlestick import ReqCandleStickService
ReqCandleStickService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_pricedepth(self, symbol: 'str', depth_type: 'str', depth_size: 'int' = None) -> PriceDepth:
"""
Get the Market Depth of a symbol.
:param symbol: The symbol, like "btcusdt". (mandatory)
        :param depth_type: The depth type, from "step0" to "step5". (mandatory)
        :param depth_size: (optional) The maximum number of Market Depth entries returned.
                        For step0 the range is [1 - 150], default is 150; for step1-step5 the size is one of [5, 10, 20], default is 20.
:return: Market Depth data.
"""
check_symbol(symbol)
check_in_list(depth_type, [DepthStep.STEP0, DepthStep.STEP1, DepthStep.STEP2, DepthStep.STEP3, DepthStep.STEP4, DepthStep.STEP5], "depth_type")
params = {
"symbol": symbol,
"type": depth_type,
# "depth": depth_size
}
from huobi.service.market.get_pricedepth import GetPriceDepthService
ret_data = GetPriceDepthService(params).request(**self.__kwargs)
if depth_size is not None:
if (ret_data.bids is not None) and (len(ret_data.bids) > depth_size):
ret_data.bids = ret_data.bids[0:depth_size]
if (ret_data.asks is not None) and (len(ret_data.asks) > depth_size):
ret_data.asks = ret_data.asks[0:depth_size]
return ret_data
@staticmethod
def get_depth_step_list():
return [DepthStep.STEP0,
DepthStep.STEP1,
DepthStep.STEP2,
DepthStep.STEP3,
DepthStep.STEP4,
DepthStep.STEP5]
@staticmethod
    def get_valid_depth_step(value, default_value):
step_list = MarketClient.get_depth_step_list()
if value in step_list:
return value
else:
            return default_value
def sub_pricedepth(self, symbols: 'str', depth_step: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param depth_step: The depth precision, string from step0 to step5.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
        new_step = MarketClient.get_valid_depth_step(value=depth_step, default_value=DepthStep.STEP0)
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
"step" : new_step,
}
from huobi.service.market.sub_pricedepth import SubPriceDepthService
SubPriceDepthService(params).subscribe(callback, error_handler, **self.__kwargs)
def sub_pricedepth_bbo(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
}
from huobi.service.market.sub_pricedepth_bbo import SubPriceDepthBboService
SubPriceDepthBboService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_pricedepth(self, symbols: 'str', depth_step: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param depth_step: The depth precision, string from step0 to step5.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
        new_step = MarketClient.get_valid_depth_step(value=depth_step, default_value=DepthStep.STEP0)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"step": new_step,
}
from huobi.service.market.req_pricedepth import ReqPriceDepthService
ReqPriceDepthService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_market_detail(self, symbol: 'str') -> MarketDetail:
"""
Get trade statistics in 24 hours.
:param symbol: The symbol, like "btcusdt". (mandatory)
:return: Trade statistics.
"""
check_symbol(symbol)
params = {
"symbol": symbol,
}
from huobi.service.market.get_market_detail import GetMarketDetailService
return GetMarketDetailService(params).request(**self.__kwargs)
def sub_market_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe 24 hours trade statistics event. If statistics is generated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_statistics_event: 'TradeStatisticsEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
}
from huobi.service.market.sub_market_detail import SubMarketDetailService
SubMarketDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_market_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe 24 hours trade statistics event. If statistics is generated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_statistics_event: 'TradeStatisticsEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
}
from huobi.service.market.req_market_detail import ReqMarketDetailService
ReqMarketDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_market_trade(self, symbol: 'str') -> list:
"""
Get the most recent trades with their price, volume and direction.
:param symbol: The symbol, like "btcusdt". (mandatory)
:return: The list of trade.
"""
check_symbol(symbol)
params = {
"symbol": symbol,
}
from huobi.service.market.get_market_trade import GetMarketTradeService
return GetMarketTradeService(params).request(**self.__kwargs)
def get_history_trade(self, symbol: 'str', size: 'int' = None) -> list:
"""
Get the most recent trades with their price, volume and direction.
:param symbol: The symbol, like "btcusdt". (mandatory)
:param size: The number of historical trade requested, range [1 - 2000] (optional)
:return: The list of trade.
"""
check_symbol(symbol)
check_range(size, 1, 2000, "size")
params = {
"symbol": symbol,
"size" : size
}
from huobi.service.market.get_history_trade import GetHistoryTradeService
return GetHistoryTradeService(params).request(**self.__kwargs)
def sub_trade_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_event: 'TradeEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
}
from huobi.service.market.sub_trade_detail import SubTradeDetailService
SubTradeDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_trade_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_event: 'TradeEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
}
from huobi.service.market.req_trade_detail import ReqTradeDetailService
ReqTradeDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_market_detail_merged(self, symbol):
check_symbol(symbol)
params = {
"symbol": symbol
}
from huobi.service.market.get_market_detail_merged import GetMarketDetailMergedService
return GetMarketDetailMergedService(params).request(**self.__kwargs)
def get_market_tickers(self) -> list:
"""
get market tickers
:return: market ticker list.
"""
params = {}
from huobi.service.market.get_market_tickers import GetMarketTickersService
return GetMarketTickersService(params).request(**self.__kwargs)
"""
increase mbp(market by price)
"""
def sub_mbp_increase(self, symbols: 'str', levels: 'int', callback, error_handler=None):
"""
Subscribe mbp event. If the mbp is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
        :param levels: depth level, one of 5, 10, 20, 150; currently only 150 is supported
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
check_should_not_none(symbols, "symbol")
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(levels, "levels")
check_should_not_none(callback, "callback")
params = {
"symbol_list" : symbol_list,
"levels" : levels
}
from huobi.service.market.sub_mbp_increase import SubMbpIncreaseService
SubMbpIncreaseService(params).subscribe(callback, error_handler, **self.__kwargs)
"""
subscribe full mbp(market by price)
"""
def sub_mbp_full(self, symbols: 'str', levels: 'int', callback, error_handler=None):
"""
Subscribe full mbp event. If the mbp is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param levels: level, 5,10,20
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
check_should_not_none(symbols, "symbol")
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(levels, "levels")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"levels": levels
}
from huobi.service.market.sub_mbp_full import SubMbpFullService
SubMbpFullService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_mbp(self, symbols: 'str', levels: 'int', callback, auto_close = True, error_handler=None):
"""
Subscribe mbp event. If the mbp is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
        :param levels: depth level, one of 5, 10, 20, 150; currently only 150 is supported
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param auto_close : close websocket connection after get data
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
check_should_not_none(symbols, "symbol")
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(levels, "levels")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"levels": levels
}
from huobi.service.market.req_mbp import ReqMbpService
ReqMbpService(params).subscribe(callback, error_handler, **self.__kwargs)
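

# --- Usage sketch appended to this listing for illustration; it is not part of
# the original module. The symbol and interval are arbitrary example values; the
# REST call needs network access and the subscription needs a long-lived process.
if __name__ == "__main__":
    market_client = MarketClient(init_log=True)
    # REST: fetch the last 10 five-minute candles for btcusdt
    for candle in market_client.get_candlestick("btcusdt", CandlestickInterval.MIN5, 10):
        print(candle)
    # WebSocket: print every trade update as it arrives
    def trade_callback(trade_event: 'TradeEvent'):
        print(trade_event)
    market_client.sub_trade_detail("btcusdt", trade_callback)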
| 44.485887
| 157
| 0.653705
|
87b623971f6ce4c2d43cab30e8be7cf30931d68c
| 3,692
|
py
|
Python
|
Sketches/JMB/mysite/settings.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/JMB/mysite/settings.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/JMB/mysite/settings.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '/home/jason/mysite/mysite.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'bbfgxp&2+t&=yo!0@wey-_n4fcxhx8gdllmp%1s#%z85w_opv5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
"django.contrib.admin",
'mysite.polls'
)
| 36.196078
| 111
| 0.728061
|
81b1d64161ef00d03f8a99a9d649d9d3942f4fcd
| 4,443
|
py
|
Python
|
fb_chat.py
|
ShivamSarodia/FB-Chat-Parser
|
bbeb6161e9555a20e0bee87520535345e3234c9b
|
[
"MIT"
] | 11
|
2015-05-02T16:23:13.000Z
|
2021-08-31T03:39:39.000Z
|
fb_chat.py
|
ShivamSarodia/FB-Chat-Parser
|
bbeb6161e9555a20e0bee87520535345e3234c9b
|
[
"MIT"
] | 2
|
2016-01-24T01:03:53.000Z
|
2017-08-22T22:45:05.000Z
|
fb_chat.py
|
ShivamSarodia/FB-Chat-Parser
|
bbeb6161e9555a20e0bee87520535345e3234c9b
|
[
"MIT"
] | 2
|
2016-12-19T17:37:02.000Z
|
2017-02-27T19:09:03.000Z
|
from datetime import datetime
import pickle
def load_data(filename):
"""Return a Data object loaded from the given pickle file."""
with open(filename, "rb") as f:
return pickle.load(f)
class Data:
def __init__(self, threads, me=None):
self.threads = threads
if me:
self.me = me
else:
counts = {}
# set 'me' as the most common sender in the threads
for thread in self.threads:
for mess in thread.messages:
counts[mess.sender] = counts.get(mess.sender, 0) + 1
self.me = max(counts, key=lambda x: counts[x])
for thread in self.threads:
thread.add_me(self.me)
def query(self, *,
indv=None,
chat_name=None,
members=None,
first=True):
"""Query the threads in this chat data.
indv (string) - Return the individual chat thread with this person.
chat_name (string) - Return the chat thread with this name.
        members (iterable of names) - Return the chat thread with these members.
first (bool) - If true, return only the longest match.
Exactly one of indv, chat_name, and members may be specified.
"""
if [indv, chat_name, members].count(None) < 2:
raise ValueError("Multiple query types may not be specified.")
if indv:
threads = [t for t in self.threads
if t.name == indv and
t.members == set((self.me, indv))]
elif chat_name:
threads = [t for t in self.threads if t.name == chat_name]
        elif members:
            members = set(members)
            members.add(self.me)
            threads = [t for t in self.threads if t.members == members]
        else:
            raise ValueError("One of indv, chat_name, or members must be specified.")
if first:
return max(threads, key=len, default=None)
else:
return threads
def __getitem__(self, key):
return self.threads[key]
def __iter__(self):
return iter(self.threads)
def __len__(self):
return len(self.threads)
class Thread:
def __init__(self, name, messages, raw_members, filename):
self.name = name
self.filename = filename
self.messages = messages
self.me = None
# We take the raw members and add everyone who spoke in the
# chat to get the final members list.
self.members = set(raw_members)
for message in messages:
self.members.add(message.sender)
def add_me(self, me):
"""Add the given person as a member of this thread and as the self.me
for this thread.
This function is called automatically by the Data constructor,
so that each individual thread has both you and the other
people as members.
"""
self.me = me
self.members.add(me)
def query(self, *,
sender=None,
senders=None,
start=datetime.min,
end=datetime.max):
"""Query this thread's messages.
start (datetime) - return only messages after this time (inclusive)
end (datetime) - return only messages before this time (exclusive)
sender (string) - return only messages by the given sender
senders (iterable of strings) - return only messages by one of the given senders
"""
if sender and senders:
raise ValueError("`sender` and `senders` cannot be simultaneously specified")
if sender:
senders = set([sender])
def condition(m):
return (start <= m.time < end) and (not senders or m.sender in senders)
return [m for m in self.messages if condition(m)]
def get_my_messages(self):
return self.query(sender=self.me)
def __repr__(self):
return "<Thread of {} named {}>".format(
", ".join(self.members), self.name)
def __getitem__(self, key):
return self.messages[key]
def __iter__(self):
return iter(self.messages)
def __len__(self):
return len(self.messages)
class Message:
def __init__(self, sender, text, time):
self.sender = sender
self.text = text
self.time = time
def __len__(self):
return len(self.text)
def __repr__(self):
return "(" + str(self.time) + ") " + self.sender + ": " + self.text
| 29.818792
| 89
| 0.578438
|
e89f2792f7c5b9c6b7ebc32552b3d39d93730dd7
| 10,656
|
py
|
Python
|
spirecomm/ai/behaviours.py
|
joshmiller17/spirecomm
|
57e0d4e773e0e7accbfb4eb8f512f090432edb08
|
[
"MIT"
] | 2
|
2019-08-24T15:52:51.000Z
|
2019-08-24T15:53:33.000Z
|
spirecomm/ai/behaviours.py
|
joshmiller17/spirecomm
|
57e0d4e773e0e7accbfb4eb8f512f090432edb08
|
[
"MIT"
] | 61
|
2019-05-18T22:40:52.000Z
|
2019-08-24T14:10:51.000Z
|
spirecomm/ai/behaviours.py
|
joshmiller17/spirecomm
|
57e0d4e773e0e7accbfb4eb8f512f090432edb08
|
[
"MIT"
] | null | null | null |
import py_trees
import json
from spirecomm.communication.action import *
# This is the Template class from which all StS Behaviours inherit
# It also includes the original comments for what should go in each method
class DefaultBehaviour(py_trees.behaviour.Behaviour):
def __init__(self, name, agent):
"""
Minimal one-time initialisation. A good rule of thumb is
to only include the initialisation relevant for being able
to insert this behaviour in a tree for offline rendering to
dot graphs.
Other one-time initialisation requirements should be met via
the setup() method.
"""
super(DefaultBehaviour, self).__init__(name)
self.agent = agent
def log(self, msg, debug=4):
self.agent.log(str(self.name) + " [" + str(self.__class__.__name__) + "]: " + msg, debug=debug)
def setup(self):
"""
When is this called?
This function should be either manually called by your program
to setup this behaviour alone, or more commonly, via
:meth:`~py_trees.behaviour.Behaviour.setup_with_descendants`
or :meth:`~py_trees.trees.BehaviourTree.setup`, both of which
        will iterate over this behaviour, its children (its children's
children ...) calling :meth:`~py_trees.behaviour.Behaviour.setup`
on each in turn.
        If you have vital initialisation necessary to the successful
execution of your behaviour, put a guard in your
:meth:`~py_trees.behaviour.Behaviour.initialise` method
to protect against entry without having been setup.
What to do here?
Delayed one-time initialisation that would otherwise interfere
with offline rendering of this behaviour in a tree to dot graph
or validation of the behaviour's configuration.
Good examples include:
- Hardware or driver initialisation
- Middleware initialisation (e.g. ROS pubs/subs/services)
- A parallel checking for a valid policy configuration after
children have been added or removed
"""
pass
def initialise(self):
"""
When is this called?
The first time your behaviour is ticked and anytime the
status is not RUNNING thereafter.
What to do here?
Any initialisation you need before putting your behaviour
to work.
"""
pass
def update(self):
"""
When is this called?
Every time your behaviour is ticked.
What to do here?
- Triggering, checking, monitoring. Anything...but do not block!
- Set a feedback message
- return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE]
"""
return py_trees.common.Status.SUCCESS
def terminate(self, new_status):
"""
When is this called?
Whenever your behaviour switches to a non-running state.
- SUCCESS || FAILURE : your behaviour's work cycle has finished
- INVALID : a higher priority branch has interrupted, or shutting down
"""
pass
#like Sequence, but with a to_json method
class SequenceBehaviour(py_trees.composites.Sequence):
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "SequenceBehaviour"
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
#like Selector, but with a to_json method
class SelectorBehaviour(py_trees.composites.Selector):
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "SelectorBehaviour"
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
# A test-only class, returns the default logic of what the original AI would have done
class TestBehaviour(DefaultBehaviour):
def update(self):
self.log("tick", debug=6)
self.agent.cmd_queue.append(self.agent.default_logic(self.agent.blackboard.game))
return py_trees.common.Status.SUCCESS
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "TestBehaviour"
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"],agent)
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
# Temporary behaviour, remove when behaviour tree is more fully realized
# calls a custom function to handle complex logic for us
class CustomBehaviour(DefaultBehaviour):
def __init__(self, name, agent, function):
super(CustomBehaviour, self).__init__(name, agent)
self.function = function
def update(self):
self.agent.cmd_queue.append(getattr(self.agent, self.function)())
return py_trees.common.Status.SUCCESS
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "CustomBehaviour"
attrDict["function"] = self.function
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"],agent,d["function"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
# Returns success iff a blackboard.game boolean is true
# To invert this logic, set success=False: behaviour will then return true iff bool is false
class BoolCheckBehaviour(DefaultBehaviour):
def __init__(self, name, agent, boolean, success=True):
super(BoolCheckBehaviour, self).__init__(name, agent)
self.boolean = boolean
self.success = success
def update(self):
value = getattr(self.agent.blackboard.game, self.boolean)
ret = value if self.success else not value # invert bool if that's what we want to check
retStr = "SUCCESS" if ret else "FAILURE"
self.log(str(self.boolean) + " is " + str(value) + ": " + retStr, debug=6)
return py_trees.common.Status.SUCCESS if ret else py_trees.common.Status.FAILURE
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "BoolCheckBehaviour"
attrDict["boolean"] = self.boolean
attrDict["success"] = self.success
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"],agent,d["boolean"],d["success"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
# Returns success iff values are equal
# To invert this logic, set success=False: behaviour will then return true iff values are not equal
class EqualityCheckBehaviour(BoolCheckBehaviour):
def __init__(self, name, agent, first, second, success=True):
super(EqualityCheckBehaviour, self).__init__(name, agent, first, success)
self.first = first
self.second = second
def update(self):
value = True if self.first == self.second else False
ret = value if self.success else not value # invert bool if that's what we want to check
retStr = "SUCCESS" if ret else "FAILURE"
logStr = str(self.first) + " "
if value:
logStr += "== "
else:
logStr += "!= "
logStr += str(self.second) + ": " + retStr
self.log(logStr, debug=6)
return py_trees.common.Status.SUCCESS if ret else py_trees.common.Status.FAILURE
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "EqualityCheckBehaviour"
attrDict["first"] = self.first
attrDict["second"] = self.second
attrDict["success"] = self.success
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"],agent,d["first"],d["second"],d["success"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
# Like EqualityCheck, but the first value comes from game, second is given at init
class CompareToConstBehaviour(EqualityCheckBehaviour):
def __init__(self, name, agent, attr, static, success=True):
super(CompareToConstBehaviour, self).__init__(name, agent, attr, static, success)
self.attr = attr
self.static = static
def update(self):
self.first = getattr(self.agent.blackboard.game, self.attr)
return super().update()
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "CompareToConstBehaviour"
attrDict["attr"] = self.attr
attrDict["static"] = str(self.static)
attrDict["success"] = self.success
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"],agent,d["attr"],d["static"],d["success"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
# The default ActionBehaviour, implemented by more complex action behaviours like Play
# On update, it appends its action to the queue and returns SUCCESS
class ActionBehaviour(DefaultBehaviour):
def __init__(self, name, agent, action, params=[]):
super(ActionBehaviour, self).__init__(name, agent)
self.action = action
self.params = params
def update(self):
action_class = globals()[self.action]
command = action_class(*self.params)
self.agent.cmd_queue.append(command)
return py_trees.common.Status.SUCCESS
def to_json(self):
attrDict = {}
attrDict["name"] = self.name
attrDict["class"] = "ActionBehaviour"
attrDict["action"] = self.action
attrDict["params"] = self.params
attrDict["children"] = [c.to_json() for c in self.iterate(direct_descendants=True) if c != self]
return attrDict
@classmethod
def fromDict(cls,d,agent):
ret = cls(d["name"],agent,d["action"],d["params"])
for child in d["children"]:
childClass = child["class"]
ret.add_child(classMap[childClass].fromDict(child,agent))
return ret
classMap = {"SequenceBehaviour":SequenceBehaviour, \
"SelectorBehaviour":SelectorBehaviour, \
"TestBehaviour":TestBehaviour,\
"BoolCheckBehaviour":BoolCheckBehaviour, \
"EqualityCheckBehaviour":EqualityCheckBehaviour, \
"EqualityCheckBehaviour":EqualityCheckBehaviour, \
"CompareToConstBehaviour":CompareToConstBehaviour, \
"ActionBehaviour":ActionBehaviour}
| 33.093168
| 99
| 0.721471
|
0de9a6df4d0284cbd4c73853f307b0e39fd677fc
| 13,797
|
py
|
Python
|
tests/ignite/metrics/test_metrics_lambda.py
|
WrRan/ignite
|
00c79702b3c20e87168b93e73c250035a8d1d901
|
[
"BSD-3-Clause"
] | 1
|
2020-09-18T18:28:30.000Z
|
2020-09-18T18:28:30.000Z
|
tests/ignite/metrics/test_metrics_lambda.py
|
alxlampe/ignite
|
b53c6aeef87754b3cd3638c91172b386dc73af12
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ignite/metrics/test_metrics_lambda.py
|
alxlampe/ignite
|
b53c6aeef87754b3cd3638c91172b386dc73af12
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import numpy as np
import pytest
import torch
from pytest import approx
from sklearn.metrics import f1_score, precision_score, recall_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import Metric, MetricsLambda, Precision, Recall
class ListGatherMetric(Metric):
def __init__(self, index):
super(ListGatherMetric, self).__init__()
self.index = index
def reset(self):
self.list_ = None
def update(self, output):
self.list_ = output
def compute(self):
return self.list_[self.index]
def test_metrics_lambda():
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
def process_function(engine, data):
return data
engine = Engine(process_function)
def plus(this, other):
return this + other
m0_plus_m1 = MetricsLambda(plus, m0, other=m1)
m2_plus_2 = MetricsLambda(plus, m2, 2)
m0_plus_m1.attach(engine, "m0_plus_m1")
m2_plus_2.attach(engine, "m2_plus_2")
engine.run([[1, 10, 100]])
assert engine.state.metrics["m0_plus_m1"] == 11
assert engine.state.metrics["m2_plus_2"] == 102
engine.run([[2, 20, 200]])
assert engine.state.metrics["m0_plus_m1"] == 22
assert engine.state.metrics["m2_plus_2"] == 202
# metrics are partially attached
assert not m0.is_attached(engine)
assert not m1.is_attached(engine)
assert not m2.is_attached(engine)
# a dependency is detached
m0.detach(engine)
# so the lambda metric is too
assert not m0_plus_m1.is_attached(engine)
# the lambda is attached again
m0_plus_m1.attach(engine, "m0_plus_m1")
assert m0_plus_m1.is_attached(engine)
# metrics are always partially attached
assert not m0.is_attached(engine)
m0_plus_m1.detach(engine)
assert not m0_plus_m1.is_attached(engine)
# detached (and no longer partially attached)
assert not m0.is_attached(engine)
def test_metrics_lambda_reset():
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
m0.update([1, 10, 100])
m1.update([1, 10, 100])
m2.update([1, 10, 100])
def fn(x, y, z, t):
return 1
m = MetricsLambda(fn, m0, m1, z=m2, t=0)
    # instantiating a new MetricsLambda must reset
# its argument metrics
assert m0.list_ is None
assert m1.list_ is None
assert m2.list_ is None
m0.update([1, 10, 100])
m1.update([1, 10, 100])
m2.update([1, 10, 100])
m.reset()
assert m0.list_ is None
assert m1.list_ is None
assert m2.list_ is None
def test_integration():
np.random.seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
precision_true = precision_score(y_true, np.argmax(y_pred, axis=-1), average=None)
recall_true = recall_score(y_true, np.argmax(y_pred, axis=-1), average=None)
f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average="macro")
precision = state.metrics["precision"].numpy()
recall = state.metrics["recall"].numpy()
assert precision_true == approx(precision), "{} vs {}".format(precision_true, precision)
assert recall_true == approx(recall), "{} vs {}".format(recall_true, recall)
assert f1_true == approx(state.metrics["f1"]), "{} vs {}".format(f1_true, state.metrics["f1"])
def test_integration_ingredients_not_attached():
np.random.seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(evaluator, "f1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average="macro")
assert f1_true == approx(state.metrics["f1"]), "{} vs {}".format(f1_true, state.metrics["f1"])
def test_state_metrics():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall + 1e-20)
F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = evaluator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set(["precision", "recall", "f1"])
def test_state_metrics_ingredients_not_attached():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall + 1e-20)
F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
F1.attach(evaluator, "F1")
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = evaluator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set(["F1"])
def test_recursive_attachment():
def _test(composed_metric, metric_name, compute_true_value_fn):
metrics = {
metric_name: composed_metric,
}
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set([metric_name,])
np_y_pred = y_pred.numpy().ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
precision_1 = Precision()
precision_2 = Precision()
summed_precision = precision_1 + precision_2
def compute_true_summed_precision(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return p1 + p2
_test(summed_precision, "summed precision", compute_true_value_fn=compute_true_summed_precision)
precision_1 = Precision()
precision_2 = Precision()
mean_precision = (precision_1 + precision_2) / 2
def compute_true_mean_precision(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return (p1 + p2) * 0.5
_test(mean_precision, "mean precision", compute_true_value_fn=compute_true_mean_precision)
precision_1 = Precision()
precision_2 = Precision()
some_metric = 2.0 + 0.2 * (precision_1 * precision_2 + precision_1 - precision_2) ** 0.5
def compute_true_somemetric(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return 2.0 + 0.2 * (p1 * p2 + p1 - p2) ** 0.5
_test(some_metric, "some metric", compute_true_somemetric)
def _test_distrib_integration(device):
rank = idist.get_rank()
np.random.seed(12)
n_iters = 10
batch_size = 10
n_classes = 10
def _test():
y_true = np.arange(0, n_iters * batch_size * idist.get_world_size(), dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size * idist.get_world_size(), n_classes)
for i in range(n_iters * batch_size * idist.get_world_size()):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true = y_true.reshape(n_iters * idist.get_world_size(), batch_size)
y_pred = y_pred.reshape(n_iters * idist.get_world_size(), batch_size, n_classes)
def update_fn(engine, i):
y_true_batch = y_true[i + rank * n_iters, ...]
y_pred_batch = y_pred[i + rank * n_iters, ...]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
precision = Precision(average=False, device=device)
recall = Recall(average=False, device=device)
def Fbeta(r, p, beta):
return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(evaluator, "f1")
another_f1 = (1.0 + precision * recall * 2 / (precision + recall + 1e-20)).mean().item()
another_f1.attach(evaluator, "ff1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
assert "f1" in state.metrics
assert "ff1" in state.metrics
f1_true = f1_score(y_true.ravel(), np.argmax(y_pred.reshape(-1, n_classes), axis=-1), average="macro")
assert f1_true == approx(state.metrics["f1"])
assert 1.0 + f1_true == approx(state.metrics["ff1"])
for _ in range(5):
_test()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = "cuda:{}".format(local_rank)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(local_rank, distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = "cpu"
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"])
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
| 32.463529
| 110
| 0.66297
|
49a3251372381015263ea8015c42012bbf58fad9
| 7,520
|
py
|
Python
|
synapse/replication/http/federation.py
|
rvleij/synapse
|
77d9357226687a177c865bcdeaa0e750612fc078
|
[
"Apache-2.0"
] | 2
|
2020-04-30T18:38:02.000Z
|
2020-07-08T21:38:28.000Z
|
synapse/replication/http/federation.py
|
rvleij/synapse
|
77d9357226687a177c865bcdeaa0e750612fc078
|
[
"Apache-2.0"
] | null | null | null |
synapse/replication/http/federation.py
|
rvleij/synapse
|
77d9357226687a177c865bcdeaa0e750612fc078
|
[
"Apache-2.0"
] | 2
|
2020-03-03T18:34:52.000Z
|
2022-03-31T11:06:18.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.events import event_type_from_format_version
from synapse.events.snapshot import EventContext
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
"""Handles events newly received from federation, including persisting and
notifying.
The API looks like:
POST /_synapse/replication/fed_send_events/:txn_id
{
"events": [{
"event": { .. serialized event .. },
"internal_metadata": { .. serialized internal_metadata .. },
"rejected_reason": .., // The event.rejected_reason field
"context": { .. serialized event context .. },
}],
"backfilled": false
"""
NAME = "fed_send_events"
PATH_ARGS = ()
def __init__(self, hs):
super(ReplicationFederationSendEventsRestServlet, self).__init__(hs)
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.clock = hs.get_clock()
self.federation_handler = hs.get_handlers().federation_handler
@staticmethod
@defer.inlineCallbacks
def _serialize_payload(store, event_and_contexts, backfilled):
"""
Args:
store
event_and_contexts (list[tuple[FrozenEvent, EventContext]])
backfilled (bool): Whether or not the events are the result of
backfilling
"""
event_payloads = []
for event, context in event_and_contexts:
serialized_context = yield context.serialize(event, store)
event_payloads.append(
{
"event": event.get_pdu_json(),
"event_format_version": event.format_version,
"internal_metadata": event.internal_metadata.get_dict(),
"rejected_reason": event.rejected_reason,
"context": serialized_context,
}
)
payload = {"events": event_payloads, "backfilled": backfilled}
return payload
async def _handle_request(self, request):
with Measure(self.clock, "repl_fed_send_events_parse"):
content = parse_json_object_from_request(request)
backfilled = content["backfilled"]
event_payloads = content["events"]
event_and_contexts = []
for event_payload in event_payloads:
event_dict = event_payload["event"]
format_ver = event_payload["event_format_version"]
internal_metadata = event_payload["internal_metadata"]
rejected_reason = event_payload["rejected_reason"]
EventType = event_type_from_format_version(format_ver)
event = EventType(event_dict, internal_metadata, rejected_reason)
context = EventContext.deserialize(
self.storage, event_payload["context"]
)
event_and_contexts.append((event, context))
logger.info("Got %d events from federation", len(event_and_contexts))
await self.federation_handler.persist_events_and_notify(
event_and_contexts, backfilled
)
return 200, {}
class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
"""Handles EDUs newly received from federation, including persisting and
notifying.
Request format:
POST /_synapse/replication/fed_send_edu/:edu_type/:txn_id
{
"origin": ...,
"content: { ... }
}
"""
NAME = "fed_send_edu"
PATH_ARGS = ("edu_type",)
def __init__(self, hs):
super(ReplicationFederationSendEduRestServlet, self).__init__(hs)
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.registry = hs.get_federation_registry()
@staticmethod
def _serialize_payload(edu_type, origin, content):
return {"origin": origin, "content": content}
async def _handle_request(self, request, edu_type):
with Measure(self.clock, "repl_fed_send_edu_parse"):
content = parse_json_object_from_request(request)
origin = content["origin"]
edu_content = content["content"]
logger.info("Got %r edu from %s", edu_type, origin)
result = await self.registry.on_edu(edu_type, origin, edu_content)
return 200, result
class ReplicationGetQueryRestServlet(ReplicationEndpoint):
"""Handle responding to queries from federation.
Request format:
POST /_synapse/replication/fed_query/:query_type
{
"args": { ... }
}
"""
NAME = "fed_query"
PATH_ARGS = ("query_type",)
# This is a query, so let's not bother caching
CACHE = False
def __init__(self, hs):
super(ReplicationGetQueryRestServlet, self).__init__(hs)
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.registry = hs.get_federation_registry()
@staticmethod
def _serialize_payload(query_type, args):
"""
Args:
query_type (str)
args (dict): The arguments received for the given query type
"""
return {"args": args}
async def _handle_request(self, request, query_type):
with Measure(self.clock, "repl_fed_query_parse"):
content = parse_json_object_from_request(request)
args = content["args"]
logger.info("Got %r query", query_type)
result = await self.registry.on_query(query_type, args)
return 200, result
class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
"""Called to clean up any data in DB for a given room, ready for the
server to join the room.
Request format:
POST /_synapse/replication/fed_query/:fed_cleanup_room/:txn_id
{}
"""
NAME = "fed_cleanup_room"
PATH_ARGS = ("room_id",)
def __init__(self, hs):
super(ReplicationCleanRoomRestServlet, self).__init__(hs)
self.store = hs.get_datastore()
@staticmethod
def _serialize_payload(room_id, args):
"""
Args:
room_id (str)
"""
return {}
async def _handle_request(self, request, room_id):
await self.store.clean_room_for_join(room_id)
return 200, {}
def register_servlets(hs, http_server):
ReplicationFederationSendEventsRestServlet(hs).register(http_server)
ReplicationFederationSendEduRestServlet(hs).register(http_server)
ReplicationGetQueryRestServlet(hs).register(http_server)
ReplicationCleanRoomRestServlet(hs).register(http_server)
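def _example_edu_payload():
    """Editor's addition, illustrative only: the payload shape produced by
    ReplicationFederationSendEduRestServlet._serialize_payload for the request
    format documented above (the edu_type, origin and content are hypothetical).
    """
    return ReplicationFederationSendEduRestServlet._serialize_payload(
        edu_type="m.typing",
        origin="example.org",
        content={"room_id": "!abc:example.org", "typing": True},
    )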
| 30.569106
| 81
| 0.646277
|
c97afa5bbd903a0fbfacab455dd28e48366f14c4
| 744
|
py
|
Python
|
bobocep/rules/actions/no_action.py
|
r3w0p/bobocep
|
5f08348e9a2c3b0f92e935429d4f265c1693d26c
|
[
"MIT"
] | 5
|
2019-09-02T13:19:48.000Z
|
2021-07-31T23:42:06.000Z
|
bobocep/rules/actions/no_action.py
|
r3w0p/bobocep
|
5f08348e9a2c3b0f92e935429d4f265c1693d26c
|
[
"MIT"
] | null | null | null |
bobocep/rules/actions/no_action.py
|
r3w0p/bobocep
|
5f08348e9a2c3b0f92e935429d4f265c1693d26c
|
[
"MIT"
] | 2
|
2020-11-05T07:53:37.000Z
|
2021-06-06T06:48:44.000Z
|
from bobocep.rules.actions.bobo_action import BoboAction
from bobocep.rules.events.bobo_event import BoboEvent
class NoAction(BoboAction):
"""
An action that does nothing and always returns the specified boolean value.
:param name: The action name, defaults to an empty string.
:type name: str, optional
:param bool_return: The boolean value to always return when performing
the action, defaults to True.
:type bool_return: bool, optional
"""
def __init__(self, name: str = None, bool_return: bool = True) -> None:
super().__init__(name=name)
self._bool_return = bool_return
def _perform_action(self, event: BoboEvent) -> bool:
return self._bool_return
| 31
| 79
| 0.692204
|
b31999cc54b7dc28e81bb0ebef385bd3a558c0b2
| 173
|
py
|
Python
|
break and continue.py
|
karanjakhar/python-programs
|
316cd4e83ae39c1d1e70c2a5e8a9329641bad9e1
|
[
"MIT"
] | 1
|
2019-09-02T15:56:38.000Z
|
2019-09-02T15:56:38.000Z
|
break and continue.py
|
karanjakhar/python-programs
|
316cd4e83ae39c1d1e70c2a5e8a9329641bad9e1
|
[
"MIT"
] | null | null | null |
break and continue.py
|
karanjakhar/python-programs
|
316cd4e83ae39c1d1e70c2a5e8a9329641bad9e1
|
[
"MIT"
] | null | null | null |
i=10
# Count down from 10; `continue` skips to the next iteration while i > 4,
# otherwise `break` exits the loop (so 10..5 are printed and i ends at 4).
while(i>=1):
    print(i)
    i-=1
    if(i>4):
        continue
    else:
        break
# Count back up from 4; `break` stops the loop as soon as i reaches 13.
while(i<16):
    if(i==13):
        break
    print(i)
    i+=1
| 11.533333
| 17
| 0.375723
|
28776a48ab65463747c86bd14be87ab4580d37ea
| 523
|
py
|
Python
|
logger_lesson/logger.py
|
farooq-teqniqly/pakt-complete-python-course
|
01717bbe97181f70c38166b3dc82ba7b00098430
|
[
"MIT"
] | null | null | null |
logger_lesson/logger.py
|
farooq-teqniqly/pakt-complete-python-course
|
01717bbe97181f70c38166b3dc82ba7b00098430
|
[
"MIT"
] | null | null | null |
logger_lesson/logger.py
|
farooq-teqniqly/pakt-complete-python-course
|
01717bbe97181f70c38166b3dc82ba7b00098430
|
[
"MIT"
] | null | null | null |
import logging
import time
logger = logging.getLogger("logger_root")
log_entry_format = "%(asctime)s %(levelname)-8s %(name)s [%(filename)s:%(lineno)d] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_entry_format)
logging.Formatter.converter = time.gmtime
logger.info("Here is some info...")
logger.warning("WARNING!")
logger.debug("Debug message.")
logger.error("ERROR!")
logger.critical("NOT GOOD!")
child_logger = logging.getLogger("logger_root.child")
child_logger.critical("Child logger info...")
| 29.055556
| 95
| 0.755258
|
961d77ce05265748a670c9e7548815fb7bbd72d8
| 489
|
py
|
Python
|
zerver/lib/user_agent.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 17,004
|
2015-09-25T18:27:24.000Z
|
2022-03-31T22:02:32.000Z
|
zerver/lib/user_agent.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 20,344
|
2015-09-25T19:02:42.000Z
|
2022-03-31T23:54:40.000Z
|
zerver/lib/user_agent.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 7,271
|
2015-09-25T18:48:39.000Z
|
2022-03-31T21:06:11.000Z
|
import re
from typing import Dict
# Warning: If you change this parsing, please test using
# zerver/tests/test_decorators.py
# And extend zerver/tests/fixtures/user_agents_unique with any new test cases
pattern = re.compile(
"""^ (?P<name> [^/ ]* [^0-9/(]* )
(/ (?P<version> [^/ ]* ))?
([ /] .*)?
$""",
re.X,
)
def parse_user_agent(user_agent: str) -> Dict[str, str]:
match = pattern.match(user_agent)
assert match is not None
return match.groupdict()
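# --- Usage sketch (editor's addition); the user-agent string is hypothetical ---
if __name__ == "__main__":
    parsed = parse_user_agent("ZulipMobile/27.192 (Android 10)")
    print(parsed)  # {'name': 'ZulipMobile', 'version': '27.192'}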
| 24.45
| 77
| 0.629857
|
6f93d9b7385844241f750b700c4633c0ed16b0b9
| 974
|
py
|
Python
|
vitrage/evaluator/actions/base.py
|
mail2nsrajesh/vitrage
|
41f863bbb7568f70d347feeab8eaca13085f81ba
|
[
"Apache-2.0"
] | null | null | null |
vitrage/evaluator/actions/base.py
|
mail2nsrajesh/vitrage
|
41f863bbb7568f70d347feeab8eaca13085f81ba
|
[
"Apache-2.0"
] | null | null | null |
vitrage/evaluator/actions/base.py
|
mail2nsrajesh/vitrage
|
41f863bbb7568f70d347feeab8eaca13085f81ba
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ActionType(object):
SET_STATE = 'set_state'
RAISE_ALARM = 'raise_alarm'
ADD_CAUSAL_RELATIONSHIP = 'add_causal_relationship'
MARK_DOWN = 'mark_down'
action_types = [ActionType.SET_STATE,
ActionType.RAISE_ALARM,
ActionType.ADD_CAUSAL_RELATIONSHIP,
ActionType.MARK_DOWN]
class ActionMode(object):
DO = 'do'
UNDO = 'undo'
| 30.4375
| 75
| 0.714579
|
c90d7be74d1fea13a78f253c16b7fdeaee791ba1
| 3,538
|
py
|
Python
|
src/cray/boa/__init__.py
|
Cray-HPE/boa
|
0718b5a1f40134f16b7279a93f545d9f5ca2b664
|
[
"MIT"
] | null | null | null |
src/cray/boa/__init__.py
|
Cray-HPE/boa
|
0718b5a1f40134f16b7279a93f545d9f5ca2b664
|
[
"MIT"
] | 2
|
2022-03-09T18:00:45.000Z
|
2022-03-29T18:54:52.000Z
|
src/cray/boa/__init__.py
|
Cray-HPE/boa
|
0718b5a1f40134f16b7279a93f545d9f5ca2b664
|
[
"MIT"
] | null | null | null |
#
# MIT License
#
# (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import os
PROTOCOL = "http"
API_GW_DNSNAME = "api-gw-service-nmn.local"
API_GW = "%s://%s/apis/" % (PROTOCOL, API_GW_DNSNAME)
API_GW_SECURE = "%ss://%s/apis/" % (PROTOCOL, API_GW_DNSNAME)
class BOAException(Exception):
"""
This is the base exception for all custom exceptions that can be raised from
this application.
"""
class InvalidInput(BOAException):
"""
There are invalid inputs to the BOA Agent.
"""
class TransientException(BOAException):
"""
Transient Exceptions are exceptions that could recover over time as a function
of services going temporarily offline. The expectation is that any
Exception that is transient in nature can be re-attempted at a later point
after required interfaces recover.
"""
class NontransientException(BOAException):
"""
Nontransient Exceptions are exceptions that are generally expected to fail
each and every time for a given boot orchestration. During the course of
    execution, any component that raises a nontransient exception will percolate
to the top level of the application stack. The application will exit 0, to
prevent Kubernetes from re-deploying the pod.
"""
class ServiceNotReady(TransientException):
"""
Raised when a service is not ready for interaction; this is used most
frequently during preflight checks. For clarification purposes, this
exception is still viable if a service is responding to requests, but
has not reached the run or state level necessary to honor the request
in question.
"""
class ServiceError(NontransientException):
"""
The service in question responded in a way that indicates the request made
is not viable and it is not likely that the service will become viable given
additional time or attempts without operator intervention.
"""
class ArtifactMissing(NontransientException):
"""
A boot artifact could not be located.
"""
class TooManyArtifacts(NontransientException):
"""
One and only one artifact was expected to be found. More than one artifact
was found.
"""
def in_cluster():
"""
Performs a check to determine if this software is running inside of a cluster.
"""
return "KUBERNETES_SERVICE_HOST" in os.environ
if in_cluster():
PROTOCOL = "http"
VERIFY = False
else:
PROTOCOL = "https"
VERIFY = True
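def _example_run_with_retries(preflight_check, attempts=5, delay=10):
    """Editor's addition, illustrative only: retry a hypothetical `preflight_check`
    callable on TransientException, as described in the docstrings above, and give
    up with ServiceNotReady once the attempts are exhausted.
    """
    import time
    for _ in range(attempts):
        try:
            return preflight_check()
        except TransientException:
            time.sleep(delay)
    raise ServiceNotReady("Service did not become ready in time.")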
| 34.019231
| 82
| 0.738553
|
e331dbff1b94734440a927c02902e42c3f1c9cc7
| 876
|
py
|
Python
|
src/bookmark_parser.py
|
Oscarshu0719/cableav-web-crawler
|
dce1dea30939b3799df71b17cb3f6b805b82219d
|
[
"MIT"
] | 15
|
2020-12-21T10:20:38.000Z
|
2022-03-17T05:17:09.000Z
|
src/bookmark_parser.py
|
287456405/cableav-web-crawler
|
dce1dea30939b3799df71b17cb3f6b805b82219d
|
[
"MIT"
] | 3
|
2021-01-27T08:40:35.000Z
|
2021-01-29T00:31:40.000Z
|
src/bookmark_parser.py
|
287456405/cableav-web-crawler
|
dce1dea30939b3799df71b17cb3f6b805b82219d
|
[
"MIT"
] | 8
|
2020-12-21T10:20:40.000Z
|
2021-12-03T02:31:12.000Z
|
# -*- coding: UTF-8 -*-
import re
from constants import BOOKMARK_PATH, PATTERN_BOOKMARK
"""
Usage:
python bookmark_parser.py *path*
Args:
*path*: Chrome bookmark file.
Notice:
Export bookmarks from 'Bookmark manager'.
"""
def get_urls_from_html(path):
with open(path, 'r', encoding='utf8') as file:
html = file.readlines()
pattern = re.compile(PATTERN_BOOKMARK)
urls = list()
for line in html:
match = pattern.match(line.strip())
if match:
urls.append(match.group(1))
with open(BOOKMARK_PATH, 'w') as file:
bookmarks = '\n'.join(urls)
file.write(bookmarks)
if __name__ == '__main__':
import sys
assert len(sys.argv) == 2, 'Error: The number of arguments is incorrect.'
path = sys.argv[1]
get_urls_from_html(path)
| 21.9
| 77
| 0.59589
|
98dffc423083c815f9b8befdb30d8922e5a91576
| 1,651
|
py
|
Python
|
zipline/api.py
|
liudengfeng/zipline
|
01fdd51d83efeb3453e92b7d02c255a06eba49ac
|
[
"Apache-2.0"
] | 6
|
2017-12-11T06:12:00.000Z
|
2019-05-23T17:39:10.000Z
|
zipline/api.py
|
liudengfeng/zipline
|
01fdd51d83efeb3453e92b7d02c255a06eba49ac
|
[
"Apache-2.0"
] | null | null | null |
zipline/api.py
|
liudengfeng/zipline
|
01fdd51d83efeb3453e92b7d02c255a06eba49ac
|
[
"Apache-2.0"
] | 1
|
2018-01-26T14:19:38.000Z
|
2018-01-26T14:19:38.000Z
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note that part of the API is implemented in TradingAlgorithm as
# methods (e.g. order). These are added to this namespace via the
# decorator ``api_method`` inside of algorithm.py.
from .finance.asset_restrictions import (
Restriction,
StaticRestrictions,
HistoricalRestrictions,
RESTRICTION_STATES,
)
from .finance import commission, execution, slippage, cancel_policy
from .finance.cancel_policy import (
NeverCancel,
EODCancel
)
from .finance.slippage import (
FixedSlippage,
FixedBasisPointsSlippage,
VolumeShareSlippage,
)
from .utils import math_utils, events
from .utils.events import (
calendars,
date_rules,
time_rules
)
__all__ = [
'EODCancel',
'FixedSlippage',
'FixedBasisPointsSlippage',
'NeverCancel',
'VolumeShareSlippage',
'Restriction',
'StaticRestrictions',
'HistoricalRestrictions',
'RESTRICTION_STATES',
'cancel_policy',
'commission',
'date_rules',
'events',
'execution',
'math_utils',
'slippage',
'time_rules',
'calendars',
]
| 27.065574
| 74
| 0.721381
|
0ea057e33dca1f4a3b6964d86d76c4e3e8cef0ac
| 2,045
|
py
|
Python
|
mac/google-cloud-sdk/lib/surface/iot/devices/configs/list.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
mac/google-cloud-sdk/lib/surface/iot/devices/configs/list.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 4
|
2020-07-21T12:51:46.000Z
|
2022-01-22T10:29:25.000Z
|
mac/google-cloud-sdk/lib/surface/iot/devices/configs/list.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 1
|
2020-07-25T18:17:57.000Z
|
2020-07-25T18:17:57.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved. #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list configurations for a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import devices
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import resource_args
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.GA)
class List(base.ListCommand):
"""List configs for a device.
This command lists all available configurations in the history of the device.
Up to 10 are kept; you may restrict the output to fewer via the `--limit`
flag.
"""
detailed_help = {
'EXAMPLES':
"""\
To list the 3 latest configurations of a device in region 'us-central1', run:
$ {command} --region=us-central1 --registry=my-registry --device=my-device --limit=3
""",
}
@staticmethod
def Args(parser):
parser.display_info.AddFormat(
'table(version, cloudUpdateTime, deviceAckTime)')
base.URI_FLAG.RemoveFromParser(parser)
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
resource_args.AddDeviceResourceArg(parser, 'for which to list configs',
positional=False)
def Run(self, args):
"""Run the list command."""
client = devices.DeviceConfigsClient()
device_ref = args.CONCEPTS.device.Parse()
return client.List(device_ref, args.limit)
| 34.661017
| 96
| 0.719315
|
e1ee84f551836390f2ff2571629a339a8fd6924a
| 2,383
|
py
|
Python
|
eve_auth/tasks.py
|
bastianh/django-eve-auth
|
26d7440a41708d46b057f98d60dffe9705c03d8b
|
[
"BSD-2-Clause"
] | null | null | null |
eve_auth/tasks.py
|
bastianh/django-eve-auth
|
26d7440a41708d46b057f98d60dffe9705c03d8b
|
[
"BSD-2-Clause"
] | null | null | null |
eve_auth/tasks.py
|
bastianh/django-eve-auth
|
26d7440a41708d46b057f98d60dffe9705c03d8b
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
from datetime import datetime, timezone
import logging
from celery import shared_task
from celery.utils.log import get_task_logger
from .utils.eveapi import eveapi
logger2 = get_task_logger(__name__)
logger = logging.getLogger(__name__)
@shared_task
def check_key(key_id):
from .models import ApiKey, Character, Corporation
api_model = ApiKey.objects.get(pk=key_id)
account = eveapi.get_account_api(api_model=api_model)
info, _, _ = account.key_info()
api_model.key_type = info.get("type")
api_model.access_mask = info.get("access_mask")
api_model.status = "active"
expires = info.get("expire_ts")
if expires:
api_model.expires = datetime.utcfromtimestamp(expires).replace(tzinfo=timezone.utc)
else:
api_model.expires = None
api_model.updated = datetime.now(timezone.utc)
if api_model.key_type in ['account', 'char']:
for charid, chardata in info.get("characters", {}).items():
character = Character.get_or_create(character_id=charid, character_name=chardata.get('name'))
api_model.characters.add(character)
if api_model.key_type == "corp":
corpinfo = list(info.get("characters").values())[0].get("corp")
corp = Corporation.get_or_create(corporation_id=corpinfo.get("id"), corporation_name=corpinfo.get("name"))
api_model.corporation = corp
api_model.save()
return 1
@shared_task
def update_character_info(character_id):
from .models import Character, Corporation, Alliance
eve = eveapi.get_eve_api()
try:
character = Character.objects.get(id=character_id)
except Character.DoesNotExist:
return False
info, _, _ = eve.character_info_from_id(char_id=character_id)
corp = info.get("corp", {})
corpmodel = Corporation.get_or_create(corporation_id=corp.get("id"), corporation_name=corp.get("name"))
character.corporation = corpmodel
alliance_data = info.get("alliance", {})
if corpmodel.alliance_id != alliance_data.get("id"):
corpmodel.alliance = Alliance.get_or_create(alliance_id=alliance_data.get("id"),
alliance_name=alliance_data.get("name"))
corpmodel.save()
character.updated = datetime.utcnow().replace(tzinfo=timezone.utc)
character.save()
return True
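def _example_enqueue(api_key_id, character_id):
    """Editor's addition, illustrative only: both functions above are Celery shared
    tasks, so callers normally queue them with .delay() instead of calling them
    directly; the id arguments are whatever primary keys exist in the database.
    """
    check_key.delay(api_key_id)
    update_character_info.delay(character_id)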
| 33.097222
| 114
| 0.698699
|
f44450f3ad90b9eb54c11bc67a84a71a66681324
| 3,474
|
py
|
Python
|
darglint/parse/grammars/google_short_description.py
|
s-weigand/darglint
|
6bc5d764db86626a996de1ff50925f976bf1449e
|
[
"MIT"
] | 405
|
2017-10-19T11:04:21.000Z
|
2022-03-23T07:58:40.000Z
|
darglint/parse/grammars/google_short_description.py
|
s-weigand/darglint
|
6bc5d764db86626a996de1ff50925f976bf1449e
|
[
"MIT"
] | 186
|
2018-03-26T20:33:37.000Z
|
2022-03-20T22:47:54.000Z
|
darglint/parse/grammars/google_short_description.py
|
s-weigand/darglint
|
6bc5d764db86626a996de1ff50925f976bf1449e
|
[
"MIT"
] | 43
|
2018-10-14T23:49:48.000Z
|
2022-02-10T12:39:16.000Z
|
# Generated on 2020-04-04 11:23:40.191231
from darglint.parse.grammar import (
BaseGrammar,
P,
)
from darglint.token import (
TokenType,
)
from darglint.parse.identifiers import (
NoqaIdentifier,
)
class ShortDescriptionGrammar(BaseGrammar):
productions = [
P("short-description", ([], "word", "line", 0), ([], "word", "noqa-maybe", 0), ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0), (TokenType.INDENT, 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("line", ([], "word", "line", 0), ([], "word", "noqa-maybe", 0), ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0), (TokenType.INDENT, 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("word", (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("colon", (TokenType.COLON, 0)),
P("hash", (TokenType.HASH, 0)),
P("noqa", (TokenType.NOQA, 0)),
P("noqa-maybe", ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0)),
P("noqa-head", ([], "hash", "noqa", 0)),
P("words", ([], "word", "words", 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("noqa-statement1", ([], "colon", "words", 0)),
]
start = "short-description"
| 119.793103
| 779
| 0.661485
|
715de4bb93c4a15635052daabd7c11699f82b1d7
| 505
|
py
|
Python
|
PROC Monitor/lab.py
|
IRIDIUM-SUB/Sys_Course_Design
|
52ec96378e9f9c8d7dc366efcba154df3f1cbc67
|
[
"MIT"
] | null | null | null |
PROC Monitor/lab.py
|
IRIDIUM-SUB/Sys_Course_Design
|
52ec96378e9f9c8d7dc366efcba154df3f1cbc67
|
[
"MIT"
] | null | null | null |
PROC Monitor/lab.py
|
IRIDIUM-SUB/Sys_Course_Design
|
52ec96378e9f9c8d7dc366efcba154df3f1cbc67
|
[
"MIT"
] | null | null | null |
import tkinter
'''
win=tkinter.Tk()  # create the main window
my_frame = tkinter.Frame(win, relief="sunken")
my_frame.pack()
mylist=tkinter.Listbox(my_frame,width=100)  # list box widget
mylist.pack()
for item in ["1","asdsa","asdsadsa","asdsadsad",1,2,3,4,5,6,7,8,9,11,22,33,44,55,66,77]:  # insert items
    mylist.insert(tkinter.END,item)  # append at the tail
tkinter.Label(win, text="This", borderwidth=2, relief="groove").pack()
win.mainloop()  # enter the event loop
'''
d=list()
p={'ss':"ojd",'sad':'daw'}
for item in p:
d.append((item,p[item]))
print(set(d))
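# Equivalent one-liner (illustrative note, not part of the original script):
# the loop above rebuilds the dict's (key, value) pairs by hand, so the same
# set can be obtained directly from the dict view:
#     print(set(p.items()))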
| 26.578947
| 96
| 0.663366
|
c3dba09a12f9596c49a33add17d65375a028eb87
| 9,619
|
py
|
Python
|
setup.py
|
cshivade/pytorchpipe
|
d8d92937de2ae7e23db1895c79a09ba47f5698e0
|
[
"Apache-2.0"
] | 1
|
2021-09-03T07:49:42.000Z
|
2021-09-03T07:49:42.000Z
|
setup.py
|
cshivade/pytorchpipe
|
d8d92937de2ae7e23db1895c79a09ba47f5698e0
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
cshivade/pytorchpipe
|
d8d92937de2ae7e23db1895c79a09ba47f5698e0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) IBM Corporation 2018-2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path,makedirs
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
# from io import open
here = path.abspath(path.dirname(__file__))
# Get path to configs.
configs_path = path.join(here,"configs/")
# Export path to config file in ~/.ptp/ folder.
ptp_path = path.expanduser("~/.ptp/")
# Make dir.
makedirs(path.dirname(ptp_path), exist_ok=True)
# Write path to configs.
with open(path.join(ptp_path, "config.txt"),"w") as file:
file.write(configs_path)
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='ptp', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='PyTorchPipe: framework for building multi-modal PyTorch pipelines',
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/IBM/pytorchpipe/', # Optional
license='Apache 2.0',
# This should be your name or the name of the organization which owns the
# project.
author='Tomasz Kornuta', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='tkornuta@us.ibm.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
# This information is only used for searching & browsing projects on PyPI, not for installing projects
# Checkout numpy: https://pypi.org/project/numpy/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
        'Development Status :: 2 - Pre-Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
# Pick your license as you wish
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
        'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='machine learning neural nets pytorch pipeline component problem model', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['docs', 'configs', 'build', 'experiments', 'scripts']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
python_requires='~=3.6',
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# Should not pin down version
# It is not considered best practice to use install_requires to pin
# dependencies to specific versions, or to specify sub-dependencies
# (i.e. dependencies of your dependencies). This is overly-restrictive,
# and prevents the user from gaining the benefit of dependency upgrades.
install_requires=[
'tqdm',
'nltk',
'pandas',
'pillow',
#'torchtext',
'torchvision',
'torch',
'PyYAML',
'requests'
],
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
# 'dev': ['tensorflow', 'ipdb', 'tensorboard', 'visdom', 'tensorboardX'],
# 'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
package_data={},
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'ptp-online-trainer=ptp.workers.online_trainer:main',
'ptp-processor=ptp.workers.processor:main',
]
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Source': 'https://github.com/tkornut/pytorchpipe/',
},
)
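# Illustrative note (not part of the original setup.py): after installation,
# e.g. `pip install .`, the console_scripts entry points above are expected to
# expose `ptp-online-trainer` and `ptp-processor` as shell commands that
# dispatch to ptp.workers.online_trainer:main and ptp.workers.processor:main.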
| 41.461207
| 106
| 0.693627
|
48541a57e95f20c2caa073c82601fea68669d5e0
| 12,843
|
py
|
Python
|
build/lib/simplemediawiki.py
|
LucianNovo/WikiKrawl
|
8a6d150cba74b12c7c62cb2044c14816e190df79
|
[
"MIT"
] | 2
|
2015-04-03T06:15:10.000Z
|
2022-03-12T15:37:36.000Z
|
simplemediawiki.py
|
LucianNovo/WikiKrawl
|
8a6d150cba74b12c7c62cb2044c14816e190df79
|
[
"MIT"
] | 1
|
2019-09-24T09:56:52.000Z
|
2019-09-24T09:56:52.000Z
|
core/lib/wikipedia/simplemediawiki.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | null | null | null |
# python-simplemediawiki - Extremely low-level wrapper to the MediaWiki API
# Copyright (C) 2011 Red Hat, Inc.
# Primary maintainer: Ian Weller <iweller@redhat.com>
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
"""
:py:mod:`simplemediawiki` is an extremely low-level wrapper to the `MediaWiki
API`_. It automatically handles cookies and gzip compression so that you can
make basic calls to the API in the easiest and fastest way possible. It also
provides a few functions to make day-to-day API access easier.
To use this module, initialize a :py:class:`MediaWiki` object, passing it the
URL of api.php for the wiki you want to work with. Calls go through
:py:func:`MediaWiki.call`. A generic login wrapper as well as functions to
determine limits and get a list of namespaces are provided for your
convenience.
>>> from simplemediawiki import MediaWiki
>>> wiki = MediaWiki('http://en.wikipedia.org/w/api.php')
>>> wiki.call({'action': 'query', 'prop': 'revisions', 'titles': 'Main Page'})
{u'query': {u'pages': {...}}}
.. _`MediaWiki API`: http://www.mediawiki.org/wiki/API:Main_page
"""
import cookielib
from datetime import datetime
import gzip
try:
import simplejson as json
except ImportError:
import json
from kitchen.text.converters import to_bytes
from StringIO import StringIO
import urllib
import urllib2
__author__ = 'Ian Weller <iweller@redhat.com>'
__version__ = '1.1.1'
DEFAULT_UA = ('python-simplemediawiki/%s '
'+https://github.com/ianweller/python-simplemediawiki') \
% __version__
class MediaWiki(object):
"""
Create a new object to access a wiki via *api_url*.
If you're interested in saving session data across multiple
:py:class:`MediaWiki` objects, provide a filename *cookie_file* to where
you want to save the cookies.
Applications that use simplemediawiki should change the *user_agent*
argument to something that can help identify the application if it is
misbehaving. It's recommended to use :py:func:`build_user_agent` to create
a `User-Agent`_ string that will be most helpful to server administrators.
Wikimedia sites enforce using a correct User-Agent; you should read
`Wikimedia's User-Agent policy`_ if you plan to be accessing those wikis.
.. tip::
If a user of your application may not know how to get the correct API
URL for their MediaWiki, you can try getting the right one with
:py:func:`MediaWiki.normalize_api_url`.
:param api_url: URL for the path to the API endpoint
:param cookie_file: path to a :py:class:`cookielib.MozillaCookieJar` file
:param user_agent: string sent as ``User-Agent`` header to web server
.. _`User-Agent`: http://en.wikipedia.org/wiki/User_agent
.. _`Wikimedia's User-Agent policy`:
http://meta.wikimedia.org/wiki/User-Agent_policy
"""
_high_limits = None
_namespaces = None
_psuedo_namespaces = None
def __init__(self, api_url, cookie_file=None, user_agent=DEFAULT_UA):
self._api_url = api_url
if cookie_file:
self._cj = cookielib.MozillaCookieJar(cookie_file)
try:
self._cj.load()
except IOError:
self._cj.save()
self._cj.load()
else:
self._cj = cookielib.CookieJar()
self._opener = urllib2.build_opener(
urllib2.HTTPCookieProcessor(self._cj)
)
self._opener.addheaders = [('User-Agent', user_agent)]
def _fetch_http(self, url, params):
"""
Standard HTTP request handler for this class with gzip and cookie
support. This was separated out of :py:func:`MediaWiki.call` to make
:py:func:`MediaWiki.normalize_api_url` useful.
.. note::
This function should not be used. Use :py:func:`MediaWiki.call`
instead.
:param url: URL to send POST request to
:param params: dictionary of query string parameters
"""
params['format'] = 'json'
# urllib.urlencode expects str objects, not unicode
fixed = dict([(to_bytes(b[0]), to_bytes(b[1]))
for b in params.items()])
request = urllib2.Request(url, urllib.urlencode(fixed))
request.add_header('Accept-encoding', 'gzip')
response = self._opener.open(request)
if isinstance(self._cj, cookielib.MozillaCookieJar):
self._cj.save()
if response.headers.get('Content-Encoding') == 'gzip':
compressed = StringIO(response.read())
gzipper = gzip.GzipFile(fileobj=compressed)
data = gzipper.read()
else:
data = response.read()
return data
def call(self, params):
"""
Make an API call to the wiki. *params* is a dictionary of query string
arguments. For example, to get basic information about the wiki, run:
>>> wiki.call({'action': 'query', 'meta': 'siteinfo'})
which would make a call to
``http://domain/w/api.php?action=query&meta=siteinfo&format=json``
(except the query string would be sent in POST).
:param params: dictionary of query string parameters
:returns: dictionary containing API response
"""
return json.loads(self._fetch_http(self._api_url, params))
def normalize_api_url(self):
"""
Checks that the API URL used to initialize this object actually returns
JSON. If it doesn't, make some educated guesses and try to find the
correct URL.
:returns: a valid API URL or ``None``
"""
def tester(self, api_url):
"""
Attempts to fetch general information about the MediaWiki instance
in order to test whether *api_url* will return JSON.
"""
data = self._fetch_http(api_url, {'action': 'query',
'meta': 'siteinfo'})
try:
data_json = json.loads(data)
return (data, data_json)
except ValueError:
return (data, None)
data, data_json = tester(self, self._api_url)
if data_json:
return self._api_url
else:
# if there's an index.php in the URL, we might find the API
if 'index.php' in self._api_url:
test_api_url = self._api_url.split('index.php')[0] + 'api.php'
print test_api_url
test_data, test_data_json = tester(self, test_api_url)
print (test_data, test_data_json)
if test_data_json:
self._api_url = test_api_url
return self._api_url
return None
def login(self, user, passwd):
"""
Logs into the wiki with username *user* and password *passwd*. Returns
``True`` on successful login.
:param user: username
:param passwd: password
:returns: ``True`` on successful login, otherwise ``False``
"""
def do_login(self, user, passwd, token=None):
"""
Login function that handles CSRF protection (see `MediaWiki bug
23076`_). Returns ``True`` on successful login.
.. _`MediaWiki bug 23076`:
https://bugzilla.wikimedia.org/show_bug.cgi?id=23076
"""
data = {'action': 'login',
'lgname': user,
'lgpassword': passwd}
if token:
data['lgtoken'] = token
result = self.call(data)
if result['login']['result'] == 'Success':
self._high_limits = None
return True
elif result['login']['result'] == 'NeedToken' and not token:
return do_login(self, user, passwd, result['login']['token'])
else:
return False
return do_login(self, user, passwd)
def logout(self):
"""
Logs out of the wiki.
:returns: ``True``
"""
data = {'action': 'logout'}
self.call(data)
self._high_limits = None
return True
def limits(self, low, high):
"""
Convenience function for determining appropriate limits in the API. If
the (usually logged-in) client has the ``apihighlimits`` right, it will
return *high*; otherwise it will return *low*.
It's generally a good idea to use the highest limit possible; this
reduces the amount of HTTP requests and therefore overhead. Read the
API documentation for details on the limits for the function you are
using.
:param low: value to return if client does not have ``apihighlimits``
:param high: value to return if client has ``apihighlimits``
:returns: *low* or *high*
"""
        if self._high_limits is None:
result = self.call({'action': 'query',
'meta': 'userinfo',
'uiprop': 'rights'})
self._high_limits = 'apihighlimits' in \
result['query']['userinfo']['rights']
if self._high_limits:
return high
else:
return low
def namespaces(self, psuedo=True):
"""
Fetches a list of namespaces for this wiki and returns them as a
dictionary of namespace IDs corresponding to namespace names. If
*psuedo* is ``True``, the dictionary will also list psuedo-namespaces,
which are the "Special:" and "Media:" namespaces (special because they
have no content associated with them and their IDs are negative).
:param psuedo: boolean to determine inclusion of psuedo-namespaces
:returns: dictionary of namespace IDs and names
"""
        if self._namespaces is None:
result = self.call({'action': 'query',
'meta': 'siteinfo',
'siprop': 'namespaces'})
self._namespaces = {}
self._psuedo_namespaces = {}
for nsid in result['query']['namespaces']:
if int(nsid) >= 0:
self._namespaces[int(nsid)] = \
result['query']['namespaces'][nsid]['*']
else:
self._psuedo_namespaces[int(nsid)] = \
result['query']['namespaces'][nsid]['*']
if psuedo:
retval = {}
retval.update(self._namespaces)
retval.update(self._psuedo_namespaces)
return retval
else:
return self._namespaces
@staticmethod
def parse_date(date):
"""
Converts `ISO 8601`_ dates generated by the MediaWiki API into
:py:class:`datetime.datetime` objects.
This will return a time in what your wiki thinks is UTC. Plan
accordingly for bad server configurations.
.. _`ISO 8601`: http://en.wikipedia.org/wiki/ISO_8601
:param date: string ISO 8601 date representation
:returns: :py:class:`datetime.datetime` object
"""
# MediaWiki API dates are always of the format
# YYYY-MM-DDTHH:MM:SSZ
# (see $formats in wfTimestamp() in includes/GlobalFunctions.php)
return datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
def build_user_agent(application_name, version, url):
"""
Build a good User-Agent header string that can help server administrators
contact you if your application is misbehaving. This string will also
contain a reference to python-simplemediawiki.
See the documentation for :py:class:`simplemediawiki.MediaWiki` for good
reasons why you should use a custom User-Agent string for your application.
:param application_name: your application's name
:param version: your application's version
    :param url: a URL where someone can find information about your \
application or your email address
:returns: User-Agent string
"""
return '%s/%s %s/%s (+%s)' % (application_name, version,
'python-simplemediawiki', __version__, url)
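# Illustrative usage sketch (not part of the original module; the wiki URL,
# username and password below are placeholders):
#
#     ua = build_user_agent('my-app', '0.1', 'https://example.com/contact')
#     wiki = MediaWiki('http://en.wikipedia.org/w/api.php', user_agent=ua)
#     if wiki.login('ExampleUser', 'example-password'):
#         limit = wiki.limits(50, 500)
#         namespaces = wiki.namespaces()
#         wiki.logout()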
| 39.395706
| 79
| 0.615666
|
00501d3ddabc4475f90bfd6ce29bce3023f32081
| 4,248
|
py
|
Python
|
airflow/contrib/hooks/aws_glue_catalog_hook.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 1
|
2020-09-03T09:35:30.000Z
|
2020-09-03T09:35:30.000Z
|
airflow/contrib/hooks/aws_glue_catalog_hook.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 1
|
2019-03-27T02:21:36.000Z
|
2019-03-27T02:21:36.000Z
|
airflow/contrib/hooks/aws_glue_catalog_hook.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 2
|
2020-04-24T10:51:17.000Z
|
2020-05-26T01:50:29.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.aws_hook import AwsHook
class AwsGlueCatalogHook(AwsHook):
"""
Interact with AWS Glue Catalog
:param aws_conn_id: ID of the Airflow connection where
credentials and extra configuration are stored
:type aws_conn_id: str
:param region_name: aws region name (example: us-east-1)
:type region_name: str
"""
def __init__(self,
aws_conn_id='aws_default',
region_name=None,
*args,
**kwargs):
self.region_name = region_name
super(AwsGlueCatalogHook, self).__init__(aws_conn_id=aws_conn_id, *args, **kwargs)
def get_conn(self):
"""
Returns glue connection object.
"""
self.conn = self.get_client_type('glue', self.region_name)
return self.conn
def get_partitions(self,
database_name,
table_name,
expression='',
page_size=None,
max_items=None):
"""
Retrieves the partition values for a table.
:param database_name: The name of the catalog database where the partitions reside.
:type database_name: str
:param table_name: The name of the partitions' table.
:type table_name: str
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:type expression: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('get_partitions')
response = paginator.paginate(
DatabaseName=database_name,
TableName=table_name,
Expression=expression,
PaginationConfig=config
)
partitions = set()
for page in response:
for p in page['Partitions']:
partitions.add(tuple(p['Values']))
return partitions
def check_for_partition(self, database_name, table_name, expression):
"""
Checks whether a partition exists
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table @partition belongs to
:type table_name: str
        :param expression: Expression that matches the partitions to check for
            (e.g. `a = 'b' AND c = 'd'`)
:type expression: str
:rtype: bool
>>> hook = AwsGlueCatalogHook()
>>> t = 'static_babynames_partitioned'
>>> hook.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
partitions = self.get_partitions(database_name, table_name, expression, max_items=1)
if partitions:
return True
else:
return False
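# Illustrative usage sketch (not part of the original hook; the connection id,
# database, table and partition expression are placeholders):
#
#     hook = AwsGlueCatalogHook(aws_conn_id='aws_default', region_name='us-east-1')
#     if hook.check_for_partition('my_db', 'my_table', "ds='2015-01-01'"):
#         partitions = hook.get_partitions('my_db', 'my_table', "ds='2015-01-01'")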
| 35.697479
| 137
| 0.627589
|
ef3b557b0aece96c72158903948d94d72d0380c9
| 15,092
|
py
|
Python
|
advanced_filters/forms.py
|
FreckleIOT/django-advanced-filters
|
fc4dd0b5f17051dad282eaca3482da7c9ef0ac70
|
[
"MIT"
] | null | null | null |
advanced_filters/forms.py
|
FreckleIOT/django-advanced-filters
|
fc4dd0b5f17051dad282eaca3482da7c9ef0ac70
|
[
"MIT"
] | null | null | null |
advanced_filters/forms.py
|
FreckleIOT/django-advanced-filters
|
fc4dd0b5f17051dad282eaca3482da7c9ef0ac70
|
[
"MIT"
] | null | null | null |
import logging
import operator
from datetime import datetime as dt
from pprint import pformat
import django
from django import forms
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.utils import get_fields_from_path
from django.db.models import FieldDoesNotExist, Q
from django.db.models.fields import DateField
from django.forms.formsets import BaseFormSet, formset_factory
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.six.moves import range, reduce
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from .form_helpers import CleanWhiteSpacesMixin, VaryingTypeCharField
from .models import AdvancedFilter
# django < 1.9 support
USE_VENDOR_DIR = django.VERSION >= (1, 9)
logger = logging.getLogger('advanced_filters.forms')
# select2 location can be modified via settings
SELECT2_JS = getattr(settings, 'SELECT2_JS', 'select2/select2.min.js')
SELECT2_CSS = getattr(settings, 'SELECT2_CSS', 'select2/select2.min.css')
def date_to_string(timestamp):
if timestamp:
return dt.fromtimestamp(timestamp).strftime('%Y-%m-%d')
else:
return ""
class AdvancedFilterQueryForm(CleanWhiteSpacesMixin, forms.Form):
""" Build the query from field, operator and value """
OPERATORS = (
("iexact", _("Equals")),
("icontains", _("Contains")),
("iregex", _("One of")),
("range", _("DateTime Range")),
("isnull", _("Is NULL")),
("istrue", _("Is TRUE")),
("isfalse", _("Is FALSE")),
("lt", _("Less Than")),
("gt", _("Greater Than")),
("lte", _("Less Than or Equal To")),
("gte", _("Greater Than or Equal To")),
)
FIELD_CHOICES = (
("_OR", _("Or (mark an or between blocks)")),
)
field = forms.ChoiceField(required=True, widget=forms.Select(
attrs={'class': 'query-field'}), label=_('Field'))
operator = forms.ChoiceField(
label=_('Operator'),
required=True, choices=OPERATORS, initial="iexact",
widget=forms.Select(attrs={'class': 'query-operator'}))
value = VaryingTypeCharField(required=False, widget=forms.TextInput(
attrs={'class': 'query-value'}), label=_('Value'))
value_from = forms.DateTimeField(widget=forms.HiddenInput(
attrs={'class': 'query-dt-from'}), required=False)
value_to = forms.DateTimeField(widget=forms.HiddenInput(
attrs={'class': 'query-dt-to'}), required=False)
negate = forms.BooleanField(initial=False, required=False, label=_('Exclude'))
def _build_field_choices(self, fields):
"""
Iterate over passed model fields tuple and update initial choices.
"""
return tuple(sorted(
[(fquery, capfirst(fname)) for fquery, fname in fields.items()],
key=lambda f: f[1].lower())
) + self.FIELD_CHOICES
def _build_query_dict(self, formdata=None):
"""
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
"""
if self.is_valid() and formdata is None:
formdata = self.cleaned_data
key = "{field}__{operator}".format(**formdata)
if formdata['operator'] == "isnull":
return {key: True}
elif formdata['operator'] == "istrue":
return {formdata['field']: True}
elif formdata['operator'] == "isfalse":
return {formdata['field']: False}
return {key: formdata['value']}
@staticmethod
def _parse_query_dict(query_data, model):
"""
Take a list of query field dict and return data for form initialization
"""
operator = 'iexact'
if query_data['field'] == '_OR':
query_data['operator'] = operator
return query_data
parts = query_data['field'].split('__')
if len(parts) < 2:
field = parts[0]
else:
if parts[-1] in dict(AdvancedFilterQueryForm.OPERATORS).keys():
field = '__'.join(parts[:-1])
operator = parts[-1]
else:
field = query_data['field']
query_data['field'] = field
mfield = get_fields_from_path(model, query_data['field'])
if not mfield:
            raise Exception('Field path "%s" could not be followed to a field'
                            ' in model %s' % (query_data['field'], model))
else:
mfield = mfield[-1] # get the field object
if operator == "isnull":
query_data['operator'] = "isnull"
elif query_data['value'] is None:
query_data['operator'] = "isnull"
elif query_data['value'] is True:
query_data['operator'] = "istrue"
elif query_data['value'] is False:
query_data['operator'] = "isfalse"
else:
if isinstance(mfield, DateField):
# this is a date/datetime field
query_data['operator'] = "range" # default
else:
query_data['operator'] = operator # default
if isinstance(query_data.get('value'),
list) and query_data['operator'] == 'range':
date_from = date_to_string(query_data.get('value_from'))
date_to = date_to_string(query_data.get('value_to'))
query_data['value'] = ','.join([date_from, date_to])
return query_data
def set_range_value(self, data):
"""
Validates date range by parsing into 2 datetime objects and
validating them both.
"""
dtfrom = data.pop('value_from')
dtto = data.pop('value_to')
if dtfrom is dtto is None:
self.errors['value'] = ['Date range requires values']
raise forms.ValidationError([])
data['value'] = (dtfrom, dtto)
def clean(self):
cleaned_data = super(AdvancedFilterQueryForm, self).clean()
if cleaned_data.get('operator') == "range":
if ('value_from' in cleaned_data and
'value_to' in cleaned_data):
self.set_range_value(cleaned_data)
elif (not (cleaned_data.get('field') == "_OR" or
cleaned_data.get('operator') == "isnull" or
cleaned_data.get('operator') == "istrue" or
cleaned_data.get('operator') == "isfalse") and
cleaned_data.get('value') == ''):
logger.debug(
"Errors validating advanced query filters: value "
"is a required attribute")
raise forms.ValidationError({'value': ["This field is required.", ]})
return cleaned_data
def make_query(self, *args, **kwargs):
""" Returns a Q object from the submitted form """
query = Q() # initial is an empty query
query_dict = self._build_query_dict(self.cleaned_data)
if 'negate' in self.cleaned_data and self.cleaned_data['negate']:
query = query & ~Q(**query_dict)
else:
query = query & Q(**query_dict)
return query
def __init__(self, model_fields={}, readonly=False, *args, **kwargs):
self.readonly = readonly
super(AdvancedFilterQueryForm, self).__init__(*args, **kwargs)
self.FIELD_CHOICES = self._build_field_choices(model_fields)
self.fields['field'].choices = self.FIELD_CHOICES
if not self.fields['field'].initial:
self.fields['field'].initial = self.FIELD_CHOICES[0]
self.fields['field'].disabled = self.readonly
self.fields['operator'].disabled = self.readonly
self.fields['value'].disabled = self.readonly
self.fields['value_from'].disabled = self.readonly
self.fields['value_to'].disabled = self.readonly
self.fields['negate'].disabled = self.readonly
class AdvancedFilterFormSet(BaseFormSet):
""" """
fields = ()
extra_kwargs = {}
def __init__(self, *args, **kwargs):
self.model_fields = kwargs.pop('model_fields', {})
self.readonly = kwargs.pop('readonly', False)
super(AdvancedFilterFormSet, self).__init__(*args, **kwargs)
if self.forms:
form = self.forms[0]
self.fields = form.visible_fields()
def get_form_kwargs(self, index):
kwargs = super(AdvancedFilterFormSet, self).get_form_kwargs(index)
kwargs['model_fields'] = self.model_fields
kwargs['readonly'] = self.readonly
return kwargs
@cached_property
def forms(self):
# override the original property to include `model_fields` and `readonly` argument
forms = [self._construct_form(
i, model_fields=self.model_fields, readonly=self.readonly)
for i in range(self.total_form_count())]
forms.append(self.empty_form) # add initial empty form
return forms
AFQFormSet = formset_factory(
AdvancedFilterQueryForm, formset=AdvancedFilterFormSet,
extra=1, can_delete=True)
AFQFormSetNoExtra = formset_factory(
AdvancedFilterQueryForm, formset=AdvancedFilterFormSet,
extra=0, can_delete=True)
class AdvancedFilterForm(CleanWhiteSpacesMixin, forms.ModelForm):
""" Form to save/edit advanced filter forms """
class Meta:
model = AdvancedFilter
fields = ('title', 'is_public')
class Media:
required_js = [
'admin/js/%sjquery.min.js' % ('vendor/jquery/' if USE_VENDOR_DIR else ''),
'advanced-filters/jquery_adder.js',
'orig_inlines%s.js' % ('' if settings.DEBUG else '.min'),
'magnific-popup/jquery.magnific-popup.js',
'advanced-filters/advanced-filters.js',
]
js = required_js + [SELECT2_JS]
css = {'screen': [
SELECT2_CSS,
'advanced-filters/advanced-filters.css',
'magnific-popup/magnific-popup.css'
]}
def get_fields_from_model(self, model, fields):
"""
Iterate over given <field> names (in "orm query" notation) and find
the actual field given the initial <model>.
If <field> is a tuple of the format ('field_name', 'Verbose name'),
overwrite the field's verbose name with the given name for display
purposes.
"""
model_fields = {}
for field in fields:
if isinstance(field, tuple) and len(field) == 2:
field, verbose_name = field[0], field[1]
else:
try:
model_field = get_fields_from_path(model, field)[-1]
verbose_name = model_field.verbose_name
except (FieldDoesNotExist, IndexError, TypeError) as e:
                    logger.warning("AdvancedFilterForm: skip invalid field "
                                   "- %s", e)
continue
model_fields[field] = verbose_name
return model_fields
def __init__(self, *args, **kwargs):
model_admin = kwargs.pop('model_admin', None)
instance = kwargs.get('instance')
extra_form = kwargs.pop('extra_form', False)
self.readonly = kwargs.pop('readonly', False)
# TODO: allow all fields to be determined by model
filter_fields = kwargs.pop('filter_fields', None)
if model_admin:
self._model = model_admin.model
elif instance and instance.model:
# get existing instance model
self._model = apps.get_model(*instance.model.split('.'))
try:
admin_instance = getattr(settings, 'ADVANCED_FILTERS_ADMIN_INSTANCE', None)
if admin_instance:
site = import_string(admin_instance).site
else:
site = admin.site
model_admin = site._registry[self._model]
except KeyError:
logger.debug('No ModelAdmin registered for %s', self._model)
else:
raise Exception('Adding new AdvancedFilter from admin is '
'not supported')
self._filter_fields = filter_fields or getattr(
model_admin, 'advanced_filter_fields', ())
super(AdvancedFilterForm, self).__init__(*args, **kwargs)
# populate existing or empty forms formset
data = None
if len(args):
data = args[0]
elif kwargs.get('data'):
data = kwargs.get('data')
self.initialize_form(instance, self._model, data, extra_form)
def clean(self):
cleaned_data = super(AdvancedFilterForm, self).clean()
if not self.fields_formset.is_valid():
logger.debug(
"Errors validating advanced query filters: %s",
pformat([(f.errors, f.non_field_errors())
for f in self.fields_formset.forms]))
raise forms.ValidationError("Error validating filter forms")
cleaned_data['model'] = "%s.%s" % (self._model._meta.app_label,
self._model._meta.object_name)
return cleaned_data
@property
def _non_deleted_forms(self):
forms = []
for form in self.fields_formset.forms:
if form in self.fields_formset.deleted_forms:
continue # skip deleted forms when generating query
forms.append(form)
return forms
def generate_query(self):
""" Reduces multiple queries into a single usable query """
query = Q()
ORed = []
for form in self._non_deleted_forms:
if not hasattr(form, 'cleaned_data'):
continue
if form.cleaned_data['field'] == "_OR":
ORed.append(query)
query = Q()
else:
query = query & form.make_query()
if ORed:
if query: # add last query for OR if any
ORed.append(query)
query = reduce(operator.or_, ORed)
return query
def initialize_form(self, instance, model, data=None, extra=None):
""" Takes a "finalized" query and generate it's form data """
model_fields = self.get_fields_from_model(model, self._filter_fields)
forms = []
if instance:
for field_data in instance.list_fields():
forms.append(
AdvancedFilterQueryForm._parse_query_dict(
field_data, model))
formset = AFQFormSetNoExtra if not extra else AFQFormSet
self.fields_formset = formset(
data=data,
initial=forms or None,
model_fields=model_fields,
readonly=self.readonly
)
def save(self, commit=True):
self.instance.query = self.generate_query()
self.instance.model = self.cleaned_data.get('model')
return super(AdvancedFilterForm, self).save(commit)
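# Worked example (descriptive note; field names are placeholders): for query
# rows [a, _OR, b], generate_query() ANDs conditions into the current Q()
# until it reaches the "_OR" marker, pushes that block onto ORed, restarts
# with an empty Q() for the next block, and finally reduces the blocks with
# operator.or_ into Q(a) | Q(b).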
| 38.896907
| 91
| 0.597403
|
51f4a0b974ef0151a0d337402095ba68312bf25c
| 463
|
py
|
Python
|
test/os_test.py
|
littlecharacter/AutoWork
|
feebb8459f889b7a9165073be8fd44ba544cbb35
|
[
"Apache-2.0"
] | null | null | null |
test/os_test.py
|
littlecharacter/AutoWork
|
feebb8459f889b7a9165073be8fd44ba544cbb35
|
[
"Apache-2.0"
] | null | null | null |
test/os_test.py
|
littlecharacter/AutoWork
|
feebb8459f889b7a9165073be8fd44ba544cbb35
|
[
"Apache-2.0"
] | null | null | null |
import os
import psutil
import time
import subprocess
if __name__ == "__main__":
pass
# op_content = "/Applications/微信.app"
# p = subprocess.Popen(op_content)
# time.sleep(3)
# p.terminate()
# os.system(f'open \"{op_content}\"')
# os.system("osascript -e 'tell application \"/Applications/微信.app\" to quit'")
for pid in psutil.pids():
p = psutil.Process(pid)
if p.name() == 'War3.exe':
p.terminate()
| 23.15
| 83
| 0.602592
|
62ada1806ff717068dc400eb8843bfc3388928eb
| 1,301
|
py
|
Python
|
tests/test_sessions.py
|
kennethreitz/requests-async
|
213da681c13a79e2f6232920e61f2181d92daa3e
|
[
"Apache-2.0"
] | 7
|
2019-03-22T10:48:42.000Z
|
2019-06-24T03:28:33.000Z
|
tests/test_sessions.py
|
Lt-grint/requests-async
|
b5147b15c67ca504fe35536c950fcca67da9d147
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sessions.py
|
Lt-grint/requests-async
|
b5147b15c67ca504fe35536c950fcca67da9d147
|
[
"Apache-2.0"
] | 2
|
2019-04-15T09:59:52.000Z
|
2019-06-11T08:24:34.000Z
|
import asyncio
import requests_async
import pytest
@pytest.mark.asyncio
async def test_session(server):
url = "http://127.0.0.1:8000/"
with requests_async.Session() as session:
response = await session.get(url)
assert response.status_code == 200
assert response.json() == {"method": "GET", "url": url, "body": ""}
response = await session.post(url)
assert response.status_code == 200
assert response.json() == {"method": "POST", "url": url, "body": ""}
response = await session.put(url)
assert response.status_code == 200
assert response.json() == {"method": "PUT", "url": url, "body": ""}
response = await session.patch(url)
assert response.status_code == 200
assert response.json() == {"method": "PATCH", "url": url, "body": ""}
response = await session.delete(url)
assert response.status_code == 200
assert response.json() == {"method": "DELETE", "url": url, "body": ""}
response = await session.options(url)
assert response.status_code == 200
assert response.json() == {"method": "OPTIONS", "url": url, "body": ""}
response = await session.head(url)
assert response.status_code == 200
assert response.text == ""
| 34.236842
| 79
| 0.598002
|
8c708082f02fa3b39d8b345ddafd84f986fd9b76
| 5,697
|
py
|
Python
|
blabel/label_tools.py
|
kubaraczkowski/blabel
|
afbe6ecd7805ef83e24a52eb60476216c1086711
|
[
"MIT"
] | 91
|
2018-11-07T09:03:44.000Z
|
2022-03-25T04:43:45.000Z
|
blabel/label_tools.py
|
kubaraczkowski/blabel
|
afbe6ecd7805ef83e24a52eb60476216c1086711
|
[
"MIT"
] | 17
|
2019-09-20T11:31:52.000Z
|
2022-02-16T14:37:06.000Z
|
blabel/label_tools.py
|
kubaraczkowski/blabel
|
afbe6ecd7805ef83e24a52eb60476216c1086711
|
[
"MIT"
] | 26
|
2019-09-20T11:20:17.000Z
|
2021-11-11T12:14:51.000Z
|
"""Utilities for label generation.
"""
import base64
from io import BytesIO
import datetime
import textwrap
import qrcode
import barcode as python_barcode
from pystrich.datamatrix import DataMatrixEncoder
from PIL import Image, ImageOps
def now(fmt="%Y-%m-%d %H:%M"):
"""Display the current time.
Default format is "year-month-day hour:minute" but another format can be
provided (see ``datetime`` docs for date formatting).
"""
now = datetime.datetime.now()
if fmt is not None:
now = now.strftime(fmt)
return now
def pil_to_html_imgdata(img, fmt='PNG'):
"""Convert a PIL image into HTML-displayable data.
The result is a string ``data:image/FMT;base64,xxxxxxxxx`` which you
can provide as a "src" parameter to a ``<img/>`` tag.
Examples:
---------
>>> data = pil_to_html_imgdata(my_pil_img)
>>> html_data = '<img src="%s"/>' % data
"""
buffered = BytesIO()
img.save(buffered, format=fmt)
img_str = base64.b64encode(buffered.getvalue())
prefix = 'data:image/%s;charset=utf-8;base64,' % fmt.lower()
return prefix + img_str.decode()
def wrap(text, col_width):
    """Break the text into lines of at most 'col_width' characters."""
    return "\n".join(textwrap.wrap(text, col_width))
def hiro_square(width='100%'):
"""Return a <svg/> string of a Hiro square to be included in HTML."""
svg= """
<svg height="%s" width="%s" version="1.1" viewBox="0 0 4 4"
xmlns="http://www.w3.org/2000/svg">
<rect x="0" y="0" width="4" height="4" fill="#000" stroke-width="0"/>
<rect x="1" y="1" width="2" height="2" fill="#fff" stroke-width="0"/>
</svg>
""" % (width, width)
prefix = "data:image/svg+xml;charset=utf-8;base64,"
return prefix + base64.b64encode(svg.encode()).decode()
def qr_code(data, optimize=20, fill_color="black", back_color="white",
**qr_code_params):
"""Return a QR code's image data.
Powered by the Python library ``qrcode``. See this library's documentation
for more details.
Parameters
----------
data
Data to be encoded in the QR code.
optimize
Chunk length optimization setting.
fill_color, back_color
Colors to use for QRcode and its background.
**qr_code_params
Parameters of the ``qrcode.QRCode`` constructor, such as ``version``,
``error_correction``, ``box_size``, ``border``.
Returns
-------
image_base64_data
A string ``data:image/png;base64,xxxxxxxxx`` which you can provide as a
"src" parameter to a ``<img/>`` tag.
Examples:
---------
>>> data = qr_code('egf45728')
>>> html_data = '<img src="%s"/>' % data
"""
params = dict(box_size=5, border=0)
params.update(qr_code_params)
qr = qrcode.QRCode(**params)
    qr.add_data(data, optimize=optimize)
qri = qr.make_image(fill_color=fill_color, back_color=back_color)
return pil_to_html_imgdata(qri.get_image())
def datamatrix(data, cellsize=2, with_border=False):
"""Return a datamatrix's image data.
Powered by the Python library ``pyStrich``. See this library's documentation
for more details.
Parameters
----------
data
Data to be encoded in the datamatrix.
cellsize
      Size in pixels of each cell (module) of the rendered datamatrix.
with_border
If false, there will be no border or margin to the datamatrix image.
Returns
-------
image_base64_data
A string ``data:image/png;base64,xxxxxxxxx`` which you can provide as a
"src" parameter to a ``<img/>`` tag.
Examples:
---------
>>> data = datamatrix('EGF')
>>> html_data = '<img src="%s"/>' % data
"""
encoder = DataMatrixEncoder(data)
img_data = encoder.get_imagedata(cellsize=cellsize)
img = Image.open(BytesIO(img_data))
if not with_border:
img = img.crop(ImageOps.invert(img).getbbox())
return pil_to_html_imgdata(img)
def barcode(data, barcode_class='code128', fmt='png', **writer_options):
"""Return a barcode's image data.
Powered by the Python library ``python-barcode``. See this library's
documentation for more details.
Parameters
----------
data
Data to be encoded in the datamatrix.
barcode_class
Class/standard to use to encode the data. Different standards have
different constraints.
writer_options
Various options for the writer to tune the appearance of the barcode
(see python-barcode documentation).
Returns
-------
image_base64_data
A string ``data:image/png;base64,xxxxxxxxx`` which you can provide as a
"src" parameter to a ``<img/>`` tag.
Examples:
---------
>>> data = barcode('EGF12134', barcode_class='code128')
>>> html_data = '<img src="%s"/>' % data
Examples of writer options:
>>> { 'background': 'white',
>>> 'font_size': 10,
>>> 'foreground': 'black',
>>> 'module_height': 15.0,
>>> 'module_width': 0.2,
>>> 'quiet_zone': 6.5,
>>> 'text': '',
>>> 'text_distance': 5.0,
>>> 'write_text': True
>>> }
"""
constructor = python_barcode.get_barcode_class(barcode_class)
data = str(data).zfill(constructor.digits)
writer = {
'svg': python_barcode.writer.ImageWriter,
'png':python_barcode.writer.ImageWriter
}[fmt]
barcode_img = constructor(data, writer=writer())
img = barcode_img.render(writer_options=writer_options)
if fmt == 'png':
return pil_to_html_imgdata(img, fmt='PNG')
else:
prefix = "data:image/svg+xml;charset=utf-8;base64,"
return prefix + base64.b64encode(img).decode()
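# Illustrative usage sketch (not part of the original module; the encoded
# strings are placeholders borrowed from the docstring examples):
#
#     qr_src = qr_code('egf45728')
#     dm_src = datamatrix('EGF')
#     bc_src = barcode('EGF12134', barcode_class='code128')
#     html = '<img src="%s"/><img src="%s"/><img src="%s"/>' % (qr_src, dm_src, bc_src)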
| 29.518135
| 80
| 0.626821
|
188bd8feac21a8211dd02329d143c9e2016abd26
| 8,446
|
py
|
Python
|
src/learners/maddpg_learner.py
|
at-peter/epymarl
|
e84ee56f435e6fe69e9bb3297256a326f65b3b1f
|
[
"Apache-2.0"
] | null | null | null |
src/learners/maddpg_learner.py
|
at-peter/epymarl
|
e84ee56f435e6fe69e9bb3297256a326f65b3b1f
|
[
"Apache-2.0"
] | null | null | null |
src/learners/maddpg_learner.py
|
at-peter/epymarl
|
e84ee56f435e6fe69e9bb3297256a326f65b3b1f
|
[
"Apache-2.0"
] | null | null | null |
import copy
from components.episode_buffer import EpisodeBatch
from modules.critics.maddpg import MADDPGCritic
import torch as th
from torch.optim import RMSprop, Adam
from controllers.maddpg_controller import gumbel_softmax
from modules.critics import REGISTRY as critic_registry
class MADDPGLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.n_agents = args.n_agents
self.n_actions = args.n_actions
self.logger = logger
self.mac = mac
self.target_mac = copy.deepcopy(self.mac)
self.agent_params = list(mac.parameters())
self.critic = critic_registry[args.critic_type](scheme, args)
self.target_critic = copy.deepcopy(self.critic)
self.critic_params = list(self.critic.parameters())
self.agent_optimiser = Adam(params=self.agent_params, lr=self.args.lr)
self.critic_optimiser = Adam(params=self.critic_params, lr=self.args.lr)
self.log_stats_t = -self.args.learner_log_interval - 1
self.last_target_update_episode = 0
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions_onehot"]
terminated = batch["terminated"][:, :-1].float()
rewards = rewards.unsqueeze(2).expand(-1, -1, self.n_agents, -1)
terminated = terminated.unsqueeze(2).expand(-1, -1, self.n_agents, -1)
mask = 1 - terminated
batch_size = batch.batch_size
if self.args.standardise_rewards:
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
# Train the critic
inputs = self._build_inputs(batch)
actions = actions.view(batch_size, -1, 1, self.n_agents * self.n_actions).expand(-1, -1, self.n_agents, -1)
q_taken = self.critic(inputs[:, :-1], actions[:, :-1].detach())
q_taken = q_taken.view(batch_size, -1, 1)
# Use the target actor and target critic network to compute the target q
self.target_mac.init_hidden(batch.batch_size)
target_actions = []
for t in range(1, batch.max_seq_length):
agent_target_outs = self.target_mac.target_actions(batch, t)
target_actions.append(agent_target_outs)
target_actions = th.stack(target_actions, dim=1) # Concat over time
target_actions = target_actions.view(batch_size, -1, 1, self.n_agents * self.n_actions).expand(-1, -1, self.n_agents, -1)
target_vals = self.target_critic(inputs[:, 1:], target_actions.detach())
target_vals = target_vals.view(batch_size, -1, 1)
targets = rewards.reshape(-1, 1) + self.args.gamma * (1 - terminated.reshape(-1, 1)) * target_vals.reshape(-1, 1)
td_error = (q_taken.view(-1, 1) - targets.detach())
masked_td_error = td_error * mask.reshape(-1, 1)
loss = (masked_td_error ** 2).mean()
self.critic_optimiser.zero_grad()
loss.backward()
critic_grad_norm = th.nn.utils.clip_grad_norm_(self.critic_params, self.args.grad_norm_clip)
self.critic_optimiser.step()
# Train the actor
self.mac.init_hidden(batch_size)
pis = []
actions = []
for t in range(batch.max_seq_length-1):
pi = self.mac.forward(batch, t=t).view(batch_size, 1, self.n_agents, -1)
actions.append(gumbel_softmax(pi, hard=True))
pis.append(pi)
actions = th.cat(actions, dim=1)
actions = actions.view(batch_size, -1, 1, self.n_agents * self.n_actions).expand(-1, -1, self.n_agents, -1)
new_actions = []
for i in range(self.n_agents):
temp_action = th.split(actions[:, :, i, :], self.n_actions, dim=2)
actions_i = []
for j in range(self.n_agents):
if i == j:
actions_i.append(temp_action[j])
else:
actions_i.append(temp_action[j].detach())
actions_i = th.cat(actions_i, dim=-1)
new_actions.append(actions_i.unsqueeze(2))
new_actions = th.cat(new_actions, dim=2)
pis = th.cat(pis, dim=1)
pis[pis==-1e10] = 0
pis = pis.reshape(-1, 1)
q = self.critic(inputs[:, :-1], new_actions)
q = q.reshape(-1, 1)
mask = mask.reshape(-1, 1)
# Compute the actor loss
pg_loss = -(q * mask).mean() + self.args.reg * (pis ** 2).mean()
# Optimise agents
self.agent_optimiser.zero_grad()
pg_loss.backward()
agent_grad_norm = th.nn.utils.clip_grad_norm_(self.agent_params, self.args.grad_norm_clip)
self.agent_optimiser.step()
if self.args.target_update_interval_or_tau > 1 and (episode_num - self.last_target_update_episode) / self.args.target_update_interval_or_tau >= 1.0:
self._update_targets_hard()
self.last_target_update_episode = episode_num
elif self.args.target_update_interval_or_tau <= 1.0:
self._update_targets_soft(self.args.target_update_interval_or_tau)
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("critic_loss", loss.item(), t_env)
self.logger.log_stat("critic_grad_norm", critic_grad_norm, t_env)
self.logger.log_stat("agent_grad_norm", agent_grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", masked_td_error.abs().sum().item() / mask_elems, t_env)
self.logger.log_stat("q_taken_mean", (q_taken).sum().item() / mask_elems, t_env)
self.logger.log_stat("target_mean", targets.sum().item() / mask_elems, t_env)
self.logger.log_stat("pg_loss", pg_loss.item(), t_env)
self.logger.log_stat("agent_grad_norm", agent_grad_norm, t_env)
self.log_stats_t = t_env
def _build_inputs(self, batch, t=None):
bs = batch.batch_size
max_t = batch.max_seq_length if t is None else 1
ts = slice(None) if t is None else slice(t, t + 1)
inputs = []
inputs.append(batch["state"][:, ts].unsqueeze(2).expand(-1, -1, self.n_agents, -1))
if self.args.obs_individual_obs:
inputs.append(batch["obs"][:, ts])
# last actions
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, 0:1]))
elif isinstance(t, int):
inputs.append(batch["actions_onehot"][:, slice(t - 1, t)])
else:
last_actions = th.cat([th.zeros_like(batch["actions_onehot"][:, 0:1]), batch["actions_onehot"][:, :-1]],
dim=1)
# last_actions = last_actions.view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)
inputs.append(last_actions)
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))
inputs = th.cat(inputs, dim=-1)
return inputs
def _update_targets_hard(self):
self.target_mac.load_state(self.mac)
self.target_critic.load_state_dict(self.critic.state_dict())
def _update_targets_soft(self, tau):
for target_param, param in zip(self.target_mac.parameters(), self.mac.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
self.critic.cuda()
self.target_critic.cuda()
def save_models(self, path):
self.mac.save_models(path)
th.save(self.critic.state_dict(), "{}/critic.th".format(path))
th.save(self.agent_optimiser.state_dict(), "{}/agent_opt.th".format(path))
th.save(self.critic_optimiser.state_dict(), "{}/critic_opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
self.agent_optimiser.load_state_dict(
th.load("{}/agent_opt.th".format(path), map_location=lambda storage, loc: storage))
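# Note: _update_targets_soft applies the standard Polyak averaging update
#     theta_target <- (1 - tau) * theta_target + tau * theta
# to both the target actor (target_mac) and the target critic, while
# _update_targets_hard copies the online parameters wholesale; train() selects
# between them based on args.target_update_interval_or_tau.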
| 45.654054
| 156
| 0.6287
|
6f80d3e9c779d6682b1cc587762ad462a45911e4
| 134
|
py
|
Python
|
dataservice/api/biospecimen/__init__.py
|
ConnorBarnhill/kf-api-dataservice
|
547df467a307788882469a25c947a14965a26336
|
[
"Apache-2.0"
] | 6
|
2018-01-25T13:49:24.000Z
|
2020-03-07T16:25:09.000Z
|
dataservice/api/biospecimen/__init__.py
|
ConnorBarnhill/kf-api-dataservice
|
547df467a307788882469a25c947a14965a26336
|
[
"Apache-2.0"
] | 369
|
2018-01-17T15:22:18.000Z
|
2022-03-10T19:14:56.000Z
|
dataservice/api/biospecimen/__init__.py
|
ConnorBarnhill/kf-api-dataservice
|
547df467a307788882469a25c947a14965a26336
|
[
"Apache-2.0"
] | 3
|
2018-04-11T14:18:37.000Z
|
2018-10-31T19:09:48.000Z
|
from dataservice.api.biospecimen.resources import BiospecimenAPI
from dataservice.api.biospecimen.resources import BiospecimenListAPI
| 44.666667
| 68
| 0.895522
|
746b3f8be8e0bfc2d11fd8fd8ea8717dcf33514e
| 129,224
|
py
|
Python
|
Lib/test/test_os.py
|
Golfist/cpython
|
c4750959acbfc3057f12aaec832483ba30898d1c
|
[
"PSF-2.0"
] | 27
|
2017-04-21T14:57:04.000Z
|
2021-11-03T22:10:38.000Z
|
Lib/test/test_os.py
|
Golfist/cpython
|
c4750959acbfc3057f12aaec832483ba30898d1c
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_os.py
|
Golfist/cpython
|
c4750959acbfc3057f12aaec832483ba30898d1c
|
[
"PSF-2.0"
] | 9
|
2017-04-26T14:14:05.000Z
|
2020-12-14T16:26:41.000Z
|
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import asynchat
import asyncore
import codecs
import contextlib
import decimal
import errno
import fractions
import getpass
import itertools
import locale
import mmap
import os
import pickle
import shutil
import signal
import socket
import stat
import subprocess
import sys
import sysconfig
import time
import unittest
import uuid
import warnings
from test import support
try:
import threading
except ImportError:
threading = None
try:
import resource
except ImportError:
resource = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import _winapi
except ImportError:
_winapi = None
try:
import grp
groups = [g.gr_gid for g in grp.getgrall() if getpass.getuser() in g.gr_mem]
if hasattr(os, 'getgid'):
process_gid = os.getgid()
if process_gid not in groups:
groups.append(process_gid)
except ImportError:
groups = []
try:
import pwd
all_users = [u.pw_uid for u in pwd.getpwall()]
except (ImportError, AttributeError):
all_users = []
try:
from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
from test.support.script_helper import assert_python_ok
from test.support import unix_shell
root_in_posix = False
if hasattr(os, 'geteuid'):
root_in_posix = (os.geteuid() == 0)
# Detect whether we're on a Linux system that uses the (now outdated
# and unmaintained) linuxthreads threading library. There's an issue
# when combining linuxthreads with a failed execv call: see
# http://bugs.python.org/issue4970.
if hasattr(sys, 'thread_info') and sys.thread_info.version:
USING_LINUXTHREADS = sys.thread_info.version.startswith("linuxthreads")
else:
USING_LINUXTHREADS = False
# Issue #14110: Some tests fail on FreeBSD if the user is in the wheel group.
HAVE_WHEEL_GROUP = sys.platform.startswith('freebsd') and os.getgid() == 0
@contextlib.contextmanager
def ignore_deprecation_warnings(msg_regex, quiet=False):
with support.check_warnings((msg_regex, DeprecationWarning), quiet=quiet):
yield
def requires_os_func(name):
return unittest.skipUnless(hasattr(os, name), 'requires os.%s' % name)
class _PathLike(os.PathLike):
def __init__(self, path=""):
self.path = path
def __str__(self):
return str(self.path)
def __fspath__(self):
if isinstance(self.path, BaseException):
raise self.path
else:
return self.path
def create_file(filename, content=b'content'):
with open(filename, "xb", 0) as fp:
fp.write(content)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.lexists(support.TESTFN):
os.unlink(support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, b"a")
@support.cpython_only
def test_rename(self):
path = support.TESTFN
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
def test_read(self):
with open(support.TESTFN, "w+b") as fobj:
fobj.write(b"spam")
fobj.flush()
fd = fobj.fileno()
os.lseek(fd, 0, 0)
s = os.read(fd, 4)
self.assertEqual(type(s), bytes)
self.assertEqual(s, b"spam")
@support.cpython_only
# Skip the test on 32-bit platforms: the number of bytes must fit in a
# Py_ssize_t type
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX,
"needs INT_MAX < PY_SSIZE_T_MAX")
@support.bigmemtest(size=INT_MAX + 10, memuse=1, dry_run=False)
def test_large_read(self, size):
self.addCleanup(support.unlink, support.TESTFN)
create_file(support.TESTFN, b'test')
# Issue #21932: Make sure that os.read() does not raise an
# OverflowError for size larger than INT_MAX
with open(support.TESTFN, "rb") as fp:
data = os.read(fp.fileno(), size)
        # The test does not try to read more than 2 GB at once because the
        # operating system is free to return fewer bytes than requested.
self.assertEqual(data, b'test')
def test_write(self):
# os.write() accepts bytes- and buffer-like objects but not strings
fd = os.open(support.TESTFN, os.O_CREAT | os.O_WRONLY)
self.assertRaises(TypeError, os.write, fd, "beans")
os.write(fd, b"bacon\n")
os.write(fd, bytearray(b"eggs\n"))
os.write(fd, memoryview(b"spam\n"))
os.close(fd)
with open(support.TESTFN, "rb") as fobj:
self.assertEqual(fobj.read().splitlines(),
[b"bacon", b"eggs", b"spam"])
def write_windows_console(self, *args):
retcode = subprocess.call(args,
# use a new console to not flood the test output
creationflags=subprocess.CREATE_NEW_CONSOLE,
# use a shell to hide the console window (SW_HIDE)
shell=True)
self.assertEqual(retcode, 0)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the Windows console')
def test_write_windows_console(self):
# Issue #11395: the Windows console returns an error (12: not enough
# space error) on writing into stdout if stdout mode is binary and the
# length is greater than 66,000 bytes (or less, depending on heap
# usage).
code = "print('x' * 100000)"
self.write_windows_console(sys.executable, "-c", code)
self.write_windows_console(sys.executable, "-u", "-c", code)
def fdopen_helper(self, *args):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = os.fdopen(fd, *args)
f.close()
def test_fdopen(self):
fd = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(fd)
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
def test_replace(self):
TESTFN2 = support.TESTFN + ".2"
self.addCleanup(support.unlink, support.TESTFN)
self.addCleanup(support.unlink, TESTFN2)
create_file(support.TESTFN, b"1")
create_file(TESTFN2, b"2")
os.replace(support.TESTFN, TESTFN2)
self.assertRaises(FileNotFoundError, os.stat, support.TESTFN)
with open(TESTFN2, 'r') as f:
self.assertEqual(f.read(), "1")
def test_open_keywords(self):
f = os.open(path=__file__, flags=os.O_RDONLY, mode=0o777,
dir_fd=None)
os.close(f)
def test_symlink_keywords(self):
symlink = support.get_attribute(os, "symlink")
try:
symlink(src='target', dst=support.TESTFN,
target_is_directory=False, dir_fd=None)
except (NotImplementedError, OSError):
pass # No OS support or unprivileged user
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
self.fname = support.TESTFN
self.addCleanup(support.unlink, self.fname)
create_file(self.fname, b"ABC")
@unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def check_stat_attributes(self, fname):
result = os.stat(fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
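                # Indexed st_*time entries hold whole seconds, so truncate the
                # float attribute before comparing it with the indexed value.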
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
# Make sure that the st_?time and st_?time_ns fields roughly agree
# (they should always agree up to around tens-of-microseconds)
for name in 'st_atime st_mtime st_ctime'.split():
floaty = int(getattr(result, name) * 100000)
nanosecondy = getattr(result, name + "_ns") // 10000
self.assertAlmostEqual(floaty, nanosecondy, delta=2)
try:
result[200]
self.fail("No exception raised")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception raised")
except AttributeError:
pass
try:
result.st_rdev = 1
self.fail("No exception raised")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception raised")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception raised")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_stat_attributes(self):
self.check_stat_attributes(self.fname)
def test_stat_attributes_bytes(self):
try:
fname = self.fname.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
self.skipTest("cannot encode %a for the filesystem" % self.fname)
self.check_stat_attributes(fname)
def test_stat_result_pickle(self):
result = os.stat(self.fname)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(result, proto)
self.assertIn(b'stat_result', p)
if proto < 4:
self.assertIn(b'cos\nstat_result\n', p)
unpickled = pickle.loads(p)
self.assertEqual(result, unpickled)
@unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
def test_statvfs_attributes(self):
try:
result = os.statvfs(self.fname)
except OSError as e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
self.skipTest('os.statvfs() failed with ENOSYS')
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception raised")
except AttributeError:
pass
try:
result.parrot = 1
self.fail("No exception raised")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception raised")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
@unittest.skipUnless(hasattr(os, 'statvfs'),
"need os.statvfs()")
def test_statvfs_result_pickle(self):
try:
result = os.statvfs(self.fname)
except OSError as e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
self.skipTest('os.statvfs() failed with ENOSYS')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(result, proto)
self.assertIn(b'statvfs_result', p)
if proto < 4:
self.assertIn(b'cos\nstatvfs_result\n', p)
unpickled = pickle.loads(p)
self.assertEqual(result, unpickled)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except FileNotFoundError:
self.skipTest(r'c:\pagefile.sys does not exist')
except OSError as e:
self.fail("Could not stat pagefile.sys")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_15261(self):
# Verify that stat'ing a closed fd does not cause crash
r, w = os.pipe()
try:
os.stat(r) # should not raise error
finally:
os.close(r)
os.close(w)
with self.assertRaises(OSError) as ctx:
os.stat(r)
self.assertEqual(ctx.exception.errno, errno.EBADF)
def check_file_attributes(self, result):
self.assertTrue(hasattr(result, 'st_file_attributes'))
self.assertTrue(isinstance(result.st_file_attributes, int))
self.assertTrue(0 <= result.st_file_attributes <= 0xFFFFFFFF)
@unittest.skipUnless(sys.platform == "win32",
"st_file_attributes is Win32 specific")
def test_file_attributes(self):
# test file st_file_attributes (FILE_ATTRIBUTE_DIRECTORY not set)
result = os.stat(self.fname)
self.check_file_attributes(result)
self.assertEqual(
result.st_file_attributes & stat.FILE_ATTRIBUTE_DIRECTORY,
0)
# test directory st_file_attributes (FILE_ATTRIBUTE_DIRECTORY set)
dirname = support.TESTFN + "dir"
os.mkdir(dirname)
self.addCleanup(os.rmdir, dirname)
result = os.stat(dirname)
self.check_file_attributes(result)
self.assertEqual(
result.st_file_attributes & stat.FILE_ATTRIBUTE_DIRECTORY,
stat.FILE_ATTRIBUTE_DIRECTORY)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
def test_access_denied(self):
# Default to FindFirstFile WIN32_FIND_DATA when access is
# denied. See issue 28075.
# os.environ['TEMP'] should be located on a volume that
# supports file ACLs.
fname = os.path.join(os.environ['TEMP'], self.fname)
self.addCleanup(support.unlink, fname)
create_file(fname, b'ABC')
# Deny the right to [S]YNCHRONIZE on the file to
# force CreateFile to fail with ERROR_ACCESS_DENIED.
DETACHED_PROCESS = 8
subprocess.check_call(
['icacls.exe', fname, '/deny', 'Users:(S)'],
creationflags=DETACHED_PROCESS
)
result = os.stat(fname)
self.assertNotEqual(result.st_size, 0)
class UtimeTests(unittest.TestCase):
def setUp(self):
self.dirname = support.TESTFN
self.fname = os.path.join(self.dirname, "f1")
self.addCleanup(support.rmtree, self.dirname)
os.mkdir(self.dirname)
create_file(self.fname)
def restore_float_times(state):
with ignore_deprecation_warnings('stat_float_times'):
os.stat_float_times(state)
# ensure that st_atime and st_mtime are float
with ignore_deprecation_warnings('stat_float_times'):
old_float_times = os.stat_float_times(-1)
self.addCleanup(restore_float_times, old_float_times)
os.stat_float_times(True)
def support_subsecond(self, filename):
        # Heuristic to check if the filesystem supports timestamps with
        # subsecond resolution: check if float and int timestamps are different.
st = os.stat(filename)
return ((st.st_atime != st[7])
or (st.st_mtime != st[8])
or (st.st_ctime != st[9]))
def _test_utime(self, set_time, filename=None):
if not filename:
filename = self.fname
support_subsecond = self.support_subsecond(filename)
if support_subsecond:
# Timestamp with a resolution of 1 microsecond (10^-6).
#
# The resolution of the C internal function used by os.utime()
# depends on the platform: 1 sec, 1 us, 1 ns. Writing a portable
# test with a resolution of 1 ns requires more work:
# see the issue #15745.
atime_ns = 1002003000 # 1.002003 seconds
mtime_ns = 4005006000 # 4.005006 seconds
else:
# use a resolution of 1 second
atime_ns = 5 * 10**9
mtime_ns = 8 * 10**9
set_time(filename, (atime_ns, mtime_ns))
st = os.stat(filename)
if support_subsecond:
self.assertAlmostEqual(st.st_atime, atime_ns * 1e-9, delta=1e-6)
self.assertAlmostEqual(st.st_mtime, mtime_ns * 1e-9, delta=1e-6)
else:
self.assertEqual(st.st_atime, atime_ns * 1e-9)
self.assertEqual(st.st_mtime, mtime_ns * 1e-9)
self.assertEqual(st.st_atime_ns, atime_ns)
self.assertEqual(st.st_mtime_ns, mtime_ns)
def test_utime(self):
def set_time(filename, ns):
# test the ns keyword parameter
os.utime(filename, ns=ns)
self._test_utime(set_time)
@staticmethod
def ns_to_sec(ns):
        # Convert a number of nanoseconds (int) to a number of seconds (float).
        # Round towards infinity by adding 0.5 nanosecond to avoid a rounding
        # issue: os.utime() rounds towards minus infinity.
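        # Example: ns_to_sec(4005006000) is roughly 4.0050060005 s; the extra
        # 0.5 ns keeps the minus-infinity rounding from dropping below 4005006000 ns.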
return (ns * 1e-9) + 0.5e-9
def test_utime_by_indexed(self):
# pass times as floating point seconds as the second indexed parameter
def set_time(filename, ns):
atime_ns, mtime_ns = ns
atime = self.ns_to_sec(atime_ns)
mtime = self.ns_to_sec(mtime_ns)
# test utimensat(timespec), utimes(timeval), utime(utimbuf)
# or utime(time_t)
os.utime(filename, (atime, mtime))
self._test_utime(set_time)
def test_utime_by_times(self):
def set_time(filename, ns):
atime_ns, mtime_ns = ns
atime = self.ns_to_sec(atime_ns)
mtime = self.ns_to_sec(mtime_ns)
# test the times keyword parameter
os.utime(filename, times=(atime, mtime))
self._test_utime(set_time)
@unittest.skipUnless(os.utime in os.supports_follow_symlinks,
"follow_symlinks support for utime required "
"for this test.")
def test_utime_nofollow_symlinks(self):
def set_time(filename, ns):
# use follow_symlinks=False to test utimensat(timespec)
# or lutimes(timeval)
os.utime(filename, ns=ns, follow_symlinks=False)
self._test_utime(set_time)
@unittest.skipUnless(os.utime in os.supports_fd,
"fd support for utime required for this test.")
def test_utime_fd(self):
def set_time(filename, ns):
with open(filename, 'wb', 0) as fp:
# use a file descriptor to test futimens(timespec)
# or futimes(timeval)
os.utime(fp.fileno(), ns=ns)
self._test_utime(set_time)
@unittest.skipUnless(os.utime in os.supports_dir_fd,
"dir_fd support for utime required for this test.")
def test_utime_dir_fd(self):
def set_time(filename, ns):
dirname, name = os.path.split(filename)
dirfd = os.open(dirname, os.O_RDONLY)
try:
# pass dir_fd to test utimensat(timespec) or futimesat(timeval)
os.utime(name, dir_fd=dirfd, ns=ns)
finally:
os.close(dirfd)
self._test_utime(set_time)
def test_utime_directory(self):
def set_time(filename, ns):
# test calling os.utime() on a directory
os.utime(filename, ns=ns)
self._test_utime(set_time, filename=self.dirname)
def _test_utime_current(self, set_time):
# Get the system clock
current = time.time()
# Call os.utime() to set the timestamp to the current system clock
set_time(self.fname)
if not self.support_subsecond(self.fname):
delta = 1.0
else:
# On Windows, the usual resolution of time.time() is 15.6 ms
delta = 0.020
st = os.stat(self.fname)
msg = ("st_time=%r, current=%r, dt=%r"
% (st.st_mtime, current, st.st_mtime - current))
self.assertAlmostEqual(st.st_mtime, current,
delta=delta, msg=msg)
def test_utime_current(self):
def set_time(filename):
# Set to the current time in the new way
os.utime(self.fname)
self._test_utime_current(set_time)
def test_utime_current_old(self):
def set_time(filename):
# Set to the current time in the old explicit way.
os.utime(self.fname, None)
self._test_utime_current(set_time)
def get_file_system(self, path):
if sys.platform == 'win32':
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_unicode_buffer("", 100)
ok = kernel32.GetVolumeInformationW(root, None, 0,
None, None, None,
buf, len(buf))
if ok:
return buf.value
# return None if the filesystem is unknown
def test_large_time(self):
        # Many filesystems are limited to the year 2038. At least, the test
        # passes with the NTFS filesystem.
if self.get_file_system(self.dirname) != "NTFS":
self.skipTest("requires NTFS")
large = 5000000000 # some day in 2128
os.utime(self.fname, (large, large))
self.assertEqual(os.stat(self.fname).st_mtime, large)
def test_utime_invalid_arguments(self):
# seconds and nanoseconds parameters are mutually exclusive
with self.assertRaises(ValueError):
os.utime(self.fname, (5, 5), ns=(5, 5))
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def setUp(self):
self.__save = dict(os.environ)
if os.supports_bytes_environ:
self.__saveb = dict(os.environb)
for key, value in self._reference().items():
os.environ[key] = value
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
if os.supports_bytes_environ:
os.environb.clear()
os.environb.update(self.__saveb)
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
# Bug 1110478
@unittest.skipUnless(unix_shell and os.path.exists(unix_shell),
'requires a shell')
def test_update2(self):
os.environ.clear()
os.environ.update(HELLO="World")
with os.popen("%s -c 'echo $HELLO'" % unix_shell) as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
@unittest.skipUnless(unix_shell and os.path.exists(unix_shell),
'requires a shell')
def test_os_popen_iter(self):
with os.popen("%s -c 'echo \"line1\nline2\nline3\"'"
% unix_shell) as popen:
it = iter(popen)
self.assertEqual(next(it), "line1\n")
self.assertEqual(next(it), "line2\n")
self.assertEqual(next(it), "line3\n")
self.assertRaises(StopIteration, next, it)
# Verify environ keys and values from the OS are of the
# correct str type.
def test_keyvalue_types(self):
for key, val in os.environ.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), str)
def test_items(self):
for key, value in self._reference().items():
self.assertEqual(os.environ.get(key), value)
# Issue 7310
def test___repr__(self):
"""Check that the repr() of os.environ looks like environ({...})."""
env = os.environ
self.assertEqual(repr(env), 'environ({{{}}})'.format(', '.join(
'{!r}: {!r}'.format(key, value)
for key, value in env.items())))
def test_get_exec_path(self):
defpath_list = os.defpath.split(os.pathsep)
test_path = ['/monty', '/python', '', '/flying/circus']
test_env = {'PATH': os.pathsep.join(test_path)}
saved_environ = os.environ
try:
os.environ = dict(test_env)
# Test that defaulting to os.environ works.
self.assertSequenceEqual(test_path, os.get_exec_path())
self.assertSequenceEqual(test_path, os.get_exec_path(env=None))
finally:
os.environ = saved_environ
# No PATH environment variable
self.assertSequenceEqual(defpath_list, os.get_exec_path({}))
# Empty PATH environment variable
self.assertSequenceEqual(('',), os.get_exec_path({'PATH':''}))
# Supplied PATH environment variable
self.assertSequenceEqual(test_path, os.get_exec_path(test_env))
if os.supports_bytes_environ:
# env cannot contain 'PATH' and b'PATH' keys
try:
# ignore BytesWarning warning
with warnings.catch_warnings(record=True):
mixed_env = {'PATH': '1', b'PATH': b'2'}
except BytesWarning:
# mixed_env cannot be created with python -bb
pass
else:
self.assertRaises(ValueError, os.get_exec_path, mixed_env)
# bytes key and/or value
self.assertSequenceEqual(os.get_exec_path({b'PATH': b'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({b'PATH': 'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({'PATH': b'abc'}),
['abc'])
@unittest.skipUnless(os.supports_bytes_environ,
"os.environb required for this test.")
def test_environb(self):
# os.environ -> os.environb
value = 'euro\u20ac'
try:
value_bytes = value.encode(sys.getfilesystemencoding(),
'surrogateescape')
except UnicodeEncodeError:
msg = "U+20AC character is not encodable to %s" % (
sys.getfilesystemencoding(),)
self.skipTest(msg)
os.environ['unicode'] = value
self.assertEqual(os.environ['unicode'], value)
self.assertEqual(os.environb[b'unicode'], value_bytes)
# os.environb -> os.environ
value = b'\xff'
os.environb[b'bytes'] = value
self.assertEqual(os.environb[b'bytes'], value)
value_str = value.decode(sys.getfilesystemencoding(), 'surrogateescape')
self.assertEqual(os.environ['bytes'], value_str)
# On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue
# #13415).
@support.requires_freebsd_version(7)
@support.requires_mac_ver(10, 6)
def test_unset_error(self):
if sys.platform == "win32":
# an environment variable is limited to 32,767 characters
key = 'x' * 50000
self.assertRaises(ValueError, os.environ.__delitem__, key)
else:
# "=" is not allowed in a variable name
key = 'key='
self.assertRaises(OSError, os.environ.__delitem__, key)
def test_key_type(self):
missing = 'missingkey'
self.assertNotIn(missing, os.environ)
with self.assertRaises(KeyError) as cm:
os.environ[missing]
self.assertIs(cm.exception.args[0], missing)
self.assertTrue(cm.exception.__suppress_context__)
with self.assertRaises(KeyError) as cm:
del os.environ[missing]
self.assertIs(cm.exception.args[0], missing)
self.assertTrue(cm.exception.__suppress_context__)
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
    # Wrapper to hide minor differences between os.walk and os.fwalk
    # so both functions can be tested with the same code base.
def walk(self, top, **kwargs):
if 'follow_symlinks' in kwargs:
kwargs['followlinks'] = kwargs.pop('follow_symlinks')
return os.walk(top, **kwargs)
def setUp(self):
join = os.path.join
self.addCleanup(support.rmtree, support.TESTFN)
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
# SUB21/ not readable
# tmp5
# link/ a symlink to TESTFN.2
# broken_link
# broken_link2
# broken_link3
# TEST2/
# tmp4 a lone file
self.walk_path = join(support.TESTFN, "TEST1")
self.sub1_path = join(self.walk_path, "SUB1")
self.sub11_path = join(self.sub1_path, "SUB11")
sub2_path = join(self.walk_path, "SUB2")
sub21_path = join(sub2_path, "SUB21")
tmp1_path = join(self.walk_path, "tmp1")
tmp2_path = join(self.sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
tmp5_path = join(sub21_path, "tmp3")
self.link_path = join(sub2_path, "link")
t2_path = join(support.TESTFN, "TEST2")
tmp4_path = join(support.TESTFN, "TEST2", "tmp4")
broken_link_path = join(sub2_path, "broken_link")
broken_link2_path = join(sub2_path, "broken_link2")
broken_link3_path = join(sub2_path, "broken_link3")
# Create stuff.
os.makedirs(self.sub11_path)
os.makedirs(sub2_path)
os.makedirs(sub21_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path, tmp5_path:
with open(path, "x") as f:
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
if support.can_symlink():
os.symlink(os.path.abspath(t2_path), self.link_path)
os.symlink('broken', broken_link_path, True)
os.symlink(join('tmp3', 'broken'), broken_link2_path, True)
os.symlink(join('SUB21', 'tmp5'), broken_link3_path, True)
self.sub2_tree = (sub2_path, ["SUB21", "link"],
["broken_link", "broken_link2", "broken_link3",
"tmp3"])
else:
self.sub2_tree = (sub2_path, [], ["tmp3"])
os.chmod(sub21_path, 0)
try:
os.listdir(sub21_path)
except PermissionError:
self.addCleanup(os.chmod, sub21_path, stat.S_IRWXU)
else:
os.chmod(sub21_path, stat.S_IRWXU)
os.unlink(tmp5_path)
os.rmdir(sub21_path)
del self.sub2_tree[1][:1]
def test_walk_topdown(self):
# Walk top-down.
all = list(self.walk(self.walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
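        # When flipped is True, SUB2 was visited first, so its entry sits at
        # index 1 (= 3 - 2 * True) instead of index 3.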
all[0][1].sort()
all[3 - 2 * flipped][-1].sort()
all[3 - 2 * flipped][1].sort()
self.assertEqual(all[0], (self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (self.sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (self.sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], self.sub2_tree)
def test_walk_prune(self, walk_path=None):
if walk_path is None:
walk_path = self.walk_path
# Prune the search.
all = []
for root, dirs, files in self.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0],
(str(walk_path), ["SUB2"], ["tmp1"]))
all[1][-1].sort()
all[1][1].sort()
self.assertEqual(all[1], self.sub2_tree)
def test_file_like_path(self):
self.test_walk_prune(_PathLike(self.walk_path))
def test_walk_bottom_up(self):
# Walk bottom-up.
all = list(self.walk(self.walk_path, topdown=False))
self.assertEqual(len(all), 4, all)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
all[2 - 2 * flipped][-1].sort()
all[2 - 2 * flipped][1].sort()
self.assertEqual(all[3],
(self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped],
(self.sub11_path, [], []))
self.assertEqual(all[flipped + 1],
(self.sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped],
self.sub2_tree)
def test_walk_symlink(self):
if not support.can_symlink():
self.skipTest("need symlink support")
# Walk, following symlinks.
walk_it = self.walk(self.walk_path, follow_symlinks=True)
for root, dirs, files in walk_it:
if root == self.link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def test_walk_bad_dir(self):
# Walk top-down.
errors = []
walk_it = self.walk(self.walk_path, onerror=errors.append)
root, dirs, files = next(walk_it)
self.assertEqual(errors, [])
dir1 = 'SUB1'
path1 = os.path.join(root, dir1)
path1new = os.path.join(root, dir1 + '.new')
os.rename(path1, path1new)
try:
roots = [r for r, d, f in walk_it]
self.assertTrue(errors)
self.assertNotIn(path1, roots)
self.assertNotIn(path1new, roots)
for dir2 in dirs:
if dir2 != dir1:
self.assertIn(os.path.join(root, dir2), roots)
finally:
os.rename(path1new, path1)
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class FwalkTests(WalkTests):
"""Tests for os.fwalk()."""
def walk(self, top, **kwargs):
for root, dirs, files, root_fd in self.fwalk(top, **kwargs):
yield (root, dirs, files)
def fwalk(self, *args, **kwargs):
return os.fwalk(*args, **kwargs)
def _compare_to_walk(self, walk_kwargs, fwalk_kwargs):
"""
compare with walk() results.
"""
walk_kwargs = walk_kwargs.copy()
fwalk_kwargs = fwalk_kwargs.copy()
for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
walk_kwargs.update(topdown=topdown, followlinks=follow_symlinks)
fwalk_kwargs.update(topdown=topdown, follow_symlinks=follow_symlinks)
expected = {}
for root, dirs, files in os.walk(**walk_kwargs):
expected[root] = (set(dirs), set(files))
for root, dirs, files, rootfd in self.fwalk(**fwalk_kwargs):
self.assertIn(root, expected)
self.assertEqual(expected[root], (set(dirs), set(files)))
def test_compare_to_walk(self):
kwargs = {'top': support.TESTFN}
self._compare_to_walk(kwargs, kwargs)
def test_dir_fd(self):
try:
fd = os.open(".", os.O_RDONLY)
walk_kwargs = {'top': support.TESTFN}
fwalk_kwargs = walk_kwargs.copy()
fwalk_kwargs['dir_fd'] = fd
self._compare_to_walk(walk_kwargs, fwalk_kwargs)
finally:
os.close(fd)
def test_yields_correct_dir_fd(self):
# check returned file descriptors
for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
args = support.TESTFN, topdown, None
for root, dirs, files, rootfd in self.fwalk(*args, follow_symlinks=follow_symlinks):
# check that the FD is valid
os.fstat(rootfd)
# redundant check
os.stat(rootfd)
# check that listdir() returns consistent information
self.assertEqual(set(os.listdir(rootfd)), set(dirs) | set(files))
def test_fd_leak(self):
# Since we're opening a lot of FDs, we must be careful to avoid leaks:
# we both check that calling fwalk() a large number of times doesn't
# yield EMFILE, and that the minimum allocated FD hasn't changed.
minfd = os.dup(1)
os.close(minfd)
for i in range(256):
for x in self.fwalk(support.TESTFN):
pass
newfd = os.dup(1)
self.addCleanup(os.close, newfd)
self.assertEqual(newfd, minfd)
class BytesWalkTests(WalkTests):
"""Tests for os.walk() with bytes."""
def walk(self, top, **kwargs):
if 'follow_symlinks' in kwargs:
kwargs['followlinks'] = kwargs.pop('follow_symlinks')
for broot, bdirs, bfiles in os.walk(os.fsencode(top), **kwargs):
root = os.fsdecode(broot)
dirs = list(map(os.fsdecode, bdirs))
files = list(map(os.fsdecode, bfiles))
yield (root, dirs, files)
bdirs[:] = list(map(os.fsencode, dirs))
bfiles[:] = list(map(os.fsencode, files))
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class BytesFwalkTests(FwalkTests):
"""Tests for os.walk() with bytes."""
def fwalk(self, top='.', *args, **kwargs):
for broot, bdirs, bfiles, topfd in os.fwalk(os.fsencode(top), *args, **kwargs):
root = os.fsdecode(broot)
dirs = list(map(os.fsdecode, bdirs))
files = list(map(os.fsdecode, bfiles))
yield (root, dirs, files, topfd)
bdirs[:] = list(map(os.fsencode, dirs))
bfiles[:] = list(map(os.fsencode, files))
class MakedirTests(unittest.TestCase):
def setUp(self):
os.mkdir(support.TESTFN)
def test_makedir(self):
base = support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def test_mode(self):
with support.temp_umask(0o002):
base = support.TESTFN
parent = os.path.join(base, 'dir1')
path = os.path.join(parent, 'dir2')
os.makedirs(path, 0o555)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isdir(path))
if os.name != 'nt':
self.assertEqual(stat.S_IMODE(os.stat(path).st_mode), 0o555)
self.assertEqual(stat.S_IMODE(os.stat(parent).st_mode), 0o775)
def test_exist_ok_existing_directory(self):
path = os.path.join(support.TESTFN, 'dir1')
mode = 0o777
old_mask = os.umask(0o022)
os.makedirs(path, mode)
self.assertRaises(OSError, os.makedirs, path, mode)
self.assertRaises(OSError, os.makedirs, path, mode, exist_ok=False)
os.makedirs(path, 0o776, exist_ok=True)
os.makedirs(path, mode=mode, exist_ok=True)
os.umask(old_mask)
# Issue #25583: A drive root could raise PermissionError on Windows
os.makedirs(os.path.abspath('/'), exist_ok=True)
def test_exist_ok_s_isgid_directory(self):
path = os.path.join(support.TESTFN, 'dir1')
S_ISGID = stat.S_ISGID
mode = 0o777
old_mask = os.umask(0o022)
try:
existing_testfn_mode = stat.S_IMODE(
os.lstat(support.TESTFN).st_mode)
try:
os.chmod(support.TESTFN, existing_testfn_mode | S_ISGID)
except PermissionError:
raise unittest.SkipTest('Cannot set S_ISGID for dir.')
if (os.lstat(support.TESTFN).st_mode & S_ISGID != S_ISGID):
raise unittest.SkipTest('No support for S_ISGID dir mode.')
# The os should apply S_ISGID from the parent dir for us, but
# this test need not depend on that behavior. Be explicit.
os.makedirs(path, mode | S_ISGID)
# http://bugs.python.org/issue14992
# Should not fail when the bit is already set.
os.makedirs(path, mode, exist_ok=True)
# remove the bit.
os.chmod(path, stat.S_IMODE(os.lstat(path).st_mode) & ~S_ISGID)
# May work even when the bit is not already set when demanded.
os.makedirs(path, mode | S_ISGID, exist_ok=True)
finally:
os.umask(old_mask)
def test_exist_ok_existing_regular_file(self):
base = support.TESTFN
path = os.path.join(support.TESTFN, 'dir1')
f = open(path, 'w')
f.write('abc')
f.close()
self.assertRaises(OSError, os.makedirs, path)
self.assertRaises(OSError, os.makedirs, path, exist_ok=False)
self.assertRaises(OSError, os.makedirs, path, exist_ok=True)
os.remove(path)
def tearDown(self):
path = os.path.join(support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
@unittest.skipUnless(hasattr(os, 'chown'), "Test needs chown")
class ChownFileTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.mkdir(support.TESTFN)
def test_chown_uid_gid_arguments_must_be_index(self):
stat = os.stat(support.TESTFN)
uid = stat.st_uid
gid = stat.st_gid
for value in (-1.0, -1j, decimal.Decimal(-1), fractions.Fraction(-2, 2)):
self.assertRaises(TypeError, os.chown, support.TESTFN, value, gid)
self.assertRaises(TypeError, os.chown, support.TESTFN, uid, value)
self.assertIsNone(os.chown(support.TESTFN, uid, gid))
self.assertIsNone(os.chown(support.TESTFN, -1, -1))
@unittest.skipUnless(len(groups) > 1, "test needs more than one group")
def test_chown(self):
gid_1, gid_2 = groups[:2]
uid = os.stat(support.TESTFN).st_uid
os.chown(support.TESTFN, uid, gid_1)
gid = os.stat(support.TESTFN).st_gid
self.assertEqual(gid, gid_1)
os.chown(support.TESTFN, uid, gid_2)
gid = os.stat(support.TESTFN).st_gid
self.assertEqual(gid, gid_2)
@unittest.skipUnless(root_in_posix and len(all_users) > 1,
"test needs root privilege and more than one user")
def test_chown_with_root(self):
uid_1, uid_2 = all_users[:2]
gid = os.stat(support.TESTFN).st_gid
os.chown(support.TESTFN, uid_1, gid)
uid = os.stat(support.TESTFN).st_uid
self.assertEqual(uid, uid_1)
os.chown(support.TESTFN, uid_2, gid)
uid = os.stat(support.TESTFN).st_uid
self.assertEqual(uid, uid_2)
@unittest.skipUnless(not root_in_posix and len(all_users) > 1,
"test needs non-root account and more than one user")
def test_chown_without_permission(self):
uid_1, uid_2 = all_users[:2]
gid = os.stat(support.TESTFN).st_gid
with self.assertRaises(PermissionError):
os.chown(support.TESTFN, uid_1, gid)
os.chown(support.TESTFN, uid_2, gid)
@classmethod
def tearDownClass(cls):
os.rmdir(support.TESTFN)
class RemoveDirsTests(unittest.TestCase):
def setUp(self):
os.makedirs(support.TESTFN)
def tearDown(self):
support.rmtree(support.TESTFN)
def test_remove_all(self):
dira = os.path.join(support.TESTFN, 'dira')
os.mkdir(dira)
dirb = os.path.join(dira, 'dirb')
os.mkdir(dirb)
os.removedirs(dirb)
self.assertFalse(os.path.exists(dirb))
self.assertFalse(os.path.exists(dira))
self.assertFalse(os.path.exists(support.TESTFN))
def test_remove_partial(self):
dira = os.path.join(support.TESTFN, 'dira')
os.mkdir(dira)
dirb = os.path.join(dira, 'dirb')
os.mkdir(dirb)
create_file(os.path.join(dira, 'file.txt'))
os.removedirs(dirb)
self.assertFalse(os.path.exists(dirb))
self.assertTrue(os.path.exists(dira))
self.assertTrue(os.path.exists(support.TESTFN))
def test_remove_nothing(self):
dira = os.path.join(support.TESTFN, 'dira')
os.mkdir(dira)
dirb = os.path.join(dira, 'dirb')
os.mkdir(dirb)
create_file(os.path.join(dirb, 'file.txt'))
with self.assertRaises(OSError):
os.removedirs(dirb)
self.assertTrue(os.path.exists(dirb))
self.assertTrue(os.path.exists(dira))
self.assertTrue(os.path.exists(support.TESTFN))
class DevNullTests(unittest.TestCase):
def test_devnull(self):
with open(os.devnull, 'wb', 0) as f:
f.write(b'hello')
f.close()
with open(os.devnull, 'rb') as f:
self.assertEqual(f.read(), b'')
class URandomTests(unittest.TestCase):
def test_urandom_length(self):
self.assertEqual(len(os.urandom(0)), 0)
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
def test_urandom_value(self):
data1 = os.urandom(16)
self.assertIsInstance(data1, bytes)
data2 = os.urandom(16)
self.assertNotEqual(data1, data2)
def get_urandom_subprocess(self, count):
code = '\n'.join((
'import os, sys',
'data = os.urandom(%s)' % count,
'sys.stdout.buffer.write(data)',
'sys.stdout.buffer.flush()'))
out = assert_python_ok('-c', code)
stdout = out[1]
self.assertEqual(len(stdout), 16)
return stdout
def test_urandom_subprocess(self):
data1 = self.get_urandom_subprocess(16)
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
@unittest.skipUnless(hasattr(os, 'getrandom'), 'need os.getrandom()')
class GetRandomTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
os.getrandom(1)
except OSError as exc:
if exc.errno == errno.ENOSYS:
# Python compiled on a more recent Linux version
# than the current Linux kernel
raise unittest.SkipTest("getrandom() syscall fails with ENOSYS")
else:
raise
def test_getrandom_type(self):
data = os.getrandom(16)
self.assertIsInstance(data, bytes)
self.assertEqual(len(data), 16)
def test_getrandom0(self):
empty = os.getrandom(0)
self.assertEqual(empty, b'')
def test_getrandom_random(self):
self.assertTrue(hasattr(os, 'GRND_RANDOM'))
# Don't test os.getrandom(1, os.GRND_RANDOM) to not consume the rare
# resource /dev/random
def test_getrandom_nonblock(self):
# The call must not fail. Check also that the flag exists
try:
os.getrandom(1, os.GRND_NONBLOCK)
except BlockingIOError:
# System urandom is not initialized yet
pass
def test_getrandom_value(self):
data1 = os.getrandom(16)
data2 = os.getrandom(16)
self.assertNotEqual(data1, data2)
# os.urandom() doesn't use a file descriptor when it is implemented with the
# getentropy() function, the getrandom() function or the getrandom() syscall
OS_URANDOM_DONT_USE_FD = (
sysconfig.get_config_var('HAVE_GETENTROPY') == 1
or sysconfig.get_config_var('HAVE_GETRANDOM') == 1
or sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1)
@unittest.skipIf(OS_URANDOM_DONT_USE_FD,
                 "os.urandom() does not use a file descriptor")
class URandomFDTests(unittest.TestCase):
@unittest.skipUnless(resource, "test requires the resource module")
def test_urandom_failure(self):
# Check urandom() failing when it is not able to open /dev/random.
# We spawn a new process to make the test more robust (if getrlimit()
# failed to restore the file descriptor limit after this, the whole
# test suite would crash; this actually happened on the OS X Tiger
# buildbot).
code = """if 1:
import errno
import os
import resource
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
try:
os.urandom(16)
except OSError as e:
assert e.errno == errno.EMFILE, e.errno
else:
raise AssertionError("OSError not raised")
"""
assert_python_ok('-c', code)
def test_urandom_fd_closed(self):
# Issue #21207: urandom() should reopen its fd to /dev/urandom if
# closed.
code = """if 1:
import os
import sys
import test.support
os.urandom(4)
with test.support.SuppressCrashReport():
os.closerange(3, 256)
sys.stdout.buffer.write(os.urandom(4))
"""
rc, out, err = assert_python_ok('-Sc', code)
def test_urandom_fd_reopened(self):
# Issue #21207: urandom() should detect its fd to /dev/urandom
# changed to something else, and reopen it.
self.addCleanup(support.unlink, support.TESTFN)
create_file(support.TESTFN, b"x" * 256)
code = """if 1:
import os
import sys
import test.support
os.urandom(4)
with test.support.SuppressCrashReport():
for fd in range(3, 256):
try:
os.close(fd)
except OSError:
pass
else:
# Found the urandom fd (XXX hopefully)
break
os.closerange(3, 256)
with open({TESTFN!r}, 'rb') as f:
new_fd = f.fileno()
# Issue #26935: posix allows new_fd and fd to be equal but
# some libc implementations have dup2 return an error in this
# case.
if new_fd != fd:
os.dup2(new_fd, fd)
sys.stdout.buffer.write(os.urandom(4))
sys.stdout.buffer.write(os.urandom(4))
""".format(TESTFN=support.TESTFN)
rc, out, err = assert_python_ok('-Sc', code)
self.assertEqual(len(out), 8)
self.assertNotEqual(out[0:4], out[4:8])
rc, out2, err2 = assert_python_ok('-Sc', code)
self.assertEqual(len(out2), 8)
self.assertNotEqual(out2, out)
@contextlib.contextmanager
def _execvpe_mockup(defpath=None):
"""
Stubs out execv and execve functions when used as context manager.
Records exec calls. The mock execv and execve functions always raise an
exception as they would normally never return.
"""
# A list of tuples containing (function name, first arg, args)
# of calls to execv or execve that have been made.
calls = []
def mock_execv(name, *args):
calls.append(('execv', name, args))
raise RuntimeError("execv called")
def mock_execve(name, *args):
calls.append(('execve', name, args))
raise OSError(errno.ENOTDIR, "execve called")
try:
orig_execv = os.execv
orig_execve = os.execve
orig_defpath = os.defpath
os.execv = mock_execv
os.execve = mock_execve
if defpath is not None:
os.defpath = defpath
yield calls
finally:
os.execv = orig_execv
os.execve = orig_execve
os.defpath = orig_defpath
class ExecTests(unittest.TestCase):
@unittest.skipIf(USING_LINUXTHREADS,
"avoid triggering a linuxthreads bug: see issue #4970")
def test_execvpe_with_bad_program(self):
self.assertRaises(OSError, os.execvpe, 'no such app-',
['no such app-'], None)
def test_execv_with_bad_arglist(self):
self.assertRaises(ValueError, os.execv, 'notepad', ())
self.assertRaises(ValueError, os.execv, 'notepad', [])
self.assertRaises(ValueError, os.execv, 'notepad', ('',))
self.assertRaises(ValueError, os.execv, 'notepad', [''])
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
self.assertRaises(ValueError, os.execvpe, 'notepad', [], {})
self.assertRaises(ValueError, os.execvpe, 'notepad', [''], {})
@unittest.skipUnless(hasattr(os, '_execvpe'),
"No internal os._execvpe function to test.")
def _test_internal_execvpe(self, test_type):
program_path = os.sep + 'absolutepath'
if test_type is bytes:
program = b'executable'
fullpath = os.path.join(os.fsencode(program_path), program)
native_fullpath = fullpath
arguments = [b'progname', 'arg1', 'arg2']
else:
program = 'executable'
arguments = ['progname', 'arg1', 'arg2']
fullpath = os.path.join(program_path, program)
if os.name != "nt":
native_fullpath = os.fsencode(fullpath)
else:
native_fullpath = fullpath
env = {'spam': 'beans'}
# test os._execvpe() with an absolute path
with _execvpe_mockup() as calls:
self.assertRaises(RuntimeError,
os._execvpe, fullpath, arguments)
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], ('execv', fullpath, (arguments,)))
# test os._execvpe() with a relative path:
# os.get_exec_path() returns defpath
with _execvpe_mockup(defpath=program_path) as calls:
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env)))
# test os._execvpe() with a relative path:
# os.get_exec_path() reads the 'PATH' variable
with _execvpe_mockup() as calls:
env_path = env.copy()
if test_type is bytes:
env_path[b'PATH'] = program_path
else:
env_path['PATH'] = program_path
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env_path)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env_path)))
def test_internal_execvpe_str(self):
self._test_internal_execvpe(str)
if os.name != "nt":
self._test_internal_execvpe(bytes)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
def setUp(self):
try:
os.stat(support.TESTFN)
except FileNotFoundError:
exists = False
except OSError as exc:
exists = True
self.fail("file %s must not exist; os.stat failed with %s"
% (support.TESTFN, exc))
else:
self.fail("file %s must not exist" % support.TESTFN)
def test_rename(self):
self.assertRaises(OSError, os.rename, support.TESTFN, support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(OSError, os.remove, support.TESTFN)
def test_chdir(self):
self.assertRaises(OSError, os.chdir, support.TESTFN)
def test_mkdir(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "x") as f:
self.assertRaises(OSError, os.mkdir, support.TESTFN)
def test_utime(self):
self.assertRaises(OSError, os.utime, support.TESTFN, None)
def test_chmod(self):
self.assertRaises(OSError, os.chmod, support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "dup", "fdopen", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
    # singles.append("close")
    # We omit close because it doesn't raise an exception on some platforms.
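    # Dynamically create one test_<name> method per entry in `singles`; each
    # generated test calls check() on the matching os function if it exists.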
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
try:
f(support.make_bad_fd(), *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("%r didn't raise an OSError with a bad file descriptor"
% f)
@unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()')
def test_isatty(self):
self.assertEqual(os.isatty(support.make_bad_fd()), False)
@unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()')
def test_closerange(self):
fd = support.make_bad_fd()
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
@unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()')
def test_dup2(self):
self.check(os.dup2, 20)
@unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()')
def test_fchmod(self):
self.check(os.fchmod, 0)
@unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()')
def test_fchown(self):
self.check(os.fchown, -1, -1)
@unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()')
def test_fpathconf(self):
self.check(os.pathconf, "PC_NAME_MAX")
self.check(os.fpathconf, "PC_NAME_MAX")
@unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()')
def test_ftruncate(self):
self.check(os.truncate, 0)
self.check(os.ftruncate, 0)
@unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()')
def test_lseek(self):
self.check(os.lseek, 0, 0)
@unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()')
def test_read(self):
self.check(os.read, 1)
@unittest.skipUnless(hasattr(os, 'readv'), 'test needs os.readv()')
def test_readv(self):
buf = bytearray(10)
self.check(os.readv, [buf])
@unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()')
def test_tcsetpgrpt(self):
self.check(os.tcsetpgrp, 0)
@unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()')
def test_write(self):
self.check(os.write, b" ")
@unittest.skipUnless(hasattr(os, 'writev'), 'test needs os.writev()')
def test_writev(self):
self.check(os.writev, [b'abc'])
def test_inheritable(self):
self.check(os.get_inheritable)
self.check(os.set_inheritable, True)
@unittest.skipUnless(hasattr(os, 'get_blocking'),
'needs os.get_blocking() and os.set_blocking()')
def test_blocking(self):
self.check(os.get_blocking)
self.check(os.set_blocking, True)
class LinkTests(unittest.TestCase):
def setUp(self):
self.file1 = support.TESTFN
self.file2 = os.path.join(support.TESTFN + "2")
def tearDown(self):
for file in (self.file1, self.file2):
if os.path.exists(file):
os.unlink(file)
def _test_link(self, file1, file2):
create_file(file1)
os.link(file1, file2)
with open(file1, "r") as f1, open(file2, "r") as f2:
self.assertTrue(os.path.sameopenfile(f1.fileno(), f2.fileno()))
def test_link(self):
self._test_link(self.file1, self.file2)
def test_link_bytes(self):
self._test_link(bytes(self.file1, sys.getfilesystemencoding()),
bytes(self.file2, sys.getfilesystemencoding()))
def test_unicode_name(self):
try:
os.fsencode("\xf1")
except UnicodeError:
raise unittest.SkipTest("Unable to encode for this platform.")
self.file1 += "\xf1"
self.file2 = self.file1 + "2"
self._test_link(self.file1, self.file2)
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class PosixUidGidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()')
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(OSError, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
def test_setgid(self):
if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
self.assertRaises(OSError, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
@unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(OSError, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
def test_setegid(self):
if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
self.assertRaises(OSError, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(OSError, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
@unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
def test_setregid(self):
if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
self.assertRaises(OSError, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
@unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class Pep383Tests(unittest.TestCase):
def setUp(self):
if support.TESTFN_UNENCODABLE:
self.dir = support.TESTFN_UNENCODABLE
elif support.TESTFN_NONASCII:
self.dir = support.TESTFN_NONASCII
else:
self.dir = support.TESTFN
self.bdir = os.fsencode(self.dir)
bytesfn = []
def add_filename(fn):
try:
fn = os.fsencode(fn)
except UnicodeEncodeError:
return
bytesfn.append(fn)
add_filename(support.TESTFN_UNICODE)
if support.TESTFN_UNENCODABLE:
add_filename(support.TESTFN_UNENCODABLE)
if support.TESTFN_NONASCII:
add_filename(support.TESTFN_NONASCII)
if not bytesfn:
self.skipTest("couldn't create any non-ascii filename")
self.unicodefn = set()
os.mkdir(self.dir)
try:
for fn in bytesfn:
support.create_empty_file(os.path.join(self.bdir, fn))
fn = os.fsdecode(fn)
if fn in self.unicodefn:
raise ValueError("duplicate filename")
self.unicodefn.add(fn)
except:
shutil.rmtree(self.dir)
raise
def tearDown(self):
shutil.rmtree(self.dir)
def test_listdir(self):
expected = self.unicodefn
found = set(os.listdir(self.dir))
self.assertEqual(found, expected)
# test listdir without arguments
current_directory = os.getcwd()
try:
os.chdir(os.sep)
self.assertEqual(set(os.listdir()), set(os.listdir(os.sep)))
finally:
os.chdir(current_directory)
def test_open(self):
for fn in self.unicodefn:
f = open(os.path.join(self.dir, fn), 'rb')
f.close()
@unittest.skipUnless(hasattr(os, 'statvfs'),
"need os.statvfs()")
def test_statvfs(self):
# issue #9645
for fn in self.unicodefn:
# should not fail with file not found error
fullname = os.path.join(self.dir, fn)
os.statvfs(fullname)
def test_stat(self):
for fn in self.unicodefn:
os.stat(os.path.join(self.dir, fn))
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value.decode())
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = 0
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 100
while count < max and proc.poll() is None:
if m[0] == 1:
break
time.sleep(0.1)
count += 1
else:
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting Ctrl+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle Ctrl+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ListdirTests(unittest.TestCase):
"""Test listdir on Windows."""
def setUp(self):
self.created_paths = []
for i in range(2):
dir_name = 'SUB%d' % i
dir_path = os.path.join(support.TESTFN, dir_name)
file_name = 'FILE%d' % i
file_path = os.path.join(support.TESTFN, file_name)
os.makedirs(dir_path)
with open(file_path, 'w') as f:
f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
self.created_paths.extend([dir_name, file_name])
self.created_paths.sort()
def tearDown(self):
shutil.rmtree(support.TESTFN)
def test_listdir_no_extended_path(self):
"""Test when the path is not an "extended" path."""
# unicode
self.assertEqual(
sorted(os.listdir(support.TESTFN)),
self.created_paths)
# bytes
self.assertEqual(
sorted(os.listdir(os.fsencode(support.TESTFN))),
[os.fsencode(path) for path in self.created_paths])
def test_listdir_extended_path(self):
"""Test when the path starts with '\\\\?\\'."""
# See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
# unicode
path = '\\\\?\\' + os.path.abspath(support.TESTFN)
self.assertEqual(
sorted(os.listdir(path)),
self.created_paths)
# bytes
path = b'\\\\?\\' + os.fsencode(os.path.abspath(support.TESTFN))
self.assertEqual(
sorted(os.listdir(path)),
[os.fsencode(path) for path in self.created_paths])
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@support.skip_unless_symlink
class Win32SymlinkTests(unittest.TestCase):
filelink = 'filelinktest'
filelink_target = os.path.abspath(__file__)
dirlink = 'dirlinktest'
dirlink_target = os.path.dirname(filelink_target)
missing_link = 'missing link'
def setUp(self):
assert os.path.exists(self.dirlink_target)
assert os.path.exists(self.filelink_target)
assert not os.path.exists(self.dirlink)
assert not os.path.exists(self.filelink)
assert not os.path.exists(self.missing_link)
def tearDown(self):
if os.path.exists(self.filelink):
os.remove(self.filelink)
if os.path.exists(self.dirlink):
os.rmdir(self.dirlink)
if os.path.lexists(self.missing_link):
os.remove(self.missing_link)
def test_directory_link(self):
os.symlink(self.dirlink_target, self.dirlink)
self.assertTrue(os.path.exists(self.dirlink))
self.assertTrue(os.path.isdir(self.dirlink))
self.assertTrue(os.path.islink(self.dirlink))
self.check_stat(self.dirlink, self.dirlink_target)
def test_file_link(self):
os.symlink(self.filelink_target, self.filelink)
self.assertTrue(os.path.exists(self.filelink))
self.assertTrue(os.path.isfile(self.filelink))
self.assertTrue(os.path.islink(self.filelink))
self.check_stat(self.filelink, self.filelink_target)
def _create_missing_dir_link(self):
'Create a "directory" link to a non-existent target'
linkname = self.missing_link
if os.path.lexists(linkname):
os.remove(linkname)
target = r'c:\\target does not exist.29r3c740'
assert not os.path.exists(target)
target_is_dir = True
os.symlink(target, linkname, target_is_dir)
def test_remove_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# For compatibility with Unix, os.remove will check the
# directory status and call RemoveDirectory if the symlink
# was created with target_is_dir==True.
os.remove(self.missing_link)
@unittest.skip("currently fails; consider for improvement")
def test_isdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider having isdir return true for directory links
self.assertTrue(os.path.isdir(self.missing_link))
@unittest.skip("currently fails; consider for improvement")
def test_rmdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider allowing rmdir to remove directory links
os.rmdir(self.missing_link)
def check_stat(self, link, target):
self.assertEqual(os.stat(link), os.stat(target))
self.assertNotEqual(os.lstat(link), os.stat(link))
bytes_link = os.fsencode(link)
self.assertEqual(os.stat(bytes_link), os.stat(target))
self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
def test_12084(self):
level1 = os.path.abspath(support.TESTFN)
level2 = os.path.join(level1, "level2")
level3 = os.path.join(level2, "level3")
self.addCleanup(support.rmtree, level1)
os.mkdir(level1)
os.mkdir(level2)
os.mkdir(level3)
file1 = os.path.abspath(os.path.join(level1, "file1"))
create_file(file1)
orig_dir = os.getcwd()
try:
os.chdir(level2)
link = os.path.join(level2, "link")
os.symlink(os.path.relpath(file1), "link")
self.assertIn("link", os.listdir(os.getcwd()))
# Check os.stat calls from the same dir as the link
self.assertEqual(os.stat(file1), os.stat("link"))
# Check os.stat calls from a dir below the link
os.chdir(level1)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
# Check os.stat calls from a dir above the link
os.chdir(level3)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
finally:
os.chdir(orig_dir)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32JunctionTests(unittest.TestCase):
junction = 'junctiontest'
junction_target = os.path.dirname(os.path.abspath(__file__))
def setUp(self):
assert os.path.exists(self.junction_target)
assert not os.path.exists(self.junction)
def tearDown(self):
if os.path.exists(self.junction):
# os.rmdir delegates to Windows' RemoveDirectoryW,
# which removes junction points safely.
os.rmdir(self.junction)
def test_create_junction(self):
_winapi.CreateJunction(self.junction_target, self.junction)
self.assertTrue(os.path.exists(self.junction))
self.assertTrue(os.path.isdir(self.junction))
# Junctions are not recognized as links.
self.assertFalse(os.path.islink(self.junction))
def test_unlink_removes_junction(self):
_winapi.CreateJunction(self.junction_target, self.junction)
self.assertTrue(os.path.exists(self.junction))
os.unlink(self.junction)
self.assertFalse(os.path.exists(self.junction))
@support.skip_unless_symlink
class NonLocalSymlinkTests(unittest.TestCase):
def setUp(self):
r"""
Create this structure:
base
\___ some_dir
"""
os.makedirs('base/some_dir')
def tearDown(self):
shutil.rmtree('base')
def test_directory_link_nonlocal(self):
"""
The symlink target should resolve relative to the link, not relative
to the current directory.
Then, link base/some_link -> base/some_dir and ensure that some_link
is resolved as a directory.
In issue13772, it was discovered that directory detection failed if
the symlink target was not specified relative to the current
directory, which was a defect in the implementation.
"""
src = os.path.join('base', 'some_link')
os.symlink('some_dir', src)
assert os.path.isdir(src)
class FSEncodingTests(unittest.TestCase):
def test_nop(self):
self.assertEqual(os.fsencode(b'abc\xff'), b'abc\xff')
self.assertEqual(os.fsdecode('abc\u0141'), 'abc\u0141')
def test_identity(self):
# assert fsdecode(fsencode(x)) == x
for fn in ('unicode\u0141', 'latin\xe9', 'ascii'):
try:
bytesfn = os.fsencode(fn)
except UnicodeEncodeError:
continue
self.assertEqual(os.fsdecode(bytesfn), fn)
class DeviceEncodingTests(unittest.TestCase):
def test_bad_fd(self):
# Return None when an fd doesn't actually exist.
self.assertIsNone(os.device_encoding(123456))
@unittest.skipUnless(os.isatty(0) and (sys.platform.startswith('win') or
(hasattr(locale, 'nl_langinfo') and hasattr(locale, 'CODESET'))),
'test requires a tty and either Windows or nl_langinfo(CODESET)')
def test_device_encoding(self):
encoding = os.device_encoding(0)
self.assertIsNotNone(encoding)
self.assertTrue(codecs.lookup(encoding))
class PidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'getppid'), "test needs os.getppid")
def test_getppid(self):
p = subprocess.Popen([sys.executable, '-c',
'import os; print(os.getppid())'],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# We are the parent of our subprocess
self.assertEqual(int(stdout), os.getpid())
def test_waitpid(self):
args = [sys.executable, '-c', 'pass']
# Add an implicit test for PyUnicode_FSConverter().
pid = os.spawnv(os.P_NOWAIT, _PathLike(args[0]), args)
status = os.waitpid(pid, 0)
self.assertEqual(status, (pid, 0))
class SpawnTests(unittest.TestCase):
def create_args(self, *, with_env=False, use_bytes=False):
self.exitcode = 17
filename = support.TESTFN
self.addCleanup(support.unlink, filename)
if not with_env:
code = 'import sys; sys.exit(%s)' % self.exitcode
else:
self.env = dict(os.environ)
            # create a unique key
self.key = str(uuid.uuid4())
self.env[self.key] = self.key
# read the variable from os.environ to check that it exists
code = ('import sys, os; magic = os.environ[%r]; sys.exit(%s)'
% (self.key, self.exitcode))
with open(filename, "w") as fp:
fp.write(code)
args = [sys.executable, filename]
if use_bytes:
args = [os.fsencode(a) for a in args]
self.env = {os.fsencode(k): os.fsencode(v)
for k, v in self.env.items()}
return args
@requires_os_func('spawnl')
def test_spawnl(self):
args = self.create_args()
exitcode = os.spawnl(os.P_WAIT, args[0], *args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnle')
def test_spawnle(self):
args = self.create_args(with_env=True)
exitcode = os.spawnle(os.P_WAIT, args[0], *args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnlp')
def test_spawnlp(self):
args = self.create_args()
exitcode = os.spawnlp(os.P_WAIT, args[0], *args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnlpe')
def test_spawnlpe(self):
args = self.create_args(with_env=True)
exitcode = os.spawnlpe(os.P_WAIT, args[0], *args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnv')
def test_spawnv(self):
args = self.create_args()
exitcode = os.spawnv(os.P_WAIT, args[0], args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnve')
def test_spawnve(self):
args = self.create_args(with_env=True)
exitcode = os.spawnve(os.P_WAIT, args[0], args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnvp')
def test_spawnvp(self):
args = self.create_args()
exitcode = os.spawnvp(os.P_WAIT, args[0], args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnvpe')
def test_spawnvpe(self):
args = self.create_args(with_env=True)
exitcode = os.spawnvpe(os.P_WAIT, args[0], args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnv')
def test_nowait(self):
args = self.create_args()
pid = os.spawnv(os.P_NOWAIT, args[0], args)
result = os.waitpid(pid, 0)
self.assertEqual(result[0], pid)
status = result[1]
if hasattr(os, 'WIFEXITED'):
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(os.WEXITSTATUS(status), self.exitcode)
else:
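            # Without WIFEXITED()/WEXITSTATUS() (e.g. on Windows) the raw
            # waitpid() status carries the exit code in its high byte, hence
            # the "exitcode << 8" comparison.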
self.assertEqual(status, self.exitcode << 8)
@requires_os_func('spawnve')
def test_spawnve_bytes(self):
# Test bytes handling in parse_arglist and parse_envlist (#28114)
args = self.create_args(with_env=True, use_bytes=True)
exitcode = os.spawnve(os.P_WAIT, args[0], args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnl')
def test_spawnl_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnl, os.P_NOWAIT, args[0])
self.assertRaises(ValueError, os.spawnl, os.P_NOWAIT, args[0], '')
@requires_os_func('spawnle')
def test_spawnle_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnle, os.P_NOWAIT, args[0], {})
self.assertRaises(ValueError, os.spawnle, os.P_NOWAIT, args[0], '', {})
@requires_os_func('spawnv')
def test_spawnv_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], ())
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], [])
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], ('',))
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], [''])
@requires_os_func('spawnve')
def test_spawnve_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], (), {})
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], [], {})
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], ('',), {})
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], [''], {})
# The introduction of this TestCase caused at least two different errors on
# *nix buildbots. Temporarily skip this to let the buildbots move along.
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
class LoginTests(unittest.TestCase):
def test_getlogin(self):
user_name = os.getlogin()
self.assertNotEqual(len(user_name), 0)
@unittest.skipUnless(hasattr(os, 'getpriority') and hasattr(os, 'setpriority'),
"needs os.getpriority and os.setpriority")
class ProgramPriorityTests(unittest.TestCase):
"""Tests for os.getpriority() and os.setpriority()."""
def test_set_get_priority(self):
base = os.getpriority(os.PRIO_PROCESS, os.getpid())
os.setpriority(os.PRIO_PROCESS, os.getpid(), base + 1)
try:
new_prio = os.getpriority(os.PRIO_PROCESS, os.getpid())
if base >= 19 and new_prio <= 19:
raise unittest.SkipTest("unable to reliably test setpriority "
"at current nice level of %s" % base)
else:
self.assertEqual(new_prio, base + 1)
finally:
try:
os.setpriority(os.PRIO_PROCESS, os.getpid(), base)
except OSError as err:
if err.errno != errno.EACCES:
raise
if threading is not None:
class SendfileTestServer(asyncore.dispatcher, threading.Thread):
class Handler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.in_buffer = []
self.closed = False
self.push(b"220 ready\r\n")
def handle_read(self):
data = self.recv(4096)
self.in_buffer.append(data)
def get_data(self):
return b''.join(self.in_buffer)
def handle_close(self):
self.close()
self.closed = True
def handle_error(self):
raise
def __init__(self, address):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
self._active = False
self._active_lock = threading.Lock()
# --- public API
@property
def running(self):
return self._active
def start(self):
assert not self.running
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def stop(self):
assert self.running
self._active = False
self.join()
def wait(self):
# wait for handler connection to be closed, then stop the server
while not getattr(self.handler_instance, "closed", False):
time.sleep(0.001)
self.stop()
# --- internals
def run(self):
self._active = True
self.__flag.set()
while self._active and asyncore.socket_map:
self._active_lock.acquire()
asyncore.loop(timeout=0.001, count=1)
self._active_lock.release()
asyncore.close_all()
def handle_accept(self):
conn, addr = self.accept()
self.handler_instance = self.Handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
@unittest.skipUnless(threading is not None, "test needs threading module")
@unittest.skipUnless(hasattr(os, 'sendfile'), "test needs os.sendfile()")
class TestSendfile(unittest.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KB
SUPPORT_HEADERS_TRAILERS = not sys.platform.startswith("linux") and \
not sys.platform.startswith("solaris") and \
not sys.platform.startswith("sunos")
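    # The headers/trailers arguments are a BSD/macOS extension of sendfile();
    # the Linux and Solaris implementations only accept (out, in, offset,
    # count).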
requires_headers_trailers = unittest.skipUnless(SUPPORT_HEADERS_TRAILERS,
'requires headers and trailers support')
@classmethod
def setUpClass(cls):
cls.key = support.threading_setup()
create_file(support.TESTFN, cls.DATA)
@classmethod
def tearDownClass(cls):
support.threading_cleanup(*cls.key)
support.unlink(support.TESTFN)
def setUp(self):
self.server = SendfileTestServer((support.HOST, 0))
self.server.start()
self.client = socket.socket()
self.client.connect((self.server.host, self.server.port))
self.client.settimeout(1)
# synchronize by waiting for "220 ready" response
self.client.recv(1024)
self.sockno = self.client.fileno()
self.file = open(support.TESTFN, 'rb')
self.fileno = self.file.fileno()
def tearDown(self):
self.file.close()
self.client.close()
if self.server.running:
self.server.stop()
def sendfile_wrapper(self, sock, file, offset, nbytes, headers=[], trailers=[]):
"""A higher level wrapper representing how an application is
supposed to use sendfile().
"""
while 1:
try:
if self.SUPPORT_HEADERS_TRAILERS:
return os.sendfile(sock, file, offset, nbytes, headers,
trailers)
else:
return os.sendfile(sock, file, offset, nbytes)
except OSError as err:
if err.errno == errno.ECONNRESET:
# disconnected
raise
elif err.errno in (errno.EAGAIN, errno.EBUSY):
                    # we have to retry sending the data
continue
else:
raise
def test_send_whole_file(self):
# normal send
total_sent = 0
offset = 0
nbytes = 4096
while total_sent < len(self.DATA):
sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
if sent == 0:
break
offset += sent
total_sent += sent
self.assertTrue(sent <= nbytes)
self.assertEqual(offset, total_sent)
self.assertEqual(total_sent, len(self.DATA))
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(len(data), len(self.DATA))
self.assertEqual(data, self.DATA)
def test_send_at_certain_offset(self):
# start sending a file at a certain offset
total_sent = 0
offset = len(self.DATA) // 2
must_send = len(self.DATA) - offset
nbytes = 4096
while total_sent < must_send:
sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
if sent == 0:
break
offset += sent
total_sent += sent
self.assertTrue(sent <= nbytes)
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
expected = self.DATA[len(self.DATA) // 2:]
self.assertEqual(total_sent, len(expected))
self.assertEqual(len(data), len(expected))
self.assertEqual(data, expected)
def test_offset_overflow(self):
# specify an offset > file size
offset = len(self.DATA) + 4096
try:
sent = os.sendfile(self.sockno, self.fileno, offset, 4096)
except OSError as e:
# Solaris can raise EINVAL if offset >= file length, ignore.
if e.errno != errno.EINVAL:
raise
else:
self.assertEqual(sent, 0)
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(data, b'')
def test_invalid_offset(self):
with self.assertRaises(OSError) as cm:
os.sendfile(self.sockno, self.fileno, -1, 4096)
self.assertEqual(cm.exception.errno, errno.EINVAL)
def test_keywords(self):
# Keyword arguments should be supported
os.sendfile(out=self.sockno, offset=0, count=4096,
**{'in': self.fileno})
if self.SUPPORT_HEADERS_TRAILERS:
os.sendfile(self.sockno, self.fileno, offset=0, count=4096,
headers=(), trailers=(), flags=0)
# --- headers / trailers tests
@requires_headers_trailers
def test_headers(self):
total_sent = 0
sent = os.sendfile(self.sockno, self.fileno, 0, 4096,
headers=[b"x" * 512])
total_sent += sent
offset = 4096
nbytes = 4096
while 1:
sent = self.sendfile_wrapper(self.sockno, self.fileno,
offset, nbytes)
if sent == 0:
break
total_sent += sent
offset += sent
expected_data = b"x" * 512 + self.DATA
self.assertEqual(total_sent, len(expected_data))
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(hash(data), hash(expected_data))
@requires_headers_trailers
def test_trailers(self):
TESTFN2 = support.TESTFN + "2"
file_data = b"abcdef"
self.addCleanup(support.unlink, TESTFN2)
create_file(TESTFN2, file_data)
with open(TESTFN2, 'rb') as f:
os.sendfile(self.sockno, f.fileno(), 0, len(file_data),
trailers=[b"1234"])
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(data, b"abcdef1234")
@requires_headers_trailers
@unittest.skipUnless(hasattr(os, 'SF_NODISKIO'),
'test needs os.SF_NODISKIO')
def test_flags(self):
try:
os.sendfile(self.sockno, self.fileno, 0, 4096,
flags=os.SF_NODISKIO)
except OSError as err:
if err.errno not in (errno.EBUSY, errno.EAGAIN):
raise
def supports_extended_attributes():
if not hasattr(os, "setxattr"):
return False
try:
with open(support.TESTFN, "xb", 0) as fp:
try:
os.setxattr(fp.fileno(), b"user.test", b"")
except OSError:
return False
finally:
support.unlink(support.TESTFN)
return True
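# A minimal usage sketch of the xattr API exercised by the tests below (file
# and attribute names are illustrative only):
#
#   os.setxattr("somefile", b"user.comment", b"hello")
#   os.getxattr("somefile", b"user.comment")     # -> b"hello"
#   os.listxattr("somefile")                     # -> ['user.comment', ...]
#   os.removexattr("somefile", b"user.comment")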
@unittest.skipUnless(supports_extended_attributes(),
"no non-broken extended attribute support")
# Kernels < 2.6.39 don't respect setxattr flags.
@support.requires_linux_version(2, 6, 39)
class ExtendedAttributeTests(unittest.TestCase):
def _check_xattrs_str(self, s, getxattr, setxattr, removexattr, listxattr, **kwargs):
fn = support.TESTFN
self.addCleanup(support.unlink, fn)
create_file(fn)
with self.assertRaises(OSError) as cm:
getxattr(fn, s("user.test"), **kwargs)
self.assertEqual(cm.exception.errno, errno.ENODATA)
init_xattr = listxattr(fn)
self.assertIsInstance(init_xattr, list)
setxattr(fn, s("user.test"), b"", **kwargs)
xattr = set(init_xattr)
xattr.add("user.test")
self.assertEqual(set(listxattr(fn)), xattr)
self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"")
setxattr(fn, s("user.test"), b"hello", os.XATTR_REPLACE, **kwargs)
self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"hello")
with self.assertRaises(OSError) as cm:
setxattr(fn, s("user.test"), b"bye", os.XATTR_CREATE, **kwargs)
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(OSError) as cm:
setxattr(fn, s("user.test2"), b"bye", os.XATTR_REPLACE, **kwargs)
self.assertEqual(cm.exception.errno, errno.ENODATA)
setxattr(fn, s("user.test2"), b"foo", os.XATTR_CREATE, **kwargs)
xattr.add("user.test2")
self.assertEqual(set(listxattr(fn)), xattr)
removexattr(fn, s("user.test"), **kwargs)
with self.assertRaises(OSError) as cm:
getxattr(fn, s("user.test"), **kwargs)
self.assertEqual(cm.exception.errno, errno.ENODATA)
xattr.remove("user.test")
self.assertEqual(set(listxattr(fn)), xattr)
self.assertEqual(getxattr(fn, s("user.test2"), **kwargs), b"foo")
setxattr(fn, s("user.test"), b"a"*1024, **kwargs)
self.assertEqual(getxattr(fn, s("user.test"), **kwargs), b"a"*1024)
removexattr(fn, s("user.test"), **kwargs)
many = sorted("user.test{}".format(i) for i in range(100))
for thing in many:
setxattr(fn, thing, b"x", **kwargs)
self.assertEqual(set(listxattr(fn)), set(init_xattr) | set(many))
def _check_xattrs(self, *args, **kwargs):
self._check_xattrs_str(str, *args, **kwargs)
support.unlink(support.TESTFN)
self._check_xattrs_str(os.fsencode, *args, **kwargs)
support.unlink(support.TESTFN)
def test_simple(self):
self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
os.listxattr)
def test_lpath(self):
self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
os.listxattr, follow_symlinks=False)
def test_fds(self):
def getxattr(path, *args):
with open(path, "rb") as fp:
return os.getxattr(fp.fileno(), *args)
def setxattr(path, *args):
with open(path, "wb", 0) as fp:
os.setxattr(fp.fileno(), *args)
def removexattr(path, *args):
with open(path, "wb", 0) as fp:
os.removexattr(fp.fileno(), *args)
def listxattr(path, *args):
with open(path, "rb") as fp:
return os.listxattr(fp.fileno(), *args)
self._check_xattrs(getxattr, setxattr, removexattr, listxattr)
@unittest.skipUnless(hasattr(os, 'get_terminal_size'), "requires os.get_terminal_size")
class TermsizeTests(unittest.TestCase):
def test_does_not_crash(self):
"""Check if get_terminal_size() returns a meaningful value.
There's no easy portable way to actually check the size of the
terminal, so let's check if it returns something sensible instead.
"""
try:
size = os.get_terminal_size()
except OSError as e:
if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
# Under win32 a generic OSError can be thrown if the
# handle cannot be retrieved
self.skipTest("failed to query terminal size")
raise
self.assertGreaterEqual(size.columns, 0)
self.assertGreaterEqual(size.lines, 0)
def test_stty_match(self):
"""Check if stty returns the same results
stty actually tests stdin, so get_terminal_size is invoked on
stdin explicitly. If stty succeeded, then get_terminal_size()
should work too.
"""
try:
size = subprocess.check_output(['stty', 'size']).decode().split()
except (FileNotFoundError, subprocess.CalledProcessError):
self.skipTest("stty invocation failed")
expected = (int(size[1]), int(size[0])) # reversed order
try:
actual = os.get_terminal_size(sys.__stdin__.fileno())
except OSError as e:
if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
# Under win32 a generic OSError can be thrown if the
# handle cannot be retrieved
self.skipTest("failed to query terminal size")
raise
self.assertEqual(expected, actual)
class OSErrorTests(unittest.TestCase):
def setUp(self):
class Str(str):
pass
self.bytes_filenames = []
self.unicode_filenames = []
if support.TESTFN_UNENCODABLE is not None:
decoded = support.TESTFN_UNENCODABLE
else:
decoded = support.TESTFN
self.unicode_filenames.append(decoded)
self.unicode_filenames.append(Str(decoded))
if support.TESTFN_UNDECODABLE is not None:
encoded = support.TESTFN_UNDECODABLE
else:
encoded = os.fsencode(support.TESTFN)
self.bytes_filenames.append(encoded)
self.bytes_filenames.append(bytearray(encoded))
self.bytes_filenames.append(memoryview(encoded))
self.filenames = self.bytes_filenames + self.unicode_filenames
def test_oserror_filename(self):
funcs = [
(self.filenames, os.chdir,),
(self.filenames, os.chmod, 0o777),
(self.filenames, os.lstat,),
(self.filenames, os.open, os.O_RDONLY),
(self.filenames, os.rmdir,),
(self.filenames, os.stat,),
(self.filenames, os.unlink,),
]
if sys.platform == "win32":
funcs.extend((
(self.bytes_filenames, os.rename, b"dst"),
(self.bytes_filenames, os.replace, b"dst"),
(self.unicode_filenames, os.rename, "dst"),
(self.unicode_filenames, os.replace, "dst"),
(self.unicode_filenames, os.listdir, ),
))
else:
funcs.extend((
(self.filenames, os.listdir,),
(self.filenames, os.rename, "dst"),
(self.filenames, os.replace, "dst"),
))
if hasattr(os, "chown"):
funcs.append((self.filenames, os.chown, 0, 0))
if hasattr(os, "lchown"):
funcs.append((self.filenames, os.lchown, 0, 0))
if hasattr(os, "truncate"):
funcs.append((self.filenames, os.truncate, 0))
if hasattr(os, "chflags"):
funcs.append((self.filenames, os.chflags, 0))
if hasattr(os, "lchflags"):
funcs.append((self.filenames, os.lchflags, 0))
if hasattr(os, "chroot"):
funcs.append((self.filenames, os.chroot,))
if hasattr(os, "link"):
if sys.platform == "win32":
funcs.append((self.bytes_filenames, os.link, b"dst"))
funcs.append((self.unicode_filenames, os.link, "dst"))
else:
funcs.append((self.filenames, os.link, "dst"))
if hasattr(os, "listxattr"):
funcs.extend((
(self.filenames, os.listxattr,),
(self.filenames, os.getxattr, "user.test"),
(self.filenames, os.setxattr, "user.test", b'user'),
(self.filenames, os.removexattr, "user.test"),
))
if hasattr(os, "lchmod"):
funcs.append((self.filenames, os.lchmod, 0o777))
if hasattr(os, "readlink"):
if sys.platform == "win32":
funcs.append((self.unicode_filenames, os.readlink,))
else:
funcs.append((self.filenames, os.readlink,))
for filenames, func, *func_args in funcs:
for name in filenames:
try:
if isinstance(name, (str, bytes)):
func(name, *func_args)
else:
with self.assertWarnsRegex(DeprecationWarning, 'should be'):
func(name, *func_args)
except OSError as err:
self.assertIs(err.filename, name, str(func))
except UnicodeDecodeError:
pass
else:
self.fail("No exception thrown by {}".format(func))
class CPUCountTests(unittest.TestCase):
def test_cpu_count(self):
cpus = os.cpu_count()
if cpus is not None:
self.assertIsInstance(cpus, int)
self.assertGreater(cpus, 0)
else:
self.skipTest("Could not determine the number of CPUs")
class FDInheritanceTests(unittest.TestCase):
def test_get_set_inheritable(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
os.set_inheritable(fd, True)
self.assertEqual(os.get_inheritable(fd), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(os.get_inheritable(fd), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
os.set_inheritable(fd, True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_open(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
@unittest.skipUnless(hasattr(os, 'pipe'), "need os.pipe()")
def test_pipe(self):
rfd, wfd = os.pipe()
self.addCleanup(os.close, rfd)
self.addCleanup(os.close, wfd)
self.assertEqual(os.get_inheritable(rfd), False)
self.assertEqual(os.get_inheritable(wfd), False)
def test_dup(self):
fd1 = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd1)
fd2 = os.dup(fd1)
self.addCleanup(os.close, fd2)
self.assertEqual(os.get_inheritable(fd2), False)
@unittest.skipUnless(hasattr(os, 'dup2'), "need os.dup2()")
def test_dup2(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
# inheritable by default
fd2 = os.open(__file__, os.O_RDONLY)
try:
os.dup2(fd, fd2)
self.assertEqual(os.get_inheritable(fd2), True)
finally:
os.close(fd2)
# force non-inheritable
fd3 = os.open(__file__, os.O_RDONLY)
try:
os.dup2(fd, fd3, inheritable=False)
self.assertEqual(os.get_inheritable(fd3), False)
finally:
os.close(fd3)
@unittest.skipUnless(hasattr(os, 'openpty'), "need os.openpty()")
def test_openpty(self):
master_fd, slave_fd = os.openpty()
self.addCleanup(os.close, master_fd)
self.addCleanup(os.close, slave_fd)
self.assertEqual(os.get_inheritable(master_fd), False)
self.assertEqual(os.get_inheritable(slave_fd), False)
class PathTConverterTests(unittest.TestCase):
# tuples of (function name, allows fd arguments, additional arguments to
# function, cleanup function)
functions = [
('stat', True, (), None),
('lstat', False, (), None),
('access', False, (os.F_OK,), None),
('chflags', False, (0,), None),
('lchflags', False, (0,), None),
('open', False, (0,), getattr(os, 'close', None)),
]
def test_path_t_converter(self):
str_filename = support.TESTFN
if os.name == 'nt':
bytes_fspath = bytes_filename = None
else:
bytes_filename = support.TESTFN.encode('ascii')
bytes_fspath = _PathLike(bytes_filename)
fd = os.open(_PathLike(str_filename), os.O_WRONLY|os.O_CREAT)
self.addCleanup(support.unlink, support.TESTFN)
self.addCleanup(os.close, fd)
int_fspath = _PathLike(fd)
str_fspath = _PathLike(str_filename)
for name, allow_fd, extra_args, cleanup_fn in self.functions:
with self.subTest(name=name):
try:
fn = getattr(os, name)
except AttributeError:
continue
for path in (str_filename, bytes_filename, str_fspath,
bytes_fspath):
if path is None:
continue
with self.subTest(name=name, path=path):
result = fn(path, *extra_args)
if cleanup_fn is not None:
cleanup_fn(result)
with self.assertRaisesRegex(
TypeError, 'should be string, bytes'):
fn(int_fspath, *extra_args)
if allow_fd:
result = fn(fd, *extra_args) # should not fail
if cleanup_fn is not None:
cleanup_fn(result)
else:
with self.assertRaisesRegex(
TypeError,
'os.PathLike'):
fn(fd, *extra_args)
@unittest.skipUnless(hasattr(os, 'get_blocking'),
'needs os.get_blocking() and os.set_blocking()')
class BlockingTests(unittest.TestCase):
def test_blocking(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_blocking(fd), True)
os.set_blocking(fd, False)
self.assertEqual(os.get_blocking(fd), False)
os.set_blocking(fd, True)
self.assertEqual(os.get_blocking(fd), True)
class ExportsTests(unittest.TestCase):
def test_os_all(self):
self.assertIn('open', os.__all__)
self.assertIn('walk', os.__all__)
class TestScandir(unittest.TestCase):
check_no_resource_warning = support.check_no_resource_warning
def setUp(self):
self.path = os.path.realpath(support.TESTFN)
self.bytes_path = os.fsencode(self.path)
self.addCleanup(support.rmtree, self.path)
os.mkdir(self.path)
def create_file(self, name="file.txt"):
path = self.bytes_path if isinstance(name, bytes) else self.path
filename = os.path.join(path, name)
create_file(filename, b'python')
return filename
def get_entries(self, names):
entries = dict((entry.name, entry)
for entry in os.scandir(self.path))
self.assertEqual(sorted(entries.keys()), names)
return entries
def assert_stat_equal(self, stat1, stat2, skip_fields):
if skip_fields:
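            # skip_fields is used on Windows, where DirEntry.stat() can be
            # served from cached directory-scan data in which st_ino, st_dev
            # and st_nlink are reported as zero, so those fields are skipped.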
for attr in dir(stat1):
if not attr.startswith("st_"):
continue
if attr in ("st_dev", "st_ino", "st_nlink"):
continue
self.assertEqual(getattr(stat1, attr),
getattr(stat2, attr),
(stat1, stat2, attr))
else:
self.assertEqual(stat1, stat2)
def check_entry(self, entry, name, is_dir, is_file, is_symlink):
self.assertIsInstance(entry, os.DirEntry)
self.assertEqual(entry.name, name)
self.assertEqual(entry.path, os.path.join(self.path, name))
self.assertEqual(entry.inode(),
os.stat(entry.path, follow_symlinks=False).st_ino)
entry_stat = os.stat(entry.path)
self.assertEqual(entry.is_dir(),
stat.S_ISDIR(entry_stat.st_mode))
self.assertEqual(entry.is_file(),
stat.S_ISREG(entry_stat.st_mode))
self.assertEqual(entry.is_symlink(),
os.path.islink(entry.path))
entry_lstat = os.stat(entry.path, follow_symlinks=False)
self.assertEqual(entry.is_dir(follow_symlinks=False),
stat.S_ISDIR(entry_lstat.st_mode))
self.assertEqual(entry.is_file(follow_symlinks=False),
stat.S_ISREG(entry_lstat.st_mode))
self.assert_stat_equal(entry.stat(),
entry_stat,
os.name == 'nt' and not is_symlink)
self.assert_stat_equal(entry.stat(follow_symlinks=False),
entry_lstat,
os.name == 'nt')
def test_attributes(self):
link = hasattr(os, 'link')
symlink = support.can_symlink()
dirname = os.path.join(self.path, "dir")
os.mkdir(dirname)
filename = self.create_file("file.txt")
if link:
os.link(filename, os.path.join(self.path, "link_file.txt"))
if symlink:
os.symlink(dirname, os.path.join(self.path, "symlink_dir"),
target_is_directory=True)
os.symlink(filename, os.path.join(self.path, "symlink_file.txt"))
names = ['dir', 'file.txt']
if link:
names.append('link_file.txt')
if symlink:
names.extend(('symlink_dir', 'symlink_file.txt'))
entries = self.get_entries(names)
entry = entries['dir']
self.check_entry(entry, 'dir', True, False, False)
entry = entries['file.txt']
self.check_entry(entry, 'file.txt', False, True, False)
if link:
entry = entries['link_file.txt']
self.check_entry(entry, 'link_file.txt', False, True, False)
if symlink:
entry = entries['symlink_dir']
self.check_entry(entry, 'symlink_dir', True, False, True)
entry = entries['symlink_file.txt']
self.check_entry(entry, 'symlink_file.txt', False, True, True)
def get_entry(self, name):
path = self.bytes_path if isinstance(name, bytes) else self.path
entries = list(os.scandir(path))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertEqual(entry.name, name)
return entry
def create_file_entry(self, name='file.txt'):
filename = self.create_file(name=name)
return self.get_entry(os.path.basename(filename))
def test_current_directory(self):
filename = self.create_file()
old_dir = os.getcwd()
try:
os.chdir(self.path)
            # call scandir() without a parameter: it must list the contents
            # of the current directory
entries = dict((entry.name, entry) for entry in os.scandir())
self.assertEqual(sorted(entries.keys()),
[os.path.basename(filename)])
finally:
os.chdir(old_dir)
def test_repr(self):
entry = self.create_file_entry()
self.assertEqual(repr(entry), "<DirEntry 'file.txt'>")
def test_fspath_protocol(self):
entry = self.create_file_entry()
self.assertEqual(os.fspath(entry), os.path.join(self.path, 'file.txt'))
def test_fspath_protocol_bytes(self):
bytes_filename = os.fsencode('bytesfile.txt')
bytes_entry = self.create_file_entry(name=bytes_filename)
fspath = os.fspath(bytes_entry)
self.assertIsInstance(fspath, bytes)
self.assertEqual(fspath,
os.path.join(os.fsencode(self.path),bytes_filename))
def test_removed_dir(self):
path = os.path.join(self.path, 'dir')
os.mkdir(path)
entry = self.get_entry('dir')
os.rmdir(path)
        # On POSIX, the is_dir() result depends on whether scandir() filled
        # d_type or not
if os.name == 'nt':
self.assertTrue(entry.is_dir())
self.assertFalse(entry.is_file())
self.assertFalse(entry.is_symlink())
if os.name == 'nt':
self.assertRaises(FileNotFoundError, entry.inode)
# don't fail
entry.stat()
entry.stat(follow_symlinks=False)
else:
self.assertGreater(entry.inode(), 0)
self.assertRaises(FileNotFoundError, entry.stat)
self.assertRaises(FileNotFoundError, entry.stat, follow_symlinks=False)
def test_removed_file(self):
entry = self.create_file_entry()
os.unlink(entry.path)
self.assertFalse(entry.is_dir())
        # On POSIX, the is_file() result depends on whether scandir() filled
        # d_type or not
if os.name == 'nt':
self.assertTrue(entry.is_file())
self.assertFalse(entry.is_symlink())
if os.name == 'nt':
self.assertRaises(FileNotFoundError, entry.inode)
# don't fail
entry.stat()
entry.stat(follow_symlinks=False)
else:
self.assertGreater(entry.inode(), 0)
self.assertRaises(FileNotFoundError, entry.stat)
self.assertRaises(FileNotFoundError, entry.stat, follow_symlinks=False)
def test_broken_symlink(self):
if not support.can_symlink():
return self.skipTest('cannot create symbolic link')
filename = self.create_file("file.txt")
os.symlink(filename,
os.path.join(self.path, "symlink.txt"))
entries = self.get_entries(['file.txt', 'symlink.txt'])
entry = entries['symlink.txt']
os.unlink(filename)
self.assertGreater(entry.inode(), 0)
self.assertFalse(entry.is_dir())
self.assertFalse(entry.is_file()) # broken symlink returns False
self.assertFalse(entry.is_dir(follow_symlinks=False))
self.assertFalse(entry.is_file(follow_symlinks=False))
self.assertTrue(entry.is_symlink())
self.assertRaises(FileNotFoundError, entry.stat)
# don't fail
entry.stat(follow_symlinks=False)
def test_bytes(self):
self.create_file("file.txt")
path_bytes = os.fsencode(self.path)
entries = list(os.scandir(path_bytes))
self.assertEqual(len(entries), 1, entries)
entry = entries[0]
self.assertEqual(entry.name, b'file.txt')
self.assertEqual(entry.path,
os.fsencode(os.path.join(self.path, 'file.txt')))
@unittest.skipUnless(os.listdir in os.supports_fd,
'fd support for listdir required for this test.')
def test_fd(self):
self.assertIn(os.scandir, os.supports_fd)
self.create_file('file.txt')
expected_names = ['file.txt']
if support.can_symlink():
os.symlink('file.txt', os.path.join(self.path, 'link'))
expected_names.append('link')
fd = os.open(self.path, os.O_RDONLY)
try:
with os.scandir(fd) as it:
entries = list(it)
names = [entry.name for entry in entries]
self.assertEqual(sorted(names), expected_names)
self.assertEqual(names, os.listdir(fd))
for entry in entries:
self.assertEqual(entry.path, entry.name)
self.assertEqual(os.fspath(entry), entry.name)
self.assertEqual(entry.is_symlink(), entry.name == 'link')
if os.stat in os.supports_dir_fd:
st = os.stat(entry.name, dir_fd=fd)
self.assertEqual(entry.stat(), st)
st = os.stat(entry.name, dir_fd=fd, follow_symlinks=False)
self.assertEqual(entry.stat(follow_symlinks=False), st)
finally:
os.close(fd)
def test_empty_path(self):
self.assertRaises(FileNotFoundError, os.scandir, '')
def test_consume_iterator_twice(self):
self.create_file("file.txt")
iterator = os.scandir(self.path)
entries = list(iterator)
self.assertEqual(len(entries), 1, entries)
        # check that consuming the iterator twice doesn't raise an exception
entries2 = list(iterator)
self.assertEqual(len(entries2), 0, entries2)
def test_bad_path_type(self):
for obj in [1.234, {}, []]:
self.assertRaises(TypeError, os.scandir, obj)
def test_close(self):
self.create_file("file.txt")
self.create_file("file2.txt")
iterator = os.scandir(self.path)
next(iterator)
iterator.close()
# multiple closes
iterator.close()
with self.check_no_resource_warning():
del iterator
def test_context_manager(self):
self.create_file("file.txt")
self.create_file("file2.txt")
with os.scandir(self.path) as iterator:
next(iterator)
with self.check_no_resource_warning():
del iterator
def test_context_manager_close(self):
self.create_file("file.txt")
self.create_file("file2.txt")
with os.scandir(self.path) as iterator:
next(iterator)
iterator.close()
def test_context_manager_exception(self):
self.create_file("file.txt")
self.create_file("file2.txt")
with self.assertRaises(ZeroDivisionError):
with os.scandir(self.path) as iterator:
next(iterator)
1/0
with self.check_no_resource_warning():
del iterator
def test_resource_warning(self):
self.create_file("file.txt")
self.create_file("file2.txt")
iterator = os.scandir(self.path)
next(iterator)
with self.assertWarns(ResourceWarning):
del iterator
support.gc_collect()
# exhausted iterator
iterator = os.scandir(self.path)
list(iterator)
with self.check_no_resource_warning():
del iterator
class TestPEP519(unittest.TestCase):
# Abstracted so it can be overridden to test pure Python implementation
# if a C version is provided.
fspath = staticmethod(os.fspath)
def test_return_bytes(self):
for b in b'hello', b'goodbye', b'some/path/and/file':
self.assertEqual(b, self.fspath(b))
def test_return_string(self):
for s in 'hello', 'goodbye', 'some/path/and/file':
self.assertEqual(s, self.fspath(s))
def test_fsencode_fsdecode(self):
for p in "path/like/object", b"path/like/object":
pathlike = _PathLike(p)
self.assertEqual(p, self.fspath(pathlike))
self.assertEqual(b"path/like/object", os.fsencode(pathlike))
self.assertEqual("path/like/object", os.fsdecode(pathlike))
def test_pathlike(self):
self.assertEqual('#feelthegil', self.fspath(_PathLike('#feelthegil')))
self.assertTrue(issubclass(_PathLike, os.PathLike))
self.assertTrue(isinstance(_PathLike(), os.PathLike))
def test_garbage_in_exception_out(self):
vapor = type('blah', (), {})
for o in int, type, os, vapor():
self.assertRaises(TypeError, self.fspath, o)
def test_argument_required(self):
self.assertRaises(TypeError, self.fspath)
def test_bad_pathlike(self):
# __fspath__ returns a value other than str or bytes.
self.assertRaises(TypeError, self.fspath, _PathLike(42))
# __fspath__ attribute that is not callable.
c = type('foo', (), {})
c.__fspath__ = 1
self.assertRaises(TypeError, self.fspath, c())
# __fspath__ raises an exception.
self.assertRaises(ZeroDivisionError, self.fspath,
_PathLike(ZeroDivisionError()))
# Only test if the C version is provided, otherwise TestPEP519 already tested
# the pure Python implementation.
if hasattr(os, "_fspath"):
class TestPEP519PurePython(TestPEP519):
"""Explicitly test the pure Python implementation of os.fspath()."""
fspath = staticmethod(os._fspath)
if __name__ == "__main__":
unittest.main()
| 37.218894
| 101
| 0.595006
|
97e48efb7eaa58c4d95781afdfddf4fc5c47ab58
| 11,364
|
py
|
Python
|
mw4/logic/camera/cameraAlpaca.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 16
|
2020-01-11T22:32:26.000Z
|
2022-03-31T15:18:14.000Z
|
mw4/logic/camera/cameraAlpaca.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 196
|
2020-01-16T13:56:01.000Z
|
2022-03-29T02:06:51.000Z
|
mw4/logic/camera/cameraAlpaca.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 6
|
2019-12-01T19:39:33.000Z
|
2021-05-27T13:14:20.000Z
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
# external packages
import numpy as np
from astropy.io import fits
from PyQt5.QtTest import QTest
# local imports
from mountcontrol.convert import formatDstrToText
from base.alpacaClass import AlpacaClass
from base.tpool import Worker
from base.transform import JNowToJ2000
class CameraAlpaca(AlpacaClass):
"""
"""
__all__ = ['CameraAlpaca']
def __init__(self, app=None, signals=None, data=None):
super().__init__(app=app, data=data, threadPool=app.threadPool)
self.signals = signals
self.data = data
self.abortExpose = False
def workerGetInitialConfig(self):
"""
        :return: True for test purposes
"""
super().workerGetInitialConfig()
self.getAndStoreAlpacaProperty('cameraxsize', 'CCD_INFO.CCD_MAX_X')
self.getAndStoreAlpacaProperty('cameraysize', 'CCD_INFO.CCD_MAX_Y')
self.getAndStoreAlpacaProperty('canfastreadout', 'CAN_FAST')
self.getAndStoreAlpacaProperty('canabortexposure', 'CAN_ABORT')
self.getAndStoreAlpacaProperty('cansetccdtemperature', 'CAN_SET_CCD_TEMPERATURE')
self.getAndStoreAlpacaProperty('cangetcoolerpower', 'CAN_GET_COOLER_POWER')
self.getAndStoreAlpacaProperty('pixelsizex', 'CCD_INFO.CCD_PIXEL_SIZE_X')
self.getAndStoreAlpacaProperty('pixelsizey', 'CCD_INFO.CCD_PIXEL_SIZE_Y')
self.getAndStoreAlpacaProperty('maxbinx', 'CCD_BINNING.HOR_BIN_MAX')
self.getAndStoreAlpacaProperty('maxbiny', 'CCD_BINNING.VERT_BIN_MAX')
self.getAndStoreAlpacaProperty('gainmax', 'CCD_INFO.GAIN_MAX')
self.getAndStoreAlpacaProperty('gainmin', 'CCD_INFO.GAIN_MIN')
self.getAndStoreAlpacaProperty('startx', 'CCD_FRAME.X')
self.getAndStoreAlpacaProperty('starty', 'CCD_FRAME.Y')
self.log.debug(f'Initial data: {self.data}')
return True
def workerPollData(self):
"""
        :return: True for test purposes
"""
self.getAndStoreAlpacaProperty('binx', 'CCD_BINNING.HOR_BIN')
self.getAndStoreAlpacaProperty('biny', 'CCD_BINNING.VERT_BIN')
self.getAndStoreAlpacaProperty('camerastate', 'CAMERA.STATE')
self.getAndStoreAlpacaProperty('gain', 'CCD_GAIN.GAIN')
self.getAndStoreAlpacaProperty('offset', 'CCD_OFFSET.OFFSET')
self.getAndStoreAlpacaProperty('fastreadout',
'READOUT_QUALITY.QUALITY_LOW',
'READOUT_QUALITY.QUALITY_HIGH')
self.getAndStoreAlpacaProperty('ccdtemperature',
'CCD_TEMPERATURE.CCD_TEMPERATURE_VALUE')
self.getAndStoreAlpacaProperty('cooleron', 'CCD_COOLER.COOLER_ON')
self.getAndStoreAlpacaProperty('coolerpower',
'CCD_COOLER_POWER.CCD_COOLER_VALUE')
return True
def sendDownloadMode(self, fastReadout=False):
"""
        sendDownloadMode sets the readout speed of the camera
:return: success
"""
canFast = self.data.get('CAN_FAST', False)
if not canFast:
return False
if fastReadout:
self.setAlpacaProperty('fastreadout', FastReadout=True)
quality = 'High' if self.data.get('READOUT_QUALITY.QUALITY_HIGH', True) else 'Low'
self.log.debug(f'Camera has readout quality entry: {quality}')
return True
def workerExpose(self,
imagePath='',
expTime=3,
binning=1,
fastReadout=True,
posX=0,
posY=0,
width=1,
height=1,
focalLength=1,
):
"""
:param imagePath:
:param expTime:
:param binning:
:param fastReadout:
:param posX:
:param posY:
:param width:
:param height:
:param focalLength:
:return: success
"""
self.sendDownloadMode(fastReadout=fastReadout)
self.setAlpacaProperty('binx', BinX=int(binning))
        self.setAlpacaProperty('biny', BinY=int(binning))
self.setAlpacaProperty('startx', StartX=int(posX / binning))
self.setAlpacaProperty('starty', StartY=int(posY / binning))
self.setAlpacaProperty('numx', NumX=int(width / binning))
        self.setAlpacaProperty('numy', NumY=int(height / binning))
isMount = self.app.deviceStat['mount']
if isMount:
ra = self.app.mount.obsSite.raJNow
dec = self.app.mount.obsSite.decJNow
obsTime = self.app.mount.obsSite.timeJD
if ra is not None and dec is not None and obsTime is not None:
ra, dec = JNowToJ2000(ra, dec, obsTime)
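                # coordinates are converted from JNow to J2000 here because
                # the FITS header written below declares EQUINOX 2000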
self.setAlpacaProperty('startexposure', Duration=expTime, Light=True)
timeLeft = expTime
while not self.getAlpacaProperty('imageready'):
text = f'expose {timeLeft:3.0f} s'
QTest.qWait(100)
if timeLeft >= 0.1:
timeLeft -= 0.1
else:
timeLeft = 0
self.signals.message.emit(text)
if self.abortExpose:
break
if not self.abortExpose:
self.signals.integrated.emit()
self.signals.message.emit('download')
tmp = self.getAlpacaProperty('imagearray')
if tmp is None:
self.abortExpose = True
else:
data = np.array(tmp, dtype=np.uint16).transpose()
if not self.abortExpose:
self.signals.downloaded.emit()
self.signals.message.emit('saving')
hdu = fits.PrimaryHDU(data=data)
header = hdu.header
header.append(('OBJECT', 'SKY_OBJECT', 'default name from MW4'))
header.append(('FRAME', 'Light', 'Modeling works with light frames'))
header.append(('EQUINOX', 2000, 'All data is stored in J2000'))
header.append(('PIXSIZE1', self.data['CCD_INFO.CCD_PIXEL_SIZE_X'] * binning))
header.append(('PIXSIZE2', self.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] * binning))
header.append(('XPIXSZ', self.data['CCD_INFO.CCD_PIXEL_SIZE_X'] * binning))
header.append(('YPIXSZ', self.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] * binning))
if focalLength:
factor = binning / focalLength * 206.265
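                # presumably the plate scale: 206.265 converts pixel size [um]
                # over focal length [mm] into arcsec per pixel, so SCALE below
                # is the binned image scale in arcsec/pixel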
header.append(('FOCALLEN', focalLength,
'Data taken from driver or manual input'))
else:
factor = 1
header.append(('SCALE', self.data['CCD_INFO.CCD_PIXEL_SIZE_X'] * factor))
header.append(('XBINNING',
binning, 'MW4 is using the same binning for x and y'))
header.append(('YBINNING',
binning, 'MW4 is using the same binning for x and y'))
header.append(('EXPTIME', expTime))
header.append(('OBSERVER', 'MW4'))
timeJD = self.app.mount.obsSite.timeJD
header.append(('DATE-OBS', timeJD.tt_strftime('%Y-%m-%dT%H:%M:%S'),
'Time is UTC of mount'))
header.append(('CCD-TEMP',
self.data.get('CCD_TEMPERATURE.CCD_TEMPERATURE_VALUE', 0)))
header.append(('SQM',
self.app.skymeter.data.get('SKY_QUALITY.SKY_BRIGHTNESS', 0)))
if isMount:
header.append(('RA', ra._degrees, 'Float value in degree'))
header.append(('DEC', dec.degrees, 'Float value in degree'))
header.append(('TELESCOP',
self.app.mount.firmware.product,
'Mount version from firmware'))
lat = self.app.mount.obsSite.location.latitude
header.append(('SITELAT', formatDstrToText(lat)))
lon = self.app.mount.obsSite.location.longitude
header.append(('SITELON', formatDstrToText(lon)))
elev = self.app.mount.obsSite.location.elevation.m
header.append(('SITEELEV', elev))
hdu.writeto(imagePath, overwrite=True, output_verify='silentfix+warn')
self.log.info(f'Saved Image: [{imagePath}]')
if self.abortExpose:
imagePath = ''
self.signals.saved.emit(imagePath)
self.signals.exposeReady.emit()
self.signals.message.emit('')
return True
def expose(self,
imagePath='',
expTime=3,
binning=1,
fastReadout=True,
posX=0,
posY=0,
width=1,
height=1,
focalLength=1,
):
"""
:return: success
"""
self.abortExpose = False
worker = Worker(self.workerExpose,
imagePath=imagePath,
expTime=expTime,
binning=binning,
fastReadout=fastReadout,
posX=posX,
posY=posY,
width=width,
height=height,
focalLength=focalLength)
self.threadPool.start(worker)
return True
def abort(self):
"""
:return: success
"""
if not self.deviceConnected:
return False
self.abortExpose = True
canAbort = self.data.get('CAN_ABORT', False)
if not canAbort:
return False
self.getAlpacaProperty('stopexposure')
return True
def sendCoolerSwitch(self, coolerOn=False):
"""
:param coolerOn:
:return: success
"""
if not self.deviceConnected:
return False
self.setAlpacaProperty('cooleron', CoolerOn=coolerOn)
return True
def sendCoolerTemp(self, temperature=0):
"""
:param temperature:
:return: success
"""
if not self.deviceConnected:
return False
canSetCCDTemp = self.data.get('CAN_SET_CCD_TEMPERATURE', False)
if not canSetCCDTemp:
return False
self.setAlpacaProperty('setccdtemperature', SetCCDTemperature=temperature)
return True
def sendOffset(self, offset=0):
"""
:param offset:
:return: success
"""
if not self.deviceConnected:
return False
self.setAlpacaProperty('offset', Offset=offset)
return True
def sendGain(self, gain=0):
"""
:param gain:
:return: success
"""
if not self.deviceConnected:
return False
self.setAlpacaProperty('gain', Gain=gain)
return True
| 36.191083
| 90
| 0.556582
|
4ce2a354338658947c4bdb4444d2c8634d1ed5d6
| 24,310
|
py
|
Python
|
opencolorio_config_aces/config/generation/common.py
|
michdolan/OpenColorIO-Config-ACES
|
5216c2a184e03529557993b7dc670d351aadddc7
|
[
"BSD-3-Clause"
] | null | null | null |
opencolorio_config_aces/config/generation/common.py
|
michdolan/OpenColorIO-Config-ACES
|
5216c2a184e03529557993b7dc670d351aadddc7
|
[
"BSD-3-Clause"
] | null | null | null |
opencolorio_config_aces/config/generation/common.py
|
michdolan/OpenColorIO-Config-ACES
|
5216c2a184e03529557993b7dc670d351aadddc7
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
"""
OpenColorIO Config Generation Common Objects
============================================
Defines various objects related to *OpenColorIO* config generation:
- :class:`opencolorio_config_aces.VersionData`
- :class:`opencolorio_config_aces.ConfigData`
- :class:`opencolorio_config_aces.serialize_config_data`
- :class:`opencolorio_config_aces.deserialize_config_data`
- :func:`opencolorio_config_aces.validate_config`
- :func:`opencolorio_config_aces.generate_config`
"""
import logging
import PyOpenColorIO as ocio
from collections.abc import Mapping
from dataclasses import asdict, dataclass, field
from typing import Union
from opencolorio_config_aces.utilities import required
from opencolorio_config_aces.config.generation import (
colorspace_factory,
look_factory,
named_transform_factory,
view_transform_factory,
)
__author__ = "OpenColorIO Contributors"
__copyright__ = "Copyright Contributors to the OpenColorIO Project."
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "OpenColorIO Contributors"
__email__ = "ocio-dev@lists.aswf.io"
__status__ = "Production"
__all__ = [
"VersionData",
"ConfigData",
"deserialize_config_data",
"serialize_config_data",
"validate_config",
"generate_config",
]
logger = logging.getLogger(__name__)
@dataclass
class VersionData:
"""
Define the data container for a two component version identifier.
Parameters
----------
major : int, optional
Major version number.
minor : int, optional
Minor version number.
Attributes
----------
major
minor
"""
major: int = 1
minor: int = 0
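# For illustration: VersionData(2, 0) is the default ConfigData.profile_version
# below, i.e. the generated config declares profile version "2.0".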
@dataclass
class ConfigData:
"""
Define the data container for an *OpenColorIO* config.
Parameters
----------
profile_version : VersionData, optional
        Config major and minor version, e.g. (1, 0) or (2, 0).
description : unicode, optional
Config description.
search_path : list, optional
Config search path.
roles : dict
Config roles, a dict of role and colorspace name.
colorspaces : array_like
Config colorspaces, an iterable of
:attr:`PyOpenColorIO.ColorSpace` class instances or mappings to create
them with :func:`opencolorio_config_aces.colorspace_factory`
definition.
named_transforms : array_like
Config named transforms, an iterable of
        :attr:`PyOpenColorIO.NamedTransform` class instances or mappings to
create them with
:func:`opencolorio_config_aces.named_transform_factory` definition.
view_transforms : array_like, optional
Config view transforms, an iterable of
:attr:`PyOpenColorIO.ViewTransform` class instances or mappings to
create them with :func:`opencolorio_config_aces.view_transform_factory`
definition.
looks : array_like, optional
Config looks, an iterable of :attr:`PyOpenColorIO.Look` class
instances or mappings to create them with
:func:`opencolorio_config_aces.look_factory` definition.
shared_views : array_like, optional
Config shared views, an iterable of dicts of view, view transform,
        colorspace and rule names, an iterable of looks, and a description.
views : array_like, optional
Config views, an iterable of dicts of display, view
and colorspace names.
active_displays : array_like, optional
Config active displays, an iterable of display names.
active_views : array_like, optional
        Config active views, an iterable of view names.
file_rules : array_like, optional
Config file rules, a dict of file rules.
viewing_rules : array_like, optional
Config viewing rules, a dict of viewing rules.
inactive_colorspaces : array_like, optional
        Config inactive colorspaces, an iterable of colorspace names.
default_view_transform : unicode, optional
Name of the default view transform.
Attributes
----------
schema_version
profile_version
description
search_path
roles
colorspaces
named_transforms
view_transforms
looks
shared_views
views
active_displays
active_views
file_rules
viewing_rules
inactive_colorspaces
default_view_transform
"""
    schema_version: VersionData = field(
        default_factory=lambda: VersionData(1, 0)
    )
    profile_version: VersionData = field(
        default_factory=lambda: VersionData(2, 0)
    )
description: str = (
'An "OpenColorIO" config generated by "OpenColorIO-Config-ACES".'
)
search_path: Union[list] = field(default_factory=list)
roles: Union[dict] = field(default_factory=dict)
colorspaces: Union[list] = field(default_factory=list)
named_transforms: Union[list] = field(default_factory=list)
view_transforms: Union[list] = field(default_factory=list)
looks: Union[list] = field(default_factory=list)
shared_views: Union[list] = field(default_factory=list)
views: Union[list] = field(default_factory=list)
active_displays: Union[list] = field(default_factory=list)
active_views: Union[list] = field(default_factory=list)
file_rules: Union[list] = field(default_factory=list)
viewing_rules: Union[list] = field(default_factory=list)
inactive_colorspaces: Union[list] = field(default_factory=list)
default_view_transform: str = field(default_factory=str)
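# A minimal, illustrative sketch of how "ConfigData" can be populated; the
# colorspace, role and view names used here are hypothetical placeholders,
# and the "__main__" section at the end of this module gives fuller
# examples. The helper is never called by this module.
def _example_minimal_config_data():
    """Return a minimal, hypothetical "ConfigData" for illustration."""
    return ConfigData(
        description="Minimal example config.",
        roles={"scene_linear": "Linear"},
        colorspaces=[
            {"name": "Linear", "family": "Linear"},
            {"name": "Raw", "family": "Utility", "is_data": True},
        ],
        views=[{"display": "sRGB", "view": "Raw", "colorspace": "Raw"}],
        active_displays=["sRGB"],
        active_views=["Raw"],
    )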
@required("jsonpickle")
def deserialize_config_data(path):
"""
Deserialize the *JSON* *OpenColorIO* config data container at given path.
Parameters
----------
path : unicode
*JSON* file path.
Returns
-------
ConfigData
Deserialized *JSON* *OpenColorIO* config data container.
"""
import jsonpickle
with open(path) as config_json:
return ConfigData(**jsonpickle.decode(config_json.read()))
# TODO: Implement schema verification support for serialized data.
@required("jsonpickle")
def serialize_config_data(data, path):
"""
Serialize the *OpenColorIO* config data container as a *JSON* file.
Parameters
----------
data : ConfigData
*OpenColorIO* config data container to serialize.
path : unicode
*JSON* file path.
"""
import jsonpickle
with open(path, "w") as config_json:
config_json.write(jsonpickle.encode(asdict(data), indent=2))
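# Hedged usage sketch: a *JSON* round-trip of a "ConfigData" instance using
# the two functions above. The file path is a hypothetical placeholder and
# the helper is never called by this module; the "__main__" section below
# holds the maintained examples.
def _example_config_data_round_trip(path="/tmp/config-data-example.json"):
    """Serialize a small "ConfigData" instance and read it back."""
    data = ConfigData(roles={"scene_linear": "Linear"})
    serialize_config_data(data, path)
    return deserialize_config_data(path)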
def validate_config(config):
"""
Validate given *OpenColorIO* config.
Parameters
----------
config : Config
*OpenColorIO* config to validate.
Returns
-------
bool
Whether the *OpenColorIO* config is valid.
"""
try:
config.validate()
return True
except Exception as error:
logger.critical(error)
return False
def generate_config(data, config_name=None, validate=True, base_config=None):
"""
Generate the *OpenColorIO* config from given data.
Parameters
----------
data : ConfigData
*OpenColorIO* config data.
config_name : unicode, optional
        *OpenColorIO* config file name; if given, the config will be written
        to disk.
validate : bool, optional
Whether to validate the config.
    base_config : Config, optional
        *OpenColorIO* base config whose data is inherited as a starting point.
Returns
-------
Config
*OpenColorIO* config.
"""
if base_config is not None:
config = base_config
else:
config = ocio.Config()
config.setVersion(
data.profile_version.major, data.profile_version.minor
)
if data.description is not None:
config.setDescription(data.description)
for search_path in data.search_path:
logger.debug(f'Adding "{search_path}".')
config.addSearchPath(search_path)
for role, colorspace in data.roles.items():
logger.debug(f'Adding "{colorspace}" colorspace as "{role}" role.')
config.setRole(role, colorspace)
for colorspace in data.colorspaces:
if isinstance(colorspace, Mapping):
colorspace = colorspace_factory(**colorspace)
logger.debug(f'Adding "{colorspace.getName()}" colorspace.')
config.addColorSpace(colorspace)
for named_transform in data.named_transforms:
if isinstance(named_transform, Mapping):
named_transform = named_transform_factory(**named_transform)
logger.debug(f'Adding "{named_transform.getName()}" named transform.')
config.addNamedTransform(named_transform)
for view_transform in data.view_transforms:
if isinstance(view_transform, Mapping):
view_transform = view_transform_factory(**view_transform)
logger.debug(f'Adding "{view_transform.getName()}" view transform.')
config.addViewTransform(view_transform)
for look in data.looks:
if isinstance(look, Mapping):
look = look_factory(**look)
logger.debug(f'Adding "{look.getName()}" look.')
config.addLook(look)
if data.profile_version.major >= 2:
logger.debug(f'Disabling "{data.inactive_colorspaces}" colorspaces.')
config.setInactiveColorSpaces(",".join(data.inactive_colorspaces))
for shared_view in data.shared_views:
display_colorspace = shared_view.get(
"display_colorspace", "<USE_DISPLAY_NAME>"
)
looks = shared_view.get("looks")
view_transform = shared_view.get("view_transform")
rule = shared_view.get("rule")
description = shared_view.get("description")
view = shared_view["view"]
logger.debug(
f'Adding "{view}" shared view using "{view_transform}" '
f'view transform, "{display_colorspace}" display colorspace, '
f'"{looks}" looks, "{rule}" rule and "{description}"'
f"description."
)
config.addSharedView(
view, view_transform, display_colorspace, looks, rule, description
)
for view in data.views:
display = view["display"]
colorspace = view.get("colorspace")
looks = view.get("looks")
view_transform = view.get("view_transform")
display_colorspace = view.get("display_colorspace")
rule = view.get("rule")
description = view.get("description")
view = view["view"]
if colorspace is not None:
logger.debug(
f'Adding "{view}" view to "{display}" display '
f'using "{colorspace}" colorspace.'
)
config.addDisplayView(display, view, colorspace, looks)
elif view_transform is not None and display_colorspace is not None:
logger.debug(
f'Adding "{view}" view to "{display}" display '
f'using "{view_transform}" view transform, '
f'"{display_colorspace}" display colorspace, '
f'"{rule}" rule and "{description}" description.'
)
config.addDisplayView(
display,
view,
view_transform,
display_colorspace,
looks,
rule,
description,
)
else:
logger.debug(f'Adding "{view}" view to "{display}" display.')
config.addDisplaySharedView(display, view)
if data.active_displays:
logger.debug(f'Activating "{data.active_displays}" displays.')
config.setActiveDisplays(",".join(data.active_displays))
if data.active_views:
logger.debug(f'Activating "{data.active_views}" views.')
config.setActiveViews(",".join(data.active_views))
if data.file_rules:
file_rules = ocio.FileRules()
rule_index = 0
for file_rule in reversed(data.file_rules):
name = file_rule["name"]
colorspace = file_rule["colorspace"]
regex = file_rule.get("regex")
pattern = file_rule.get("pattern")
extension = file_rule.get("extension")
if name == "Default":
logger.debug(
f'Setting "{name}" file rule with '
f'"{colorspace}" colorspace.'
)
file_rules.setDefaultRuleColorSpace(colorspace)
elif regex:
logger.debug(
f'Adding "{name}" file rule with '
f'"{regex}" regex pattern for '
f'"{colorspace}" colorspace.'
)
file_rules.insertRule(rule_index, name, colorspace, regex)
rule_index += 1
else:
logger.debug(
f'Adding "{name}" file rule with '
f'"{pattern}" pattern and "{extension}" extension '
f'for "{colorspace}" colorspace.'
)
file_rules.insertRule(
rule_index, name, colorspace, pattern, extension
)
rule_index += 1
config.setFileRules(file_rules)
if data.viewing_rules:
viewing_rules = ocio.ViewingRules()
for i, viewing_rule in enumerate(reversed(data.viewing_rules)):
logger.warning("Inserting a viewing rule is not supported yet!")
# viewing_rules.insertRule()
config.setViewingRules(viewing_rules)
if data.default_view_transform is not None:
config.setDefaultViewTransformName(data.default_view_transform)
if validate:
validate_config(config)
if config_name is not None:
with open(config_name, "w") as file:
file.write(config.serialize())
return config
if __name__ == "__main__":
import os
import opencolorio_config_aces
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
build_directory = os.path.join(
opencolorio_config_aces.__path__[0],
"..",
"build",
"config",
"common",
"tests",
)
logger.info(f'Using "{build_directory}" build directory...')
if not os.path.exists(build_directory):
os.makedirs(build_directory)
# "OpenColorIO 1" configuration.
colorspace_1 = {"name": "Gamut - sRGB", "family": "Gamut"}
colorspace_2 = {
"name": "CCTF - sRGB",
"family": "CCTF",
"description": (
'WARNING: The sRGB "EOTF" is purposely incorrect and '
"only a placeholder!"
),
"to_reference": {
"transform_type": "ExponentTransform",
"value": [2.2, 2.2, 2.2, 1],
},
}
colorspace_3 = {
"name": "Colorspace - sRGB",
"family": "Colorspace",
"to_reference": {
"transform_type": "ColorSpaceTransform",
"src": "CCTF - sRGB",
"dst": "Gamut - sRGB",
},
}
colorspace_4 = colorspace_factory(
**{"name": "Utility - Raw", "family": "Utility", "is_data": True}
)
_red_cdl_transform = ocio.CDLTransform()
_red_cdl_transform.setSlope([0, 0, 0])
_red_cdl_transform.setOffset([1, 0, 0])
look_1 = look_factory("Look - Red", forward_transform=_red_cdl_transform)
look_2 = {
"name": "Look - Green",
"forward_transform": {
"transform_type": "CDLTransform",
"slope": [0, 0, 0],
"offset": [0, 1, 0],
},
}
_gain_cdl_transform = ocio.CDLTransform()
_gain_cdl_transform.setSlope([0.5, 0.5, 0.5])
look_3 = {
"name": "Look - Quarter Blue",
"forward_transform": [ # Note the nested "GroupTransform"s.
[
{
"transform_type": "CDLTransform",
"slope": [0, 0, 0],
"offset": [0, 1, 0],
},
_gain_cdl_transform,
],
_gain_cdl_transform,
],
}
display_1 = {
"name": "View - sRGB Monitor - sRGB",
"family": "View",
"base_colorspace": colorspace_3,
}
data = ConfigData(
roles={ocio.ROLE_SCENE_LINEAR: "Gamut - sRGB"},
colorspaces=[
colorspace_1,
colorspace_2,
colorspace_3,
colorspace_4,
display_1,
],
looks=[look_1, look_2, look_3],
views=[
{
"display": "sRGB Monitor",
"view": "sRGB - sRGB",
"colorspace": display_1["name"],
},
{
"display": "sRGB Monitor",
"view": "Raw",
"colorspace": colorspace_4.getName(),
},
],
active_displays=["sRGB Monitor"],
active_views=["sRGB - sRGB"],
)
generate_config(data, os.path.join(build_directory, "config-v1.ocio"))
# TODO: Pickling "PyOpenColorIO.ColorSpace" fails on early "PyOpenColorIO"
# versions.
try:
serialize_config_data(
data, os.path.join(build_directory, "config-v1.json")
)
except TypeError as error:
logger.critical(error)
# "OpenColorIO 2" configuration.
colorspace_1 = {
"name": "ACES - ACES2065-1",
"family": "ACES",
"aliases": "lin_ap0",
}
colorspace_2 = {
"name": "ACES - ACEScg",
"family": "ACES",
"to_reference": {
"transform_type": "BuiltinTransform",
"style": "ACEScg_to_ACES2065-1",
},
"aliases": ["lin_ap1"],
}
colorspace_3 = {
"name": "Gamut - sRGB",
"family": "Gamut",
"to_reference": {
"transform_type": "MatrixTransform",
"matrix": [
0.4387956642,
0.3825367756,
0.1787151431,
0.0000000000,
0.0890560064,
0.8126211313,
0.0982957371,
0.0000000000,
0.0173063724,
0.1083658908,
0.8742745984,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
1.0000000000,
],
},
}
colorspace_4 = {
"name": "CCTF - sRGB",
"family": "CCTF",
"to_reference": {
"transform_type": "ExponentWithLinearTransform",
"gamma": [2.4, 2.4, 2.4, 1],
"offset": [0.055, 0.055, 0.055, 0],
},
}
colorspace_5 = {
"name": "Utility - Raw",
"family": "Utility",
"is_data": True,
}
named_transform_1 = {
"name": "+1 Stop",
"family": "Exposure",
"forward_transform": {
"transform_type": "MatrixTransform",
"matrix": [
2.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
2.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
2.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
1.0000000000,
],
},
}
look_1 = {
"name": "Look - Red",
"forward_transform": {
"transform_type": "CDLTransform",
"slope": [0, 0, 0],
"offset": [1, 0, 0],
},
}
interchange = {"name": "CIE-XYZ D65"}
display_1 = {
"name": "sRGB Monitor",
"from_reference": {
"transform_type": "BuiltinTransform",
"style": "DISPLAY - CIE-XYZ-D65_to_sRGB",
},
"reference_space": "REFERENCE_SPACE_DISPLAY",
}
display_2 = {
"name": "ITU-R BT.1886 Monitor",
"from_reference": {
"transform_type": "BuiltinTransform",
"style": "DISPLAY - CIE-XYZ-D65_to_REC.1886-REC.709",
},
"reference_space": "REFERENCE_SPACE_DISPLAY",
}
view_transform_1 = {
"name": "ACES Output - SDR Video - 1.0",
"from_reference": {
"transform_type": "BuiltinTransform",
"style": "ACES-OUTPUT - ACES2065-1_to_CIE-XYZ-D65 - SDR-VIDEO_1.0",
},
}
view_transform_2 = {
"name": "Output - No Tonescale",
"from_reference": {
"transform_type": "BuiltinTransform",
"style": "UTILITY - ACES-AP0_to_CIE-XYZ-D65_BFD",
},
}
displays = (display_1, display_2)
view_transforms = (view_transform_1, view_transform_2)
shared_views = [
{
"display": display["name"],
"view": view_transform["name"],
"view_transform": view_transform["name"],
}
for display in displays
for view_transform in view_transforms
]
data = ConfigData(
profile_version=VersionData(2, 0),
roles={
"aces_interchange": "ACES - ACES2065-1",
"cie_xyz_d65_interchange": "CIE-XYZ D65",
ocio.ROLE_DEFAULT: "ACES - ACES2065-1",
ocio.ROLE_SCENE_LINEAR: colorspace_2["name"],
},
colorspaces=[
colorspace_1,
colorspace_2,
colorspace_3,
colorspace_4,
colorspace_5,
interchange,
display_1,
display_2,
],
named_transforms=[named_transform_1],
looks=[look_1],
view_transforms=[view_transform_1, view_transform_2],
inactive_colorspaces=["CIE-XYZ D65"],
shared_views=shared_views,
views=shared_views
+ [
{
"display": display["name"],
"view": "Raw",
"colorspace": "Utility - Raw",
}
for display in displays
],
active_displays=[display_1["name"], display_2["name"]],
active_views=[
view_transform["name"] for view_transform in view_transforms
]
+ ["Raw"],
file_rules=[
{
"name": "Linear - sRGB",
"colorspace": "Gamut - sRGB",
"regex": "_[sS][rR][gG][bB]\\.([eE][xX][rR]|[hH][dD][rR])$",
},
{
"name": "EOTF - sRGB",
"colorspace": "CCTF - sRGB",
"regex": "_[sS][rR][gG][bB]\\.([pP][nN][gG]|[tT][iI][fF])$",
},
{"name": "Default", "colorspace": "ACES - ACES2065-1"},
],
viewing_rules=[],
)
config = generate_config(
data, os.path.join(build_directory, "config-v2.ocio")
)
# TODO: Pickling "PyOpenColorIO.ColorSpace" fails on early "PyOpenColorIO"
# versions.
try:
serialize_config_data(
data, os.path.join(build_directory, "config-v2.json")
)
except TypeError as error:
logger.critical(error)
named_transform_2 = {
"name": "-1 Stop",
"family": "Exposure",
"forward_transform": {
"transform_type": "MatrixTransform",
"matrix": [
-2.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
-2.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
-2.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
0.0000000000,
1.0000000000,
],
},
}
data = ConfigData(named_transforms=[named_transform_2])
generate_config(
data,
os.path.join(build_directory, "config-v2-with-named-transform.ocio"),
base_config=config,
)
# TODO: Pickling "PyOpenColorIO.ColorSpace" fails on early "PyOpenColorIO"
# versions.
try:
serialize_config_data(
data,
os.path.join(
build_directory, "config-v2-with-named-transform.json"
),
)
except TypeError as error:
logger.critical(error)
| 30.772152
| 79
| 0.574044
|
6062406330a254d802b58d272f8336120fb4129f
| 7,916
|
py
|
Python
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/yaml/resolver.py
|
hectormartinez/rougexstem
|
32da9eab253cb88fc1882e59026e8b5b40900a25
|
[
"Apache-2.0"
] | null | null | null |
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/yaml/resolver.py
|
hectormartinez/rougexstem
|
32da9eab253cb88fc1882e59026e8b5b40900a25
|
[
"Apache-2.0"
] | null | null | null |
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/yaml/resolver.py
|
hectormartinez/rougexstem
|
32da9eab253cb88fc1882e59026e8b5b40900a25
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, basestring):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if index_check in [False, None] and current_index is None:
return
if isinstance(index_check, basestring):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(ur'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
['<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(ur'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
['='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(ur'^(?:!|&|\*)$'),
list(u'!&*'))
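# Hedged sketch (not part of the original module): "add_path_resolver" tags
# nodes by their position in the document rather than by their value. The
# tag and path below are hypothetical, and the helper is never called here
# because doing so would mutate the shared Resolver class state.
def _example_add_path_resolver():
    # Tag any node found under a top-level 'people' key with u'!person'.
    Resolver.add_path_resolver(u'!person', [u'people', None], str)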
| 37.516588
| 82
| 0.541056
|
7e3aa5b537ce2ac7c92e8f19fe7c336ad045ef1d
| 9,456
|
py
|
Python
|
daedalus/PopulationSynthesis/static.py
|
niklomax/daedalus
|
4936f181050dc6c3eaf564b1a03e9f0b45396e39
|
[
"MIT"
] | 4
|
2020-05-13T10:54:32.000Z
|
2021-12-17T11:00:42.000Z
|
daedalus/PopulationSynthesis/static.py
|
niklomax/daedalus
|
4936f181050dc6c3eaf564b1a03e9f0b45396e39
|
[
"MIT"
] | 30
|
2022-02-14T11:21:04.000Z
|
2022-03-30T11:00:22.000Z
|
minos/PopulationSynthesis/static.py
|
Leeds-MRG/Minos
|
6273dc718a7a1e8513b0c31e26269ff8f9720ae1
|
[
"MIT"
] | 2
|
2021-01-12T10:49:33.000Z
|
2021-01-20T13:11:36.000Z
|
"""
Microsimulation by a sequence of microsynthesised populations
"""
import numpy as np
import pandas as pd
# from random import randint
import humanleague as hl  # used below for prob2IntFreq/qisi/ipf/flatten
import ukpopulation.nppdata as nppdata
import ukpopulation.snppdata as snppdata
import ukpopulation.customsnppdata as customsnppdata
import ukpopulation.myedata as myedata
import microsimulation.utils as utils
import microsimulation.common as common
class SequentialMicrosynthesis(common.Base):
"""
Static microsimulation based on a sequence of microsyntheses
    Performs a sequence of static microsyntheses using census data as a seed population and mid-year estimates as marginal
constraints. This is the simplest microsimulation model and is intended as a comparison/calibration for Monte-Carlo
based microsimulation
"""
def __init__(self, region, resolution, variant, is_custom=False, cache_dir="./cache", output_dir="./data",
fast_mode=False):
common.Base.__init__(self, region, resolution, cache_dir)
self.output_dir = output_dir
self.fast_mode = fast_mode
self.variant = variant
self.is_custom = is_custom
# init the population (projections) modules
self.mye_api = myedata.MYEData(cache_dir)
self.npp_api = nppdata.NPPData(cache_dir)
if self.is_custom:
if variant not in customsnppdata.list_custom_projections(cache_dir):
raise ValueError("Requested custom SNPP %s is not in the cache directory (%s)" % (variant, cache_dir))
print("Using custom SNPP variant %s" % variant)
print("NOTE: assuming custom SNPP variant disables rescaling to national variant")
self.snpp_api = customsnppdata.CustomSNPPData(variant, cache_dir)
else:
self.snpp_api = snppdata.SNPPData(cache_dir)
# validation
if not is_custom and self.variant not in nppdata.NPPData.VARIANTS:
raise ValueError(self.variant + " is not a known projection variant")
if not isinstance(self.fast_mode, bool):
raise ValueError("fast mode should be boolean")
# TODO enable 2001 ref year?
# (down)load the census 2011 tables
self.__get_census_data()
def run(self, ref_year, target_year):
"""
Run the sequence
"""
# TODO enable 2001 ref year?
if ref_year != 2011:
raise ValueError("(census) reference year must be 2011")
if target_year < 2001:
raise ValueError("2001 is the earliest supported target year")
if target_year > self.npp_api.max_year():
raise ValueError(str(self.npp_api.max_year()) + " is the current latest supported end year")
if self.fast_mode:
print("Running in fast mode. Rounded IPF populations may not exactly match the marginals")
print("Starting microsynthesis sequence...")
for year in utils.year_sequence(ref_year, target_year):
out_file = self.output_dir + "/ssm_" + self.region + "_" + self.resolution + "_" + self.variant + "_" + str(
year) + ".csv"
# this is inconsistent with the household microsynth (batch script checks whether output exists)
# TODO make them consistent?
# With dynamic update of seed for now just recompute even if file exists
# if not os.path.isfile(out_file):
if year < self.snpp_api.min_year(self.region):
source = " [MYE]"
elif year <= self.snpp_api.max_year(self.region):
source = " [SNPP]"
else:
source = " [XNPP]"
print("Generating ", out_file, source, "... ",
sep="", end="", flush=True)
msynth = self.__microsynthesise(year)
print("OK")
msynth.to_csv(out_file, index_label="PID")
def __microsynthesise(self, year): # LAD=self.region
# Census/seed proportions for geography and ethnicity
oa_prop = self.seed.sum((1, 2, 3)) / self.seed.sum()
eth_prop = self.seed.sum((0, 1, 2)) / self.seed.sum()
if year < self.snpp_api.min_year(self.region):
age_sex = utils.create_age_sex_marginal(utils.adjust_pp_age(self.mye_api.filter(self.region, year)),
self.region)
elif year <= self.npp_api.max_year():
# Don't attempt to apply NPP variant if before the start of the NPP data, or it's a custom SNPP
if year < self.npp_api.min_year() or self.is_custom:
age_sex = utils.create_age_sex_marginal(utils.adjust_pp_age(self.snpp_api.filter(self.region, year)),
self.region)
else:
age_sex = utils.create_age_sex_marginal(
utils.adjust_pp_age(self.snpp_api.create_variant(self.variant, self.npp_api, self.region, year)),
self.region)
else:
raise ValueError("Cannot microsimulate past NPP horizon year ({})", self.npp_api.max_year())
# convert proportions/probabilities to integer frequencies
oa = hl.prob2IntFreq(oa_prop, age_sex.sum())["freq"]
eth = hl.prob2IntFreq(eth_prop, age_sex.sum())["freq"]
# combine the above into a 2d marginal using QIS-I and census 2011 or later data as the seed
oa_eth = hl.qisi(self.seed.sum((1, 2)), [np.array([0]), np.array([1])], [oa, eth])
if not (isinstance(oa_eth, dict) and oa_eth["conv"]):
raise RuntimeError("oa_eth did not converge")
# now the full seeded microsynthesis
if self.fast_mode:
msynth = hl.ipf(self.seed, [np.array([0, 3]), np.array([1, 2])],
[oa_eth["result"].astype(float), age_sex.astype(float)])
else:
msynth = hl.qisi(self.seed, [np.array([0, 3]), np.array([1, 2])], [oa_eth["result"], age_sex])
if not msynth["conv"]:
print(msynth)
raise RuntimeError("msynth did not converge")
# print(msynth["pop"])
if self.fast_mode:
print("updating seed to", year, " ", end="")
self.seed = msynth["result"]
msynth["result"] = np.around(msynth["result"]).astype(int)
else:
print("updating seed to", year, " ", end="")
self.seed = msynth["result"].astype(float)
rawtable = hl.flatten(msynth["result"]) # , c("OA", "SEX", "AGE", "ETH"))
# col names and remapped values
table = pd.DataFrame(columns=["Area", "DC1117EW_C_SEX", "DC1117EW_C_AGE", "DC2101EW_C_ETHPUK11"])
table.Area = utils.remap(rawtable[0], self.geog_map)
table.DC1117EW_C_SEX = utils.remap(rawtable[1], [1, 2])
table.DC1117EW_C_AGE = utils.remap(rawtable[2], range(1, 87))
table.DC2101EW_C_ETHPUK11 = utils.remap(rawtable[3], self.eth_map)
# consistency checks (in fast mode just report discrepancies)
self.__check(table, age_sex, oa_eth["result"])
return table
def __check(self, table, age_sex, oa_eth):
failures = []
# check area totals
areas = oa_eth.sum(1)
for i in range(0, len(areas)):
if len(table[table.Area == self.geog_map[i]]) != areas[i]:
failures.append("Area " + self.geog_map[i] + " total mismatch: "
+ str(len(table[table.Area == self.geog_map[i]])) + " vs " + str(areas[i]))
# check ethnicity totals
eths = oa_eth.sum(0)
for i in range(0, len(eths)):
if len(table[table.DC2101EW_C_ETHPUK11 == self.eth_map[i]]) != eths[i]:
failures.append("Ethnicity " + str(self.eth_map[i]) + " total mismatch: "
+ str(len(table[table.DC2101EW_C_ETHPUK11 == self.eth_map[i]])) + " vs " + str(eths[i]))
# check gender and age totals
for sex in [0, 1]:
for age in range(0, 86):
# print( len(table[(table.DC1117EW_C_SEX == s+1) & (table.DC1117EW_C_AGE == a+1)]), age_sex[s,a])
if len(table[(table.DC1117EW_C_SEX == sex + 1) & (table.DC1117EW_C_AGE == age + 1)]) != age_sex[
sex, age]:
failures.append("Age-gender " + str(age + 1) + "/" + str(sex + 1) + " total mismatch: "
+ str(
len(table[(table.DC1117EW_C_SEX == sex + 1) & (table.DC1117EW_C_AGE == age + 1)]))
+ " vs " + str(age_sex[sex, age]))
if failures and not self.fast_mode:
print("\n".join(failures))
raise RuntimeError("Consistency checks failed, see log for further details")
def __get_census_data(self):
(dc1117, dc2101, dc6206) = self.get_census_data()
# add children to adult-only table
# dc6206ew_adj = self.append_children(dc1117, dc6206)
# For now we drop NS-SEC (not clear if needed)
dc6206_adj = None
self.geog_map = dc1117.GEOGRAPHY_CODE.unique()
self.eth_map = dc2101.C_ETHPUK11.unique()
# self.nssec_map = dc6206ew_adj.C_NSSEC.unique()
# TODO seed with microdata
self.cen11 = utils.microsynthesise_seed(dc1117, dc2101, dc6206_adj)
# seed defaults to census 11 data, updates as simulate past 2011
self.seed = self.cen11.astype(float)
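# Hedged usage sketch (not part of the original module): the LAD code,
# geographical resolution, projection variant and directories below are
# hypothetical placeholders chosen only to show the call sequence.
if __name__ == "__main__":
    microsynth = SequentialMicrosynthesis(
        region="E08000021", resolution="MSOA11", variant="ppp",
        cache_dir="./cache", output_dir="./data")
    microsynth.run(ref_year=2011, target_year=2021)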
| 45.461538
| 121
| 0.603955
|
6900fe08f4d8b068481d7dbed2105bc7d3a47b25
| 6,406
|
py
|
Python
|
localstack/utils/persistence.py
|
ninhkd/localstack
|
9a415e2067f6fafa3cdc9dd84f5b491b0b2a2acd
|
[
"Apache-2.0"
] | 1
|
2020-09-27T06:56:06.000Z
|
2020-09-27T06:56:06.000Z
|
localstack/utils/persistence.py
|
ninhkd/localstack
|
9a415e2067f6fafa3cdc9dd84f5b491b0b2a2acd
|
[
"Apache-2.0"
] | null | null | null |
localstack/utils/persistence.py
|
ninhkd/localstack
|
9a415e2067f6fafa3cdc9dd84f5b491b0b2a2acd
|
[
"Apache-2.0"
] | 1
|
2020-08-07T12:49:58.000Z
|
2020-08-07T12:49:58.000Z
|
import os
import re
import json
import base64
import traceback
import requests
import logging
from six import add_metaclass
from abc import ABCMeta, abstractmethod
from localstack.config import DATA_DIR
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_bytes, to_str
from localstack.utils.bootstrap import is_api_enabled
from localstack.services.generic_proxy import ProxyListener
USE_SINGLE_DUMP_FILE = True
if USE_SINGLE_DUMP_FILE:
API_FILE_PATTERN = '{data_dir}/recorded_api_calls.json'
else:
API_FILE_PATTERN = '{data_dir}/{api}_api_calls.json'
# Stack with flags to indicate whether we are currently re-playing API calls.
# (We should not be re-playing and recording at the same time)
CURRENTLY_REPLAYING = []
# file paths by API
API_FILE_PATHS = {}
# set up logger
LOG = logging.getLogger(__name__)
@add_metaclass(ABCMeta)
class PersistingProxyListener(ProxyListener):
"""
This proxy listener could be extended by any API that wishes to record its requests and responses,
via the existing persistence facility.
"""
    SKIP_PERSISTENCE_TARGET_METHOD_REGEX = re.compile(
        r'.*\.List|.*\.Describe|.*\.Get', re.IGNORECASE)
def return_response(self, method, path, data, headers, response, request_handler=None):
res = super(PersistingProxyListener, self).return_response(method, path, data, headers, response,
request_handler)
if self.should_persist(method, path, data, headers, response):
record(self.api_name(), to_str(method), to_str(path), data, headers, response)
return res
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def should_persist(self, method, path, data, headers, response):
"""
Every API listener may choose which endpoints should be persisted;
The default behavior is persisting all calls with:
- HTTP PUT / POST / DELETE methods
- Successful response (non 4xx, 5xx)
- Excluding methods with 'Describe', 'List', and 'Get' in the X-Amz-Target header
:param method: The HTTP method name (e.g. 'GET', 'POST')
:param path: The HTTP path (e.g. '/update')
:param data: The request body
:param headers: HTTP response headers
:param response: HTTP response object
:return: If True, will persist the current API call.
:rtype bool
"""
target_method = headers.get('X-Amz-Target', '')
        skip_target_method = self.SKIP_PERSISTENCE_TARGET_METHOD_REGEX.match(target_method)
return should_record(method) and response is not None and response.ok and skip_target_method is None
@abstractmethod
def api_name(self):
""" This should return the name of the API we're operating against, e.g. 'sqs' """
raise NotImplementedError('Implement me')
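# Illustrative sketch (not part of the original listener set): a minimal
# concrete subclass showing the single method a persisting listener has to
# provide. The 'example' API name is hypothetical and the class is not
# registered anywhere by this module.
class _ExamplePersistingProxyListener(PersistingProxyListener):
    """Persist mutating calls for a hypothetical 'example' API."""
    def api_name(self):
        return 'example'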
def should_record(method):
""" Decide whether or not a given API call should be recorded (persisted to disk) """
return method in ['PUT', 'POST', 'DELETE']
def record(api, method=None, path=None, data=None, headers=None, response=None, request=None):
""" Record a given API call to a persistent file on disk """
file_path = get_file_path(api)
if CURRENTLY_REPLAYING or not file_path:
return
if request:
method = method or request.method
path = path or request.path
headers = headers or request.headers
data = data or request.data
should_be_recorded = should_record(method)
if not should_be_recorded:
return
try:
if isinstance(data, dict):
data = json.dumps(data)
def get_recordable_data(request_data):
if request_data or request_data in [u'', b'']:
try:
request_data = to_bytes(request_data)
except Exception as ex:
LOG.warning('Unable to call to_bytes: %s' % ex)
request_data = to_str(base64.b64encode(request_data))
return request_data
data = get_recordable_data(data)
response_data = get_recordable_data('' if response is None else response.content)
entry = {
'a': api,
'm': method,
'p': path,
'd': data,
'h': dict(headers),
'rd': response_data
}
with open(file_path, 'a') as dumpfile:
dumpfile.write('%s\n' % json.dumps(entry))
except Exception as e:
print('Error recording API call to persistent file: %s %s' % (e, traceback.format_exc()))
def prepare_replay_data(command):
data = command['d']
data = data and base64.b64decode(data)
return data
def replay_command(command):
api = command['a']
if not is_api_enabled(api):
return
function = getattr(requests, command['m'].lower())
data = prepare_replay_data(command)
endpoint = aws_stack.get_local_service_url(api)
full_url = (endpoint[:-1] if endpoint.endswith('/') else endpoint) + command['p']
response = function(full_url, data=data, headers=command['h'], verify=False)
return response
def replay(api):
file_path = get_file_path(api)
if not file_path:
return
CURRENTLY_REPLAYING.append(True)
count = 0
try:
with open(file_path, 'r') as reader:
for line in reader:
if line.strip():
count += 1
command = json.loads(line)
replay_command(command)
finally:
CURRENTLY_REPLAYING.pop(0)
if count:
LOG.info('Restored %s API calls from persistent file: %s' % (count, file_path))
def restore_persisted_data(apis):
if USE_SINGLE_DUMP_FILE:
return replay('_all_')
apis = apis if isinstance(apis, list) else [apis]
for api in apis:
replay(api)
# ---------------
# HELPER METHODS
# ---------------
def get_file_path(api, create=True):
if api not in API_FILE_PATHS:
API_FILE_PATHS[api] = False
if not DATA_DIR:
return False
file_path = API_FILE_PATTERN.format(data_dir=DATA_DIR, api=api)
if create and not os.path.exists(file_path):
with open(file_path, 'a'):
os.utime(file_path, None)
if os.path.exists(file_path):
API_FILE_PATHS[api] = file_path
return API_FILE_PATHS.get(api)
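# Hedged usage sketch (not part of the original module): how recording and
# replaying fit together when DATA_DIR is configured. The request path is a
# hypothetical placeholder and the helper is never called on import.
def _example_record_and_restore():
    """Record a hypothetical mutating 'sqs' call, then replay persisted calls."""
    record('sqs', method='POST', path='/queue/example', data='{}', headers={})
    restore_persisted_data(['sqs'])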
| 33.364583
| 108
| 0.645489
|
959b337839feb408e0ca14c539bdba98142b253c
| 1,572
|
py
|
Python
|
src/PythonClient.py
|
indiajoe/JuliaProcessingServer.jl
|
75e683902fc4db011a9ed811c39c3351140bbfc6
|
[
"TCP-wrappers"
] | null | null | null |
src/PythonClient.py
|
indiajoe/JuliaProcessingServer.jl
|
75e683902fc4db011a9ed811c39c3351140bbfc6
|
[
"TCP-wrappers"
] | null | null | null |
src/PythonClient.py
|
indiajoe/JuliaProcessingServer.jl
|
75e683902fc4db011a9ed811c39c3351140bbfc6
|
[
"TCP-wrappers"
] | null | null | null |
#!/usr/bin/env python
""" This script is to send numpy assray to the Julia server """
import socket
import numpy as np
import logging
from cStringIO import StringIO
PortOfServer = 8006
Array = np.random.random((3,4,5))*10
def get_RemoteProcessedData(DataCube,port,hostname="localhost"):
""" Sends the DataCube to server at hostname:port and return the data received back from server """
client_socket = socket.socket()
try:
client_socket.connect((hostname,port))
except socket.error as e:
logging.error('Unable to connect to Data Processing server {0}:{1}'.format(hostname,port))
raise
logging.info('Sending ndarray of shape {0} to {1}:{2}'.format(DataCube.shape,hostname,port))
# Send the Array
f = StringIO()
np.save(f,DataCube)
f.seek(0)
client_socket.sendall(f.read())
f.close()
# Now start reading back form the socket
ultimate_buffer = ""
while True:
receiving_buffer = client_socket.recv(1024)
if not receiving_buffer: break
ultimate_buffer += receiving_buffer
DataBack = np.load(StringIO(ultimate_buffer))
logging.info('Received back ndarray of shape {0}'.format(DataBack.shape))
client_socket.close()
return DataBack
print('Sending The following array')
print(Array)
ProcessedArray = get_RemoteProcessedData(Array,PortOfServer)
print('Array Received back:')
print(ProcessedArray)
| 34.173913
| 110
| 0.638041
|
2b2c4cf0a90c3d057f8f22b8501b11ab25bdb337
| 16,122
|
py
|
Python
|
Lib/test/test_ftplib.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_ftplib.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_ftplib.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import io
from unittest import TestCase
from test import support
from test.support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
self.baseclass.push('226 transfer complete')
self.close()
def push(self, what):
super(DummyDTPHandler, self).push(what.encode('ascii'))
class DummyFTPHandler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=10)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
        ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=10)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
self.dtp.push(RETR_DATA)
self.dtp.close_when_done()
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send(b"1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
| 30.708571
| 82
| 0.607865
|
4d63af818381889409dd4ee964b413ffc8d6d8c0
| 10,829
|
py
|
Python
|
rllib/policy/torch_policy.py
|
eisber/ray
|
94a286ef1d8ad5a3093b7f996a811727fa0e2d3e
|
[
"Apache-2.0"
] | null | null | null |
rllib/policy/torch_policy.py
|
eisber/ray
|
94a286ef1d8ad5a3093b7f996a811727fa0e2d3e
|
[
"Apache-2.0"
] | null | null | null |
rllib/policy/torch_policy.py
|
eisber/ray
|
94a286ef1d8ad5a3093b7f996a811727fa0e2d3e
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import time
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import try_import_torch
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.tracking_dict import UsageTrackingDict
from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule
torch, _ = try_import_torch()
class TorchPolicy(Policy):
"""Template for a PyTorch policy and loss to use with RLlib.
This is similar to TFPolicy, but for PyTorch.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
model (TorchModel): Torch model instance
dist_class (type): Torch action distribution class
"""
def __init__(self, observation_space, action_space, config, model, loss,
action_distribution_class):
"""Build a policy from policy and loss torch modules.
Note that model will be placed on GPU device if CUDA_VISIBLE_DEVICES
is set. Only single GPU is supported for now.
Arguments:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): The Policy config dict.
model (nn.Module): PyTorch policy module. Given observations as
input, this module must return a list of outputs where the
first item is action logits, and the rest can be any value.
loss (func): Function that takes (policy, model, dist_class,
train_batch) and returns a single scalar loss.
action_distribution_class (ActionDistribution): Class for action
distribution.
"""
super().__init__(observation_space, action_space, config)
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
self.model = model.to(self.device)
self.unwrapped_model = model # used to support DistributedDataParallel
self._loss = loss
self._optimizer = self.optimizer()
self.dist_class = action_distribution_class
# If set, means we are using distributed allreduce during learning.
self.distributed_world_size = None
@override(Policy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
explore=True,
timestep=None,
**kwargs):
with torch.no_grad():
input_dict = self._lazy_tensor_dict({
SampleBatch.CUR_OBS: obs_batch,
})
if prev_action_batch:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
if prev_reward_batch:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
model_out = self.model(input_dict, state_batches, [1])
logits, state = model_out
action_dist = self.dist_class(logits, self.model)
# Try our Exploration, if any.
if self.exploration:
actions = self.exploration.get_action(
model_out, self.model, action_dist, explore, timestep
if timestep is not None else self.global_timestep)
else:
actions = action_dist.sample()
input_dict[SampleBatch.ACTIONS] = actions
return (actions.cpu().numpy(), [h.cpu().numpy() for h in state],
self.extra_action_out(input_dict, state_batches,
self.model, action_dist))
@override(Policy)
def learn_on_batch(self, postprocessed_batch):
train_batch = self._lazy_tensor_dict(postprocessed_batch)
loss_out = self._loss(self, self.model, self.dist_class, train_batch)
self._optimizer.zero_grad()
loss_out.backward()
info = {}
info.update(self.extra_grad_process())
if self.distributed_world_size:
grads = []
for p in self.model.parameters():
if p.grad is not None:
grads.append(p.grad)
start = time.time()
if torch.cuda.is_available():
# Sadly, allreduce_coalesced does not work with CUDA yet.
for g in grads:
torch.distributed.all_reduce(
g, op=torch.distributed.ReduceOp.SUM)
else:
torch.distributed.all_reduce_coalesced(
grads, op=torch.distributed.ReduceOp.SUM)
for p in self.model.parameters():
if p.grad is not None:
p.grad /= self.distributed_world_size
info["allreduce_latency"] = time.time() - start
self._optimizer.step()
info.update(self.extra_grad_info(train_batch))
return {LEARNER_STATS_KEY: info}
@override(Policy)
def compute_gradients(self, postprocessed_batch):
train_batch = self._lazy_tensor_dict(postprocessed_batch)
loss_out = self._loss(self, self.model, self.dist_class, train_batch)
self._optimizer.zero_grad()
loss_out.backward()
grad_process_info = self.extra_grad_process()
# Note that return values are just references;
# calling zero_grad will modify the values
grads = []
for p in self.model.parameters():
if p.grad is not None:
grads.append(p.grad.data.cpu().numpy())
else:
grads.append(None)
grad_info = self.extra_grad_info(train_batch)
grad_info.update(grad_process_info)
return grads, {LEARNER_STATS_KEY: grad_info}
@override(Policy)
def apply_gradients(self, gradients):
for g, p in zip(gradients, self.model.parameters()):
if g is not None:
p.grad = torch.from_numpy(g).to(self.device)
self._optimizer.step()
@override(Policy)
def get_weights(self):
return {k: v.cpu() for k, v in self.model.state_dict().items()}
@override(Policy)
def set_weights(self, weights):
self.model.load_state_dict(weights)
@override(Policy)
def is_recurrent(self):
return len(self.model.get_initial_state()) > 0
@override(Policy)
def num_state_tensors(self):
return len(self.model.get_initial_state())
@override(Policy)
def get_initial_state(self):
return [s.numpy() for s in self.model.get_initial_state()]
def extra_grad_process(self):
"""Allow subclass to do extra processing on gradients and
return processing info."""
return {}
def extra_action_out(self,
input_dict,
state_batches,
model,
action_dist=None):
"""Returns dict of extra info to include in experience batch.
Arguments:
input_dict (dict): Dict of model input tensors.
state_batches (list): List of state tensors.
model (TorchModelV2): Reference to the model.
action_dist (Distribution): Torch Distribution object to get
log-probs (e.g. for already sampled actions).
"""
return {}
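    # Illustrative sketch (hypothetical subclass override, not part of this class):
    # a policy that wants per-action log-probabilities in its sample batches could
    # override extra_action_out() along these lines, relying on compute_actions()
    # having stored the sampled actions in input_dict before calling it:
    #
    #     def extra_action_out(self, input_dict, state_batches, model,
    #                          action_dist=None):
    #         return {"action_logp": action_dist.logp(
    #             input_dict[SampleBatch.ACTIONS]).cpu().numpy()}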
def extra_grad_info(self, train_batch):
"""Return dict of extra grad info."""
return {}
def optimizer(self):
"""Custom PyTorch optimizer to use."""
if hasattr(self, "config"):
return torch.optim.Adam(
self.model.parameters(), lr=self.config["lr"])
else:
return torch.optim.Adam(self.model.parameters())
def _lazy_tensor_dict(self, postprocessed_batch):
train_batch = UsageTrackingDict(postprocessed_batch)
def convert(arr):
if torch.is_tensor(arr):
return arr.to(self.device)
tensor = torch.from_numpy(np.asarray(arr))
if tensor.dtype == torch.double:
tensor = tensor.float()
return tensor.to(self.device)
train_batch.set_get_interceptor(convert)
return train_batch
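    # Note (added for clarity, not in the original code): because the conversion is
    # installed as a get-interceptor, columns of the postprocessed batch are turned
    # into tensors on self.device lazily, only when the loss actually reads them.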
@override(Policy)
def export_model(self, export_dir):
"""TODO: implement for torch.
"""
raise NotImplementedError
@override(Policy)
def export_checkpoint(self, export_dir):
"""TODO: implement for torch.
"""
raise NotImplementedError
@DeveloperAPI
class LearningRateSchedule:
"""Mixin for TFPolicy that adds a learning rate schedule."""
@DeveloperAPI
def __init__(self, lr, lr_schedule):
self.cur_lr = lr
if lr_schedule is None:
self.lr_schedule = ConstantSchedule(lr)
else:
self.lr_schedule = PiecewiseSchedule(
lr_schedule, outside_value=lr_schedule[-1][-1])
@override(Policy)
def on_global_var_update(self, global_vars):
super(LearningRateSchedule, self).on_global_var_update(global_vars)
self.cur_lr = self.lr_schedule.value(global_vars["timestep"])
@override(TorchPolicy)
def optimizer(self):
for p in self._optimizer.param_groups:
p["lr"] = self.cur_lr
return self._optimizer
@DeveloperAPI
class EntropyCoeffSchedule:
"""Mixin for TorchPolicy that adds entropy coeff decay."""
@DeveloperAPI
def __init__(self, entropy_coeff, entropy_coeff_schedule):
self.entropy_coeff = entropy_coeff
if entropy_coeff_schedule is None:
self.entropy_coeff_schedule = ConstantSchedule(entropy_coeff)
else:
# Allows for custom schedule similar to lr_schedule format
if isinstance(entropy_coeff_schedule, list):
self.entropy_coeff_schedule = PiecewiseSchedule(
entropy_coeff_schedule,
outside_value=entropy_coeff_schedule[-1][-1])
else:
# Implements previous version but enforces outside_value
self.entropy_coeff_schedule = PiecewiseSchedule(
[[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
outside_value=0.0)
@override(Policy)
def on_global_var_update(self, global_vars):
super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
self.entropy_coeff = self.entropy_coeff_schedule.value(
global_vars["timestep"])
| 37.085616
| 79
| 0.614831
|
61523daa8578e69fbc093207761c283f79840f70
| 1,521
|
py
|
Python
|
instaphotos/migrations/0001_initial.py
|
LekamCharity/insta-IG
|
0302440df3b2029297af54eb9c56090f82232973
|
[
"MIT"
] | null | null | null |
instaphotos/migrations/0001_initial.py
|
LekamCharity/insta-IG
|
0302440df3b2029297af54eb9c56090f82232973
|
[
"MIT"
] | null | null | null |
instaphotos/migrations/0001_initial.py
|
LekamCharity/insta-IG
|
0302440df3b2029297af54eb9c56090f82232973
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-02 09:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
                ('profile_pic', models.ImageField(blank=True, upload_to='new_post/')),
('bio', models.TextField()),
('user', models.OneToOneField(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, default='default.jpg', upload_to='new_post/')),
('title', models.CharField(default='', max_length=30)),
('caption', models.TextField(max_length=300)),
('user', models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='author', to=settings.AUTH_USER_MODEL)),
],
),
]
| 40.026316
| 164
| 0.608153
|
2cf36141692dd986988c8b9efd2d632ac1b36845
| 423
|
py
|
Python
|
swagger/swagger_server/test/__init__.py
|
mcclown/AutoReef.APIGateway
|
b77dd6e50beba3d0bb36dfc948e8514a7e5a425d
|
[
"MIT"
] | null | null | null |
swagger/swagger_server/test/__init__.py
|
mcclown/AutoReef.APIGateway
|
b77dd6e50beba3d0bb36dfc948e8514a7e5a425d
|
[
"MIT"
] | 5
|
2018-03-12T13:24:49.000Z
|
2018-03-12T13:37:14.000Z
|
swagger/swagger_server/test/__init__.py
|
mcclown/AutoReef.APIGateway
|
b77dd6e50beba3d0bb36dfc948e8514a7e5a425d
|
[
"MIT"
] | null | null | null |
import logging
import connexion
from flask_testing import TestCase
from swagger.swagger_server.encoder import JSONEncoder
class BaseTestCase(TestCase):
def create_app(self):
logging.getLogger('connexion.operation').setLevel('ERROR')
app = connexion.App(__name__, specification_dir='../swagger/')
app.app.json_encoder = JSONEncoder
app.add_api('swagger.yaml')
return app.app
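# Illustrative sketch (hypothetical test module, not part of this package): concrete
# test cases subclass BaseTestCase and use the Flask-Testing client it provides, e.g.
#
#     class TestHealthController(BaseTestCase):
#         def test_ping(self):
#             response = self.client.get('/ping')
#             self.assert200(response)
#
# The '/ping' route is only an assumed example; real tests target the operations
# declared in swagger.yaml.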
| 24.882353
| 70
| 0.723404
|
c47ad3a1d836bf9c4e3579766609192e22e791ff
| 10,968
|
py
|
Python
|
_nt.py
|
gndu91/Windows-Sound-Manager
|
cdcf7a0044aed14abf5a4c48babf8f7ce2ff4fab
|
[
"MIT"
] | null | null | null |
_nt.py
|
gndu91/Windows-Sound-Manager
|
cdcf7a0044aed14abf5a4c48babf8f7ce2ff4fab
|
[
"MIT"
] | null | null | null |
_nt.py
|
gndu91/Windows-Sound-Manager
|
cdcf7a0044aed14abf5a4c48babf8f7ce2ff4fab
|
[
"MIT"
] | null | null | null |
# Code found in the Python Mailing List:
# https://mail.python.org/pipermail/python-win32/2014-March/013080.html
# Author: Tim Roberts timr at probo.com
# Tested on my computer (Windows 10, Python 3.7, AMD64)
import operator
import random
import unittest
from ctypes.wintypes import BOOL
import comtypes.client
from comtypes import *
try:
import win32com
except ImportError as e:
raise ImportError(
'An error occurred while trying to import some packages.\n'
'Make sure pypiwin32 is installed (command: `pip install pypiwin32`)\n'
f'Origin: {e!r}'
)
MMDeviceApiLib = GUID('{2FDAAFA3-7523-4F66-9957-9D5E7FE698F6}')
IID_IMMDevice = GUID('{D666063F-1587-4E43-81F1-B948E807363F}')
IID_IMMDeviceEnumerator = GUID('{A95664D2-9614-4F35-A746-DE8DB63617E6}')
CLSID_MMDeviceEnumerator = GUID('{BCDE0395-E52F-467C-8E3D-C4579291692E}')
IID_IMMDeviceCollection = GUID('{0BD7A1BE-7A1A-44DB-8397-CC5392387B5E}')
IID_IAudioEndpointVolume = GUID('{5CDF2C82-841E-4546-9722-0CF74078229A}')
class IMMDeviceCollection(IUnknown):
_iid_ = GUID('{0BD7A1BE-7A1A-44DB-8397-CC5392387B5E}')
class IAudioEndpointVolume(IUnknown):
_iid_ = GUID('{5CDF2C82-841E-4546-9722-0CF74078229A}')
_methods_ = [
STDMETHOD(HRESULT, 'RegisterControlChangeNotify', []),
STDMETHOD(HRESULT, 'UnregisterControlChangeNotify', []),
STDMETHOD(HRESULT, 'GetChannelCount', []),
COMMETHOD([], HRESULT, 'SetMasterVolumeLevel',
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'SetMasterVolumeLevelScalar',
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'GetMasterVolumeLevel',
(['out', 'retval'], POINTER(c_float), 'pfLevelDB')),
COMMETHOD([], HRESULT, 'GetMasterVolumeLevelScalar',
(['out', 'retval'], POINTER(c_float), 'pfLevelDB')),
COMMETHOD([], HRESULT, 'SetChannelVolumeLevel',
(['in'], DWORD, 'nChannel'),
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'SetChannelVolumeLevelScalar',
(['in'], DWORD, 'nChannel'),
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'GetChannelVolumeLevel',
(['in'], DWORD, 'nChannel'),
(['out', 'retval'], POINTER(c_float), 'pfLevelDB')),
COMMETHOD([], HRESULT, 'GetChannelVolumeLevelScalar',
(['in'], DWORD, 'nChannel'),
(['out', 'retval'], POINTER(c_float), 'pfLevelDB')),
COMMETHOD([], HRESULT, 'SetMute',
(['in'], BOOL, 'bMute'),
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'GetMute',
(['out', 'retval'], POINTER(BOOL), 'pbMute')),
COMMETHOD([], HRESULT, 'GetVolumeStepInfo',
(['out', 'retval'], POINTER(c_float), 'pnStep'),
(['out', 'retval'], POINTER(c_float), 'pnStepCount')),
COMMETHOD([], HRESULT, 'VolumeStepUp',
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'VolumeStepDown',
(['in'], POINTER(GUID), 'pguidEventContext')),
COMMETHOD([], HRESULT, 'QueryHardwareSupport',
(['out', 'retval'], POINTER(DWORD), 'pdwHardwareSupportMask')),
COMMETHOD([], HRESULT, 'GetVolumeRange',
(['out', 'retval'], POINTER(c_float), 'pfMin'),
(['out', 'retval'], POINTER(c_float), 'pfMax'),
(['out', 'retval'], POINTER(c_float), 'pfIncr'))]
class IMMDevice(IUnknown):
_iid_ = GUID('{D666063F-1587-4E43-81F1-B948E807363F}')
_methods_ = [
COMMETHOD([], HRESULT, 'Activate',
(['in'], POINTER(GUID), 'iid'),
(['in'], DWORD, 'dwClsCtx'),
(['in'], POINTER(DWORD), 'pActivationParans'),
(['out', 'retval'], POINTER(POINTER(IAudioEndpointVolume)), 'ppInterface')),
STDMETHOD(HRESULT, 'OpenPropertyStore', []),
STDMETHOD(HRESULT, 'GetId', []),
STDMETHOD(HRESULT, 'GetState', [])]
class IMMDeviceEnumerator(comtypes.IUnknown):
_iid_ = GUID('{A95664D2-9614-4F35-A746-DE8DB63617E6}')
_methods_ = [
COMMETHOD([], HRESULT, 'EnumAudioEndpoints',
(['in'], DWORD, 'dataFlow'),
(['in'], DWORD, 'dwStateMask'),
(['out', 'retval'], POINTER(POINTER(IMMDeviceCollection)), 'ppDevices')),
COMMETHOD([], HRESULT, 'GetDefaultAudioEndpoint',
(['in'], DWORD, 'dataFlow'),
(['in'], DWORD, 'role'),
(['out', 'retval'], POINTER(POINTER(IMMDevice)), 'ppDevices'))]
class AudioDevice:
def __init__(self, dev):
self.__device = dev
@staticmethod
def get_default_output_device():
"""Return the default device.
Warning: the device may not always be the default one, for
instance, if the user connects/disconnects earphones."""
enumerator = comtypes.CoCreateInstance(
CLSID_MMDeviceEnumerator,
IMMDeviceEnumerator,
comtypes.CLSCTX_INPROC_SERVER
)
endpoint = enumerator.GetDefaultAudioEndpoint(0, 1)
return AudioDevice(endpoint.Activate(IID_IAudioEndpointVolume, comtypes.CLSCTX_INPROC_SERVER, None))
@classmethod
def sanitize_volume(cls, volume):
volume = float(volume)
if volume < 0:
return 0
if volume > 100:
return 100
return volume
@property
def current(self) -> float:
"""Return the current in percent."""
return round(self.__device.GetMasterVolumeLevelScalar() * 100)
@current.setter
def current(self, value: float):
# TODO: Learn more about the `pguidEventContext` argument
print('Set to', self.sanitize_volume(value))
self.__device.SetMasterVolumeLevelScalar(self.sanitize_volume(value) / 100, None)
@property
def mute(self):
return bool(self.__device.GetMute())
@mute.setter
def mute(self, value):
self.__device.SetMute(bool(value), None)
def increase(self):
self.__device.VolumeStepUp(None)
def decrease(self):
self.__device.VolumeStepDown(None)
def __iadd__(self, other):
self.current = self.current + other
return self
def __isub__(self, other):
self.current = self.current - other
return self
def __imul__(self, other):
self.current = self.current * other
return self
    def __itruediv__(self, other):  # was __idiv__ (Python 2 name); needed so /= keeps the AudioDevice
        self.current = self.current / other
        return self
def __imod__(self, other):
self.current = self.current % other
return self
# TODO: Add rounding before we compare
def __cmp__(self, other):
return (self.current > other) - (other > self.current)
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __int__(self):
return int(self.current)
    def __float__(self):
        return float(self.current)  # __float__ must return a real float in Python 3
def __bool__(self):
return bool(int(self))
def __add__(self, other):
return self.current + other
def __sub__(self, other):
return self.current - other
def __mul__(self, other):
return self.current * other
def __truediv__(self, other):
return operator.truediv(self.current, other)
def __divmod__(self, other):
return divmod(self.current, other)
def __mod__(self, other):
return self.current % other
def __neg__(self):
return -self.current
def __abs__(self):
"""The volume is always positive"""
return self.current
def __radd__(self, other):
return self.current + other
    def __rtruediv__(self, other):  # was __rdiv__ (Python 2 name); needed for `number / device`
        return other / self.current
def __rdivmod__(self, other):
return divmod(other, self.current)
def __rfloordiv__(self, other):
return other // self.current
def __rmod__(self, other):
return operator.mod(other, self.current)
def __rmul__(self, other):
return self.current * other
def __rsub__(self, other):
return other - self.current
__device = None
def refresh_device():
"""Refresh the current audio device."""
global __device
__device = AudioDevice.get_default_output_device()
def get_volume_device() -> AudioDevice:
"""
    TODO: Force the device to be refreshed on a regular basis, for instance
    in case a headphone is connected/disconnected
TODO: Decide whether or not race conditions can be a problem
:return:
"""
if __device is None:
refresh_device()
assert isinstance(__device, AudioDevice)
return __device
class Tests(unittest.TestCase):
# TODO: Add more tests
def test_operations(self):
device = get_volume_device()
assert 1 + device - 1 == device
old = device.current
device *= 1
self.assertEqual(device, old)
device /= 1
self.assertEqual(device, old)
device += 0
self.assertEqual(device, old)
device -= 0
self.assertEqual(device, old)
self.assertEqual(device - device, 0)
self.assertEqual(device + device, device * 2)
self.assertEqual(device + device, 2 * device)
self.assertEqual(device % 100, old)
for i in range(256):
string = '\t'.join((
random.choice(('device', f'{random.random() * 1024}')),
random.choice('+*/-%'),
random.choice(('device', f'{(random.random()) * 1024}'))))
print(string + ' = ' + str(eval(string)))
self.assertEqual(eval(string), eval(string.replace('device', str(float(device)))))
def test_setter(self):
device = get_volume_device()
old = device.current
assert old == device.current
device.current = old
assert old == device.current
assert old == device
assert old + 1 == device + 1
assert old + 1 != device - 1
print(('%r' % device.current) + '%')
device.current = 64
device.mute = 0
device += 1
print(('%r' % device.current) + '%')
device += 1
print(repr(device.mute))
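# Illustrative usage sketch (not part of the original module; assumes a default
# audio output device is available):
#
#     device = get_volume_device()
#     device.current = 50      # set the master volume to 50 %
#     device += 5              # arithmetic operators act on the volume percentage
#     device.mute = True       # mute the default output device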
| 33.747692
| 108
| 0.591265
|
8bd28155c7551f3e6204599c3605beb018b92473
| 37,999
|
py
|
Python
|
scripts/python/catalyst/tests/test_build_isothermal.py
|
coelectrolyzer/cats
|
21f8e6f5f176767ec403ad2738c80a5a71fba959
|
[
"MIT"
] | null | null | null |
scripts/python/catalyst/tests/test_build_isothermal.py
|
coelectrolyzer/cats
|
21f8e6f5f176767ec403ad2738c80a5a71fba959
|
[
"MIT"
] | null | null | null |
scripts/python/catalyst/tests/test_build_isothermal.py
|
coelectrolyzer/cats
|
21f8e6f5f176767ec403ad2738c80a5a71fba959
|
[
"MIT"
] | null | null | null |
''' Testing of building an isothermal model '''
import sys
sys.path.append('../..')
import unittest
import pytest
from catalyst.isothermal_monolith_catalysis import *
import logging
__author__ = "Austin Ladshaw"
_log = logging.getLogger(__name__)
# Start test class
class TestBasicIsothermalCatalystBuild():
@pytest.fixture(scope="class")
def isothermal_object(self):
obj = Isothermal_Monolith_Simulator()
return obj
@pytest.fixture(scope="class")
def isothermal_object_with_lists(self):
obj = Isothermal_Monolith_Simulator()
return obj
@pytest.mark.build
def test_add_dim(self, isothermal_object):
obj = isothermal_object
obj.add_axial_dim(0,5)
assert hasattr(obj.model, 'z')
assert isinstance(obj.model.z, ContinuousSet)
@pytest.mark.build
def test_add_dim_list(self, isothermal_object_with_lists):
obj = isothermal_object_with_lists
obj.add_axial_dim(point_list=[0,1,2,3,4,5])
assert hasattr(obj.model, 'z')
assert isinstance(obj.model.z, ContinuousSet)
assert len(obj.model.z) == 6
@pytest.mark.build
def test_add_temporal_dim(self, isothermal_object):
obj = isothermal_object
obj.add_temporal_dim(0,20)
assert hasattr(obj.model, 't')
assert isinstance(obj.model.t, ContinuousSet)
@pytest.mark.build
def test_add_temporal_dim_list(self, isothermal_object_with_lists):
obj = isothermal_object_with_lists
obj.add_temporal_dim(point_list=[0,4,8,12,16,20])
assert hasattr(obj.model, 't')
assert isinstance(obj.model.t, ContinuousSet)
assert len(obj.model.t) == 6
@pytest.mark.build
def test_add_age_set(self, isothermal_object):
obj = isothermal_object
obj.add_age_set("Unaged")
assert hasattr(obj.model, 'age_set')
assert isinstance(obj.model.age_set, Set)
assert len(obj.model.age_set) == 1
@pytest.mark.build
def test_add_age_set_list(self, isothermal_object_with_lists):
obj = isothermal_object_with_lists
obj.add_age_set(["Unaged", "2hr"])
assert hasattr(obj.model, 'age_set')
assert isinstance(obj.model.age_set, Set)
assert len(obj.model.age_set) == 2
@pytest.mark.build
def test_add_temperature_set(self, isothermal_object):
obj = isothermal_object
obj.add_temperature_set("250C")
assert hasattr(obj.model, 'T_set')
assert isinstance(obj.model.T_set, Set)
assert len(obj.model.T_set) == 1
assert hasattr(obj.model, 'T')
assert isinstance(obj.model.T, Var)
assert hasattr(obj.model, 'space_velocity')
assert isinstance(obj.model.space_velocity, Var)
assert hasattr(obj.model, 'v')
assert isinstance(obj.model.v, Var)
assert hasattr(obj.model, 'P')
assert isinstance(obj.model.P, Var)
assert hasattr(obj.model, 'Tref')
assert isinstance(obj.model.Tref, Param)
assert hasattr(obj.model, 'Pref')
assert isinstance(obj.model.Pref, Param)
assert hasattr(obj.model, 'rho')
assert isinstance(obj.model.rho, Var)
assert hasattr(obj.model, 'mu')
assert isinstance(obj.model.mu, Var)
assert hasattr(obj.model, 'Re')
assert isinstance(obj.model.Re, Var)
@pytest.mark.build
def test_add_temperature_set_list(self, isothermal_object_with_lists):
obj = isothermal_object_with_lists
obj.add_temperature_set(["250C","300C"])
assert hasattr(obj.model, 'T_set')
assert isinstance(obj.model.T_set, Set)
assert len(obj.model.T_set) == 2
assert hasattr(obj.model, 'T')
assert isinstance(obj.model.T, Var)
assert hasattr(obj.model, 'space_velocity')
assert isinstance(obj.model.space_velocity, Var)
assert hasattr(obj.model, 'v')
assert isinstance(obj.model.v, Var)
assert hasattr(obj.model, 'P')
assert isinstance(obj.model.P, Var)
assert hasattr(obj.model, 'Tref')
assert isinstance(obj.model.Tref, Param)
assert hasattr(obj.model, 'Pref')
assert isinstance(obj.model.Pref, Param)
assert hasattr(obj.model, 'rho')
assert isinstance(obj.model.rho, Var)
assert hasattr(obj.model, 'mu')
assert isinstance(obj.model.mu, Var)
assert hasattr(obj.model, 'Re')
assert isinstance(obj.model.Re, Var)
@pytest.mark.build
def test_add_gas_species(self, isothermal_object):
obj = isothermal_object
obj.add_gas_species("NH3")
assert hasattr(obj.model, 'gas_set')
assert isinstance(obj.model.gas_set, Set)
assert len(obj.model.gas_set) == 1
assert hasattr(obj.model, 'Cb')
assert isinstance(obj.model.Cb, Var)
assert hasattr(obj.model, 'C')
assert isinstance(obj.model.C, Var)
assert hasattr(obj.model, 'dCb_dz')
assert isinstance(obj.model.dCb_dz, DerivativeVar)
assert hasattr(obj.model, 'dCb_dt')
assert isinstance(obj.model.dCb_dt, DerivativeVar)
assert hasattr(obj.model, 'dC_dt')
assert isinstance(obj.model.dC_dt, DerivativeVar)
assert hasattr(obj.model, 'km')
assert isinstance(obj.model.km, Var)
assert hasattr(obj.model, 'Dm')
assert isinstance(obj.model.Dm, Param)
assert hasattr(obj.model, 'Sc')
assert isinstance(obj.model.Sc, Var)
assert hasattr(obj.model, 'Sh')
assert isinstance(obj.model.Sh, Var)
@pytest.mark.build
def test_add_gas_species_list(self, isothermal_object_with_lists):
obj = isothermal_object_with_lists
obj.add_gas_species(["NH3","NO"])
assert hasattr(obj.model, 'gas_set')
assert isinstance(obj.model.gas_set, Set)
assert len(obj.model.gas_set) == 2
assert hasattr(obj.model, 'Cb')
assert isinstance(obj.model.Cb, Var)
assert hasattr(obj.model, 'C')
assert isinstance(obj.model.C, Var)
assert hasattr(obj.model, 'dCb_dz')
assert isinstance(obj.model.dCb_dz, DerivativeVar)
assert hasattr(obj.model, 'dCb_dt')
assert isinstance(obj.model.dCb_dt, DerivativeVar)
assert hasattr(obj.model, 'dC_dt')
assert isinstance(obj.model.dC_dt, DerivativeVar)
assert hasattr(obj.model, 'km')
assert isinstance(obj.model.km, Var)
assert hasattr(obj.model, 'Dm')
assert isinstance(obj.model.Dm, Param)
assert hasattr(obj.model, 'Sc')
assert isinstance(obj.model.Sc, Var)
assert hasattr(obj.model, 'Sh')
assert isinstance(obj.model.Sh, Var)
@pytest.mark.build
def test_add_surface_species(self, isothermal_object):
obj = isothermal_object
obj.add_surface_species("ZNH4")
assert hasattr(obj.model, 'surf_set')
assert isinstance(obj.model.surf_set, Set)
assert len(obj.model.surf_set) == 1
assert hasattr(obj.model, 'q')
assert isinstance(obj.model.q, Var)
assert hasattr(obj.model, 'dq_dt')
assert isinstance(obj.model.dq_dt, DerivativeVar)
@pytest.mark.build
def test_add_surface_species_list(self, isothermal_object_with_lists):
obj = isothermal_object_with_lists
obj.add_surface_species(["ZNH4","ZH"])
assert hasattr(obj.model, 'surf_set')
assert isinstance(obj.model.surf_set, Set)
assert len(obj.model.surf_set) == 2
assert hasattr(obj.model, 'q')
assert isinstance(obj.model.q, Var)
assert hasattr(obj.model, 'dq_dt')
assert isinstance(obj.model.dq_dt, DerivativeVar)
@pytest.mark.build
def test_add_surface_sites(self, isothermal_object):
obj = isothermal_object
obj.add_surface_sites("ZH")
assert hasattr(obj.model, 'site_set')
assert isinstance(obj.model.site_set, Set)
assert len(obj.model.site_set) == 1
assert hasattr(obj.model, 'S')
assert isinstance(obj.model.S, Var)
assert hasattr(obj.model, 'Smax')
assert isinstance(obj.model.Smax, Param)
assert hasattr(obj.model, 'u_S')
assert isinstance(obj.model.u_S, Param)
@pytest.mark.build
def test_add_surface_sites_list(self):
obj = Isothermal_Monolith_Simulator()
obj.add_axial_dim(0,5)
obj.add_temporal_dim(0,10)
obj.add_age_set("Unaged")
obj.add_temperature_set("250C")
obj.add_gas_species("NH3")
obj.add_surface_species("ZNH4")
obj.add_surface_sites(["S1","S2"])
assert hasattr(obj.model, 'site_set')
assert isinstance(obj.model.site_set, Set)
assert len(obj.model.site_set) == 2
assert hasattr(obj.model, 'S')
assert isinstance(obj.model.S, Var)
assert hasattr(obj.model, 'Smax')
assert isinstance(obj.model.Smax, Param)
assert hasattr(obj.model, 'u_S')
assert isinstance(obj.model.u_S, Param)
@pytest.mark.build
def test_add_reactions_equ(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
rxn_dict = {"r1": ReactionType.EquilibriumArrhenius}
obj.add_reactions(rxn_dict)
obj_with_lists.add_reactions(rxn_dict)
assert hasattr(obj.model, 'all_rxns')
assert isinstance(obj.model.all_rxns, Set)
assert len(obj.model.all_rxns) == 1
assert hasattr(obj_with_lists.model, 'all_rxns')
assert isinstance(obj_with_lists.model.all_rxns, Set)
assert len(obj_with_lists.model.all_rxns) == 1
assert hasattr(obj.model, 'arrhenius_rxns')
assert isinstance(obj.model.arrhenius_rxns, Set)
assert len(obj.model.arrhenius_rxns) == 0
assert hasattr(obj_with_lists.model, 'arrhenius_rxns')
assert isinstance(obj_with_lists.model.arrhenius_rxns, Set)
assert len(obj_with_lists.model.arrhenius_rxns) == 0
assert hasattr(obj.model, 'equ_arrhenius_rxns')
assert isinstance(obj.model.equ_arrhenius_rxns, Set)
assert len(obj.model.equ_arrhenius_rxns) == 1
assert hasattr(obj_with_lists.model, 'equ_arrhenius_rxns')
assert isinstance(obj_with_lists.model.equ_arrhenius_rxns, Set)
assert len(obj_with_lists.model.equ_arrhenius_rxns) == 1
assert hasattr(obj.model, 'u_C')
assert isinstance(obj.model.u_C, Param)
assert hasattr(obj.model, 'u_q')
assert isinstance(obj.model.u_q, Param)
assert hasattr(obj_with_lists.model, 'u_C')
assert isinstance(obj_with_lists.model.u_C, Param)
assert hasattr(obj_with_lists.model, 'u_q')
assert isinstance(obj_with_lists.model.u_q, Param)
assert hasattr(obj.model, 'A')
assert isinstance(obj.model.A, Var)
assert hasattr(obj.model, 'B')
assert isinstance(obj.model.B, Var)
assert hasattr(obj.model, 'E')
assert isinstance(obj.model.E, Var)
assert hasattr(obj.model, 'Af')
assert isinstance(obj.model.Af, Var)
assert hasattr(obj.model, 'Ef')
assert isinstance(obj.model.Ef, Var)
assert hasattr(obj.model, 'dH')
assert isinstance(obj.model.dH, Var)
assert hasattr(obj.model, 'dS')
assert isinstance(obj.model.dS, Var)
assert hasattr(obj_with_lists.model, 'A')
assert isinstance(obj_with_lists.model.A, Var)
assert hasattr(obj_with_lists.model, 'B')
assert isinstance(obj_with_lists.model.B, Var)
assert hasattr(obj_with_lists.model, 'E')
assert isinstance(obj_with_lists.model.E, Var)
assert hasattr(obj_with_lists.model, 'Af')
assert isinstance(obj_with_lists.model.Af, Var)
assert hasattr(obj_with_lists.model, 'Ef')
assert isinstance(obj_with_lists.model.Ef, Var)
assert hasattr(obj_with_lists.model, 'dH')
assert isinstance(obj_with_lists.model.dH, Var)
assert hasattr(obj_with_lists.model, 'dS')
assert isinstance(obj_with_lists.model.dS, Var)
assert hasattr(obj.model, 'all_species_set')
assert isinstance(obj.model.all_species_set, Set)
assert len(obj.model.all_species_set) == 3
assert hasattr(obj_with_lists.model, 'all_species_set')
assert isinstance(obj_with_lists.model.all_species_set, Set)
assert len(obj_with_lists.model.all_species_set) == 4
assert hasattr(obj.model, 'rxn_orders')
assert isinstance(obj.model.rxn_orders, Param)
assert hasattr(obj_with_lists.model, 'rxn_orders')
assert isinstance(obj_with_lists.model.rxn_orders, Param)
@pytest.mark.unit
def test_formfactor_calculations(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.set_bulk_porosity(0.3309)
obj.set_cell_density(62)
obj.set_washcoat_porosity(0.4)
obj.set_reactor_radius(1)
obj_with_lists.isMonolith = False
obj_with_lists.model.dh.set_value(0.1)
obj_with_lists.set_bulk_porosity(0.3309)
obj_with_lists.set_cell_density(62)
obj_with_lists.set_washcoat_porosity(0.4)
obj_with_lists.set_reactor_radius(1)
assert value(obj.model.eb) == 0.3309
assert value(obj.model.cell_density) == 62
assert value(obj_with_lists.model.eb) == 0.3309
assert value(obj_with_lists.model.cell_density) == 62
assert value(obj_with_lists.model.dh) == 0.1
assert value(obj_with_lists.model.Ga) == 6/0.1
assert pytest.approx(0.0777448, rel=1e-3) == value(obj.model.dh)
assert pytest.approx(28.8159, rel=1e-3) == value(obj.model.Ga)
obj_with_lists.isMonolith = True
obj_with_lists.model.dh.set_value(value(obj.model.dh))
obj_with_lists.model.Ga.set_value(value(obj.model.Ga))
assert pytest.approx(0.0777448, rel=1e-3) == value(obj_with_lists.model.dh)
assert pytest.approx(28.8159, rel=1e-3) == value(obj_with_lists.model.Ga)
obj.set_space_velocity_all_runs(1000)
obj_with_lists.set_space_velocity_all_runs(1000)
@pytest.mark.unit
def test_set_site_balance(self, isothermal_object):
obj = isothermal_object
obj.set_site_density("ZH","Unaged",0.1152619)
site_data = {"mol_occupancy": {"ZNH4": 1}}
obj.set_site_balance("ZH",site_data)
assert value(obj.model.u_S["ZH","ZNH4"]) == 1
@pytest.mark.unit
def test_set_reaction_info(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
rxn_dict = {"parameters": {"A": 250000, "E": 0,
"A_lb": 2500, "A_ub": 2500000000,
"E_lb": -1, "E_ub": 1,
"dH": -54000, "dS": 30,
"dH_lb": -55000, "dH_ub": -53000,
"dS_lb": 20, "dS_ub": 40,
},
"mol_reactants": {"ZH": 1, "NH3": 1},
"mol_products": {"ZNH4": 1},
"rxn_orders": {"ZH": 1, "NH3": 1, "ZNH4": 1}
}
obj.set_reaction_info("r1", rxn_dict)
obj_with_lists.set_reaction_info("r1", rxn_dict)
assert value(obj.model.Af["r1"].lb) == 2500
assert value(obj.model.Af["r1"].ub) == 2500000000
assert value(obj.model.Af["r1"]) == 250000
assert value(obj_with_lists.model.Af["r1"].lb) == 2500
assert value(obj_with_lists.model.Af["r1"].ub) == 2500000000
assert value(obj_with_lists.model.Af["r1"]) == 250000
assert value(obj.model.Ef["r1"].lb) == -1
assert value(obj.model.Ef["r1"].ub) == 1
assert value(obj.model.Ef["r1"]) == 0
assert value(obj_with_lists.model.Ef["r1"].lb) == -1
assert value(obj_with_lists.model.Ef["r1"].ub) == 1
assert value(obj_with_lists.model.Ef["r1"]) == 0
assert value(obj.model.dH["r1"].lb) == -55000
assert value(obj.model.dH["r1"].ub) == -53000
assert value(obj.model.dH["r1"]) == -54000
assert value(obj_with_lists.model.dH["r1"].lb) == -55000
assert value(obj_with_lists.model.dH["r1"].ub) == -53000
assert value(obj_with_lists.model.dH["r1"]) == -54000
assert value(obj.model.dS["r1"].lb) == 20
assert value(obj.model.dS["r1"].ub) == 40
assert value(obj.model.dS["r1"]) == 30
assert value(obj_with_lists.model.dS["r1"].lb) == 20
assert value(obj_with_lists.model.dS["r1"].ub) == 40
assert value(obj_with_lists.model.dS["r1"]) == 30
assert hasattr(obj.model, 'r1_reactants')
assert isinstance(obj.model.r1_reactants, Set)
assert len(obj.model.r1_reactants) == 2
assert hasattr(obj.model, 'r1_products')
assert isinstance(obj.model.r1_products, Set)
assert len(obj.model.r1_products) == 1
assert hasattr(obj_with_lists.model, 'r1_reactants')
assert isinstance(obj_with_lists.model.r1_reactants, Set)
assert len(obj_with_lists.model.r1_reactants) == 2
assert hasattr(obj_with_lists.model, 'r1_products')
assert isinstance(obj_with_lists.model.r1_products, Set)
assert len(obj_with_lists.model.r1_products) == 1
assert value(obj.model.u_C["NH3","r1",obj.model.z.first()]) == -1
assert value(obj.model.u_q["ZNH4","r1",obj.model.z.first()]) == 1
assert value(obj_with_lists.model.u_C["NH3","r1",obj_with_lists.model.z.first()]) == -1
assert value(obj_with_lists.model.u_q["ZNH4","r1",obj_with_lists.model.z.first()]) == 1
assert value(obj_with_lists.model.u_q["ZH","r1",obj_with_lists.model.z.first()]) == -1
assert value(obj.model.rxn_orders["r1","NH3"]) == 1
assert value(obj.model.rxn_orders["r1","ZH"]) == 1
assert value(obj.model.rxn_orders["r1","ZNH4"]) == 1
assert value(obj_with_lists.model.rxn_orders["r1","NH3"]) == 1
assert value(obj_with_lists.model.rxn_orders["r1","ZH"]) == 1
assert value(obj_with_lists.model.rxn_orders["r1","ZNH4"]) == 1
@pytest.mark.unit
def test_set_isothermal_temp(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.set_isothermal_temp("Unaged","250C",250+273.15)
obj_with_lists.set_isothermal_temp("Unaged","250C",250+273.15)
obj_with_lists.set_isothermal_temp("2hr","250C",250+273.15)
obj_with_lists.set_isothermal_temp("Unaged","300C",300+273.15)
obj_with_lists.set_isothermal_temp("2hr","300C",300+273.15)
assert value(obj.model.T["Unaged","250C",obj.model.z.first(),obj.model.t.first()]) == 250+273.15
assert value(obj_with_lists.model.T["Unaged","250C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()]) == 250+273.15
assert value(obj_with_lists.model.T["2hr","250C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()]) == 250+273.15
assert value(obj_with_lists.model.T["Unaged","300C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()]) == 300+273.15
assert value(obj_with_lists.model.T["2hr","300C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()]) == 300+273.15
@pytest.mark.initialization
def test_build_constraints(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.build_constraints()
obj_with_lists.build_constraints()
assert hasattr(obj.model, 'bulk_cons')
assert isinstance(obj.model.bulk_cons, Constraint)
assert hasattr(obj.model, 'pore_cons')
assert isinstance(obj.model.pore_cons, Constraint)
assert hasattr(obj.model, 'surf_cons')
assert isinstance(obj.model.surf_cons, Constraint)
assert hasattr(obj.model, 'site_cons')
assert isinstance(obj.model.site_cons, Constraint)
assert hasattr(obj_with_lists.model, 'bulk_cons')
assert isinstance(obj_with_lists.model.bulk_cons, Constraint)
assert hasattr(obj_with_lists.model, 'pore_cons')
assert isinstance(obj_with_lists.model.pore_cons, Constraint)
assert hasattr(obj_with_lists.model, 'surf_cons')
assert isinstance(obj_with_lists.model.surf_cons, Constraint)
@pytest.mark.initialization
def test_discretization_fd(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.discretize_model(method=DiscretizationMethod.FiniteDifference,
tstep=5,elems=5,colpoints=2)
obj_with_lists.discretize_model(method=DiscretizationMethod.FiniteDifference,
tstep=5,elems=5,colpoints=2)
assert hasattr(obj.model, 'dCbdz_edge')
assert isinstance(obj.model.dCbdz_edge, Constraint)
assert hasattr(obj_with_lists.model, 'dCbdz_edge')
assert isinstance(obj_with_lists.model.dCbdz_edge, Constraint)
assert len(obj.model.t) == len(obj_with_lists.model.t)
assert len(obj.model.z) == len(obj_with_lists.model.z)
assert pytest.approx(111.63437198706396, rel=1e-3) == \
value(obj.model.P["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(
value(obj_with_lists.model.P["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.P["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(28882.87336113903, rel=1e-3) == \
value(obj.model.v["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.v["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.v["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(0.0006748820366629658, rel=1e-3) == \
value(obj.model.rho["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.rho["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.rho["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(0.0002753695869940695, rel=1e-3) == \
value(obj.model.mu["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.mu["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.mu["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(91.7218236329034, rel=1e-3) == \
value(obj.model.Re["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.Re["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.Re["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(0.4129058808030342, rel=1e-3) == \
value(obj.model.Sc["NH3","Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.Sc["NH3","Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.Sc["NH3","Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(4.058710793831378, rel=1e-3) == \
value(obj.model.Sh["NH3","Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.Sh["NH3","Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.Sh["NH3","Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(858.2004564100874, rel=1e-3) == \
value(obj.model.km["NH3","Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.km["NH3","Unaged","250C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.km["NH3","Unaged","250C",obj.model.z.first(),obj.model.t.first()])
@pytest.mark.unit
def test_set_initial_conditions(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.set_const_IC("NH3","Unaged","250C",0)
obj.set_const_IC("ZNH4","Unaged","250C",0)
obj_with_lists.set_const_IC_in_ppm("NH3","Unaged","250C",0)
obj_with_lists.set_const_IC_in_ppm("NO","Unaged","250C",300)
obj_with_lists.set_const_IC_in_ppm("NH3","2hr","250C",0)
obj_with_lists.set_const_IC_in_ppm("NO","2hr","250C",300)
obj_with_lists.set_const_IC_in_ppm("NH3","Unaged","300C",0)
obj_with_lists.set_const_IC_in_ppm("NO","Unaged","300C",300)
obj_with_lists.set_const_IC_in_ppm("NH3","2hr","300C",0)
obj_with_lists.set_const_IC_in_ppm("NO","2hr","300C",300)
obj_with_lists.set_const_IC("ZNH4","Unaged","250C",0)
obj_with_lists.set_const_IC("ZH","Unaged","250C",0.1152619)
obj_with_lists.set_const_IC("ZNH4","2hr","250C",0)
obj_with_lists.set_const_IC("ZH","2hr","250C",0.0952619)
obj_with_lists.set_const_IC("ZNH4","Unaged","300C",0)
obj_with_lists.set_const_IC("ZH","Unaged","300C",0.1152619)
obj_with_lists.set_const_IC("ZNH4","2hr","300C",0)
obj_with_lists.set_const_IC("ZH","2hr","300C",0.0952619)
assert pytest.approx(6.9762939977887255e-06, rel=1e-3) == \
value(obj_with_lists.model.Cb["NO","Unaged","250C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()])
assert pytest.approx(6.36770165740761e-06, rel=1e-3) == \
value(obj_with_lists.model.Cb["NO","Unaged","300C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()])
assert pytest.approx(6.9762939977887255e-06, rel=1e-3) == \
value(obj_with_lists.model.C["NO","Unaged","250C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()])
assert pytest.approx(6.36770165740761e-06, rel=1e-3) == \
value(obj_with_lists.model.C["NO","Unaged","300C",
obj_with_lists.model.z.first(),obj_with_lists.model.t.first()])
assert pytest.approx(1e-20, rel=1e-3) == \
value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(1e-20, rel=1e-3) == \
value(obj.model.C["NH3","Unaged","250C",
obj.model.z.first(),obj.model.t.first()])
@pytest.mark.unit
def test_set_boundary_conditions(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.set_time_dependent_BC("NH3","Unaged","250C",
time_value_pairs=[(4,6.9762939977887255e-06)],
initial_value=0)
obj_with_lists.set_time_dependent_BC_in_ppm("NH3","Unaged","250C",
time_value_pairs=[(4,300)],
initial_value=0)
obj_with_lists.set_time_dependent_BC_in_ppm("NH3","2hr","250C",
time_value_pairs=[(4,300)],
initial_value=0)
obj_with_lists.set_time_dependent_BC_in_ppm("NH3","Unaged","300C",
time_value_pairs=[(4,300)],
initial_value=0)
obj_with_lists.set_time_dependent_BC_in_ppm("NH3","2hr","300C",
time_value_pairs=[(4,300)],
initial_value=0)
obj_with_lists.set_const_BC("NO","Unaged","250C",6.9762939977887255e-06)
obj_with_lists.set_const_BC_in_ppm("NO","2hr","250C",300)
obj_with_lists.set_const_BC("NO","Unaged","300C",6.36770165740761e-06)
obj_with_lists.set_const_BC_in_ppm("NO","2hr","300C",300)
assert pytest.approx(6.9762939977887255e-06, rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.first(),4.0])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.first(),4.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.first(),4.0])
assert pytest.approx(value(obj_with_lists.model.Cb["NO","Unaged","250C",
obj_with_lists.model.z.first(),4.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NO","2hr","250C",
obj_with_lists.model.z.first(),4.0])
assert pytest.approx(value(obj_with_lists.model.Cb["NO","Unaged","300C",
obj_with_lists.model.z.first(),4.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NO","2hr","300C",
obj_with_lists.model.z.first(),4.0])
@pytest.mark.initialization
def test_initialize_auto_scaling(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.initialize_auto_scaling()
obj_with_lists.initialize_auto_scaling()
assert hasattr(obj.model, 'scaling_factor')
assert isinstance(obj.model.scaling_factor, Suffix)
assert hasattr(obj_with_lists.model, 'scaling_factor')
assert isinstance(obj_with_lists.model.scaling_factor, Suffix)
@pytest.mark.solver
def test_initialization_solve(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
(stat1, cond1) = obj.initialize_simulator()
assert cond1 == TerminationCondition.optimal
assert stat1 == SolverStatus.ok
(stat2, cond2) = obj_with_lists.initialize_simulator()
assert cond2 == TerminationCondition.optimal
assert stat2 == SolverStatus.ok
@pytest.mark.initialization
def test_final_auto_scaling(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.finalize_auto_scaling()
obj_with_lists.finalize_auto_scaling()
assert not hasattr(obj.model, 'obj')
assert not hasattr(obj_with_lists.model, 'obj')
@pytest.mark.solver
def test_full_solve(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
(stat1, cond1) = obj.run_solver()
assert cond1 == TerminationCondition.optimal
assert stat1 == SolverStatus.ok
(stat2, cond2) = obj_with_lists.run_solver()
assert cond2 == TerminationCondition.optimal
assert stat2 == SolverStatus.ok
assert pytest.approx(28882.87336113903, rel=1e-3) == \
value(obj.model.v["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
        assert pytest.approx(value(obj_with_lists.model.v["Unaged","250C",obj_with_lists.model.z.first(),obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.v["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.last(),0.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.last(),0.0])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.last(),4.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.last(),4.0])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.last(),8.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.last(),8.0])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.last(),12.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.last(),12.0])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.last(),16.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.last(),16.0])
assert pytest.approx(value(obj.model.Cb["NH3","Unaged","250C",
obj.model.z.last(),20.0]), rel=1e-3) == \
value(obj_with_lists.model.Cb["NH3","Unaged","250C",
obj_with_lists.model.z.last(),20.0])
assert pytest.approx(value(obj.model.q["ZNH4","Unaged","250C",
obj.model.z.last(),0.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZNH4","Unaged","250C",
obj_with_lists.model.z.last(),0.0])
assert pytest.approx(value(obj.model.q["ZNH4","Unaged","250C",
obj.model.z.last(),4.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZNH4","Unaged","250C",
obj_with_lists.model.z.last(),4.0])
assert pytest.approx(value(obj.model.q["ZNH4","Unaged","250C",
obj.model.z.last(),8.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZNH4","Unaged","250C",
obj_with_lists.model.z.last(),8.0])
assert pytest.approx(value(obj.model.q["ZNH4","Unaged","250C",
obj.model.z.last(),12.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZNH4","Unaged","250C",
obj_with_lists.model.z.last(),12.0])
assert pytest.approx(value(obj.model.q["ZNH4","Unaged","250C",
obj.model.z.last(),16.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZNH4","Unaged","250C",
obj_with_lists.model.z.last(),16.0])
assert pytest.approx(value(obj.model.q["ZNH4","Unaged","250C",
obj.model.z.last(),20.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZNH4","Unaged","250C",
obj_with_lists.model.z.last(),20.0])
assert pytest.approx(value(obj.model.S["ZH","Unaged","250C",
obj.model.z.last(),0.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZH","Unaged","250C",
obj_with_lists.model.z.last(),0.0])
assert pytest.approx(value(obj.model.S["ZH","Unaged","250C",
obj.model.z.last(),4.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZH","Unaged","250C",
obj_with_lists.model.z.last(),4.0])
assert pytest.approx(value(obj.model.S["ZH","Unaged","250C",
obj.model.z.last(),8.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZH","Unaged","250C",
obj_with_lists.model.z.last(),8.0])
assert pytest.approx(value(obj.model.S["ZH","Unaged","250C",
obj.model.z.last(),12.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZH","Unaged","250C",
obj_with_lists.model.z.last(),12.0])
assert pytest.approx(value(obj.model.S["ZH","Unaged","250C",
obj.model.z.last(),16.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZH","Unaged","250C",
obj_with_lists.model.z.last(),16.0])
assert pytest.approx(value(obj.model.S["ZH","Unaged","250C",
obj.model.z.last(),20.0]), rel=1e-3) == \
value(obj_with_lists.model.q["ZH","Unaged","250C",
obj_with_lists.model.z.last(),20.0])
| 43.626866
| 132
| 0.609279
|
04e5e53bf10d135544089dd3f65440b068532eee
| 350
|
py
|
Python
|
django_pivot/tests/test_sqlite_settings.py
|
thatch/django-pivot
|
b230376b56d8cc2a11e8620f5a2950e5e171e747
|
[
"MIT"
] | 187
|
2016-11-17T01:21:12.000Z
|
2022-03-19T11:51:03.000Z
|
django_pivot/tests/test_sqlite_settings.py
|
thatch/django-pivot
|
b230376b56d8cc2a11e8620f5a2950e5e171e747
|
[
"MIT"
] | 22
|
2017-10-17T17:57:50.000Z
|
2022-02-11T03:29:27.000Z
|
django_pivot/tests/test_sqlite_settings.py
|
thatch/django-pivot
|
b230376b56d8cc2a11e8620f5a2950e5e171e747
|
[
"MIT"
] | 15
|
2017-11-01T05:29:24.000Z
|
2022-02-24T23:09:52.000Z
|
BACKEND = 'sqlite'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'django_pivot.tests.pivot',
)
SITE_ID = 1
SECRET_KEY = 'secret'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
| 15.909091
| 48
| 0.634286
|
568dbf50fde914c45f79fde0aece6c7a86a0f4ce
| 1,287
|
py
|
Python
|
aiida/cmdline/params/types/node.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2016-09-12T10:51:00.000Z
|
2016-09-12T10:51:00.000Z
|
aiida/cmdline/params/types/node.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 17
|
2020-03-11T17:04:05.000Z
|
2020-05-01T09:34:45.000Z
|
aiida/cmdline/params/types/node.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Module to define the custom click param type for node
"""
from .identifier import IdentifierParamType
class NodeParamType(IdentifierParamType):
"""
The ParamType for identifying Node entities or its subclasses
"""
name = 'Node'
@property
def orm_class_loader(self):
"""
Return the orm entity loader class, which should be a subclass of OrmEntityLoader. This class is supposed
to be used to load the entity for a given identifier
:return: the orm entity loader class for this ParamType
"""
from aiida.orm.utils.loaders import NodeEntityLoader
return NodeEntityLoader
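# Illustrative sketch (hypothetical command, not part of this module): the param type
# is intended for click arguments/options, so a node can be referenced by pk, uuid or
# label on the command line, e.g.
#
#     import click
#
#     @click.command()
#     @click.argument('node', type=NodeParamType())
#     def show(node):
#         click.echo(node.uuid)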
| 39
| 113
| 0.534577
|
c2cc176f7ed165eda293198e4d0d3cd69d5e9d8f
| 4,344
|
py
|
Python
|
2d_triangular/filling_1over3_BC_P_P/2d_triangular.py
|
ryuikaneko/tight_binding_shell_condition
|
37ed5f1497b6e757873831ea515a29e4a9f2e50e
|
[
"MIT"
] | null | null | null |
2d_triangular/filling_1over3_BC_P_P/2d_triangular.py
|
ryuikaneko/tight_binding_shell_condition
|
37ed5f1497b6e757873831ea515a29e4a9f2e50e
|
[
"MIT"
] | null | null | null |
2d_triangular/filling_1over3_BC_P_P/2d_triangular.py
|
ryuikaneko/tight_binding_shell_condition
|
37ed5f1497b6e757873831ea515a29e4a9f2e50e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding:utf-8
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def ene(kx,ky):
return -2.0*(np.cos(kx)+np.cos(ky)+np.cos(kx+ky))
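# Note (added for clarity, not in the original script): ene() is the nearest-neighbour
# tight-binding dispersion of the 2D triangular lattice with hopping t = 1,
#     E(kx, ky) = -2 * (cos(kx) + cos(ky) + cos(kx + ky)),
# with kx and ky measured along the two primitive lattice vectors.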
def calc_k_ene(Lx,Ly,BCx,BCy):
if BCx == 'AP' or BCx == 'antiperiodic':
xshift = 0.5
elif BCx == 'P' or BCx == 'periodic':
xshift = 0.0
else:
xshift = 0.0
if BCy == 'AP' or BCy == 'antiperiodic':
yshift = 0.5
elif BCy == 'P' or BCy == 'periodic':
yshift = 0.0
else:
yshift = 0.0
list_kx = np.array([2.0*np.pi*((x+xshift)/Lx-float(Lx//2)/Lx) for x in range(Lx)])
list_ky = np.array([2.0*np.pi*((y+yshift)/Ly-float(Ly//2)/Ly) for y in range(Ly)])
list_enekxky = np.array([ene(kx,ky) for ky in list_ky for kx in list_kx])
list_intkxky = np.array([Lx*y+x for y in range(Ly) for x in range(Lx)])
return list_enekxky, list_intkxky, xshift, yshift
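# Note (added for clarity, not in the original script): the 0.5 shift used for the
# 'AP' (antiperiodic) boundary condition offsets every allowed momentum by pi/L,
# i.e. for even L the grid is k_n = 2*pi*(n + 1/2)/L - pi instead of 2*pi*n/L - pi.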
def calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom):
filling = float(filling_numer)/float(filling_denom)
numel = Lx*Ly*filling_numer//filling_denom
list_enekxky, list_intkxky, xshift, yshift = calc_k_ene(Lx,Ly,BCx,BCy)
list_ind = np.argsort(list_enekxky)
list_sorted_enekxky = list_enekxky[list_ind]
list_sorted_intkxky = list_intkxky[list_ind]
chemipo = 0.5*(list_sorted_enekxky[numel] + list_sorted_enekxky[numel-1])
totene = np.sum(list_sorted_enekxky[:numel])
gap = list_sorted_enekxky[numel] - list_sorted_enekxky[numel-1]
if np.abs(gap)>1e-10:
shellcond = 'closed'
else:
shellcond = 'open'
return filling, numel, chemipo, totene, gap, shellcond, \
list_sorted_enekxky, list_sorted_intkxky, xshift, yshift
def main():
BCx = 'P'
BCy = 'P'
filling_numer = 1
# filling_denom = 2
filling_denom = 3
list_L = []
list_enedens = []
file = open("dat_2d_triangular",'w')
file.write("# L filling(=n/2) BCx BCy num_electrons(=nup=ndown) chemi_potential ene ene_dens gap shell_cond\n")
for L in range(6,60,6):
Lx = L
Ly = L
filling, numel, chemipo, totene, gap, shellcond, \
list_enekxky, list_intkxky, xshift, yshift = \
calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom)
list_L.append(L)
list_enedens.append(totene/Lx/Ly)
file.write("{} {} {} {} {} {} {} {} {} {}\n".format(\
L,filling,BCx,BCy,numel,chemipo,totene,totene/Lx/Ly,gap,shellcond))
file.close()
list_L = np.array(list_L)
list_enedens = np.array(list_enedens)
plt.xlabel("1/L^2")
plt.ylabel("E/L^2")
plt.plot(1.0/list_L**2,list_enedens,color='blue',marker='o',markerfacecolor='white')
plt.savefig("fig_2d_triangular_enedens.png")
plt.cla()
plt.clf()
L = 30
Lx = L
Ly = L
filling, numel, chemipo, totene, gap, shellcond, \
list_enekxky, list_intkxky, xshift, yshift = \
calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom)
list_intkx = list_intkxky%Lx
list_intky = list_intkxky//Lx
list_kx = (list_intkx.astype(np.float64)+xshift)/Lx-float(Lx//2)/Lx
list_ky = (list_intky.astype(np.float64)+yshift)/Ly-float(Ly//2)/Ly
plt.xlabel("kx/pi")
plt.ylabel("ky/pi")
plt.xticks([-0.5,-0.25,0,0.25,0.5])
plt.yticks([-0.5,-0.25,0,0.25,0.5])
plt.xlim(-0.55,0.55)
plt.ylim(-0.55,0.55)
## https://stackoverflow.com/questions/17990845/how-to-equalize-the-scales-of-x-axis-and-y-axis-in-python-matplotlib
# plt.axis('equal')
plt.gca().set_aspect('equal',adjustable='box')
plt.plot(list_kx,list_ky,color='blue',marker='o',\
markerfacecolor='white',linestyle='None')
plt.plot(list_kx[:numel],list_ky[:numel],color='blue',marker='o',\
markerfacecolor='blue',linestyle='None')
plt.savefig("fig_2d_triangular_fermisurface.png")
plt.cla()
plt.clf()
L = 2**9
Lx = L
Ly = L
nbins = L//2
filling, numel, chemipo, totene, gap, shellcond, \
list_enekxky, list_intkxky, xshift, yshift = \
calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom)
plt.xlabel("E")
plt.ylabel("DOS")
plt.hist(list_enekxky-chemipo,bins=nbins,density=True)
plt.savefig("fig_2d_triangular_dos.png")
if __name__ == "__main__":
main()
| 35.606557
| 116
| 0.642495
|
bde33fa7f6bb5ebaf5009e45cc3769409e98f2cf
| 2,492
|
py
|
Python
|
src/mem/probes/BaseMemProbe.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 135
|
2016-10-21T03:31:49.000Z
|
2022-03-25T01:22:20.000Z
|
src/mem/probes/BaseMemProbe.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 35
|
2017-03-10T17:57:46.000Z
|
2022-02-18T17:34:16.000Z
|
src/mem/probes/BaseMemProbe.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 48
|
2016-12-08T12:03:13.000Z
|
2022-02-16T09:16:13.000Z
|
# Copyright (c) 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class BaseMemProbe(SimObject):
type = 'BaseMemProbe'
abstract = True
cxx_header = "mem/probes/base.hh"
manager = VectorParam.SimObject(Parent.any,
"Probe manager(s) to instrument")
probe_name = Param.String("PktRequest", "Memory request probe to use")
| 49.84
| 74
| 0.775682
|
bc6d74ede8a803e44b6ddd29b2d9fce6eee1d6ce
| 1,088
|
py
|
Python
|
hotel/migrations/0003_feedback.py
|
ssd-course-project/hotel
|
3920fe522daceb2554d8850c62202988b745827b
|
[
"MIT"
] | null | null | null |
hotel/migrations/0003_feedback.py
|
ssd-course-project/hotel
|
3920fe522daceb2554d8850c62202988b745827b
|
[
"MIT"
] | 27
|
2019-03-10T05:59:58.000Z
|
2019-05-16T07:57:06.000Z
|
hotel/migrations/0003_feedback.py
|
ssd-course-project/hotel
|
3920fe522daceb2554d8850c62202988b745827b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2019-04-09 13:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('clients', '0001_initial'),
('hotel', '0002_auto_20190403_1917'),
]
operations = [
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Время создания')),
                ('rating', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=5, verbose_name='Оценка')),
('text', models.TextField(verbose_name='Текст отзыва')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clients.Client')),
],
options={
'verbose_name': 'Отзывы',
'verbose_name_plural': 'Отзывы',
},
),
]
| 36.266667
| 144
| 0.568015
|
7e17b4316da04d14f2e9c734941138762a5994a0
| 154,122
|
py
|
Python
|
greykite/tests/algo/forecast/silverkite/test_forecast_silverkite.py
|
briancpark/greykite
|
2f484978a7ed206ebd9356e02fc1fb881cd25205
|
[
"BSD-2-Clause"
] | null | null | null |
greykite/tests/algo/forecast/silverkite/test_forecast_silverkite.py
|
briancpark/greykite
|
2f484978a7ed206ebd9356e02fc1fb881cd25205
|
[
"BSD-2-Clause"
] | null | null | null |
greykite/tests/algo/forecast/silverkite/test_forecast_silverkite.py
|
briancpark/greykite
|
2f484978a7ed206ebd9356e02fc1fb881cd25205
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
from datetime import timedelta
import matplotlib
import numpy as np
import modin.pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from greykite.algo.changepoint.adalasso.changepoint_detector import ChangepointDetector
from greykite.algo.changepoint.adalasso.changepoints_utils import get_changepoint_dates_from_changepoints_dict
from greykite.algo.forecast.silverkite.forecast_silverkite import SilverkiteForecast
from greykite.algo.forecast.silverkite.forecast_simple_silverkite_helper import cols_interact
from greykite.algo.forecast.silverkite.forecast_simple_silverkite_helper import generate_holiday_events
from greykite.common.constants import ADJUSTMENT_DELTA_COL
from greykite.common.constants import END_DATE_COL
from greykite.common.constants import ERR_STD_COL
from greykite.common.constants import EVENT_DF_LABEL_COL
from greykite.common.constants import START_DATE_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.data_loader import DataLoader
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.evaluation import calc_pred_err
from greykite.common.features.adjust_anomalous_data import adjust_anomalous_data
from greykite.common.features.normalize import normalize_df
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.features.timeseries_features import fourier_series_multi_fcn
from greykite.common.features.timeseries_features import get_changepoint_string
from greykite.common.features.timeseries_features import get_evenly_spaced_changepoints_values
from greykite.common.features.timeseries_features import get_fourier_col_name
from greykite.common.features.timeseries_features import get_holidays
from greykite.common.features.timeseries_impute import impute_with_lags
from greykite.common.features.timeseries_lags import build_autoreg_df
from greykite.common.features.timeseries_lags import build_autoreg_df_multi
from greykite.common.python_utils import assert_equal
from greykite.common.python_utils import get_pattern_cols
from greykite.common.testing_utils import generate_anomalous_data
from greykite.common.testing_utils import generate_df_for_tests
from greykite.common.testing_utils import generate_df_with_holidays
from greykite.common.testing_utils import generate_df_with_reg_for_tests
from greykite.common.testing_utils import generic_test_adjust_anomalous_data
from greykite.common.viz.timeseries_plotting_mpl import plt_compare_timeseries
matplotlib.use("agg") # noqa: E402
import matplotlib.pyplot as plt # isort:skip
@pytest.fixture
def hourly_data():
"""Generate 500 days of hourly data for tests"""
return generate_df_for_tests(
freq="H",
periods=24 * 500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
@pytest.fixture
def lagged_regressor_dict():
"""Generate a dictionary of 3 lagged regressors with different dtypes"""
return {
"regressor1": {
"lag_dict": {"orders": [1, 168]},
"agg_lag_dict": {
"orders_list": [[168, 168 * 2, 168 * 3]],
"interval_list": [(169, 168 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()},
"regressor_bool": {
"lag_dict": {"orders": [1, 168]},
"agg_lag_dict": {
"orders_list": [[168, 168 * 2, 168 * 3]],
"interval_list": [(169, 168 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()},
"regressor_categ": {
"lag_dict": {"orders": [1, 168]},
"series_na_fill_func": lambda s: s.bfill().ffill()}}
def plt_comparison_forecast_vs_observed(
fut_df,
test_df,
file_name=None,
plt_title=""):
"""A local function for comparing forecasts with observed on test set.
This function is only for tests here.
Imports are delibrately put inside the function as the function
will be run only selectively if user descides to make plots
:param fut_df: pd.DataFrame
dataframe with predictions
expected to have a VALUE_COL at least
:param test_df: pd.DataFrame
dataframe which includes the observed values
expected to have at least two columns: TIME_COL, VALUE_COL
:param file_name: Optional[str]
File name for the plot to be saved
:param plt_title: str
title of the plot, default: ""
"""
fut_df[TIME_COL] = test_df[TIME_COL]
plt_compare_timeseries(
df_dict={"obs": test_df, "forecast": fut_df},
time_col=TIME_COL,
value_col=VALUE_COL,
colors_dict={"obs": "red", "forecast": "green"},
plt_title=plt_title)
if file_name is not None:
plt.savefig(file_name)
plt.close()
def plt_check_ci(fut_df, test_df):
"""A local function for creating conf. interval plots within this test file.
:param fut_df: pd.DataFrame
the dataframe which includes future predictions in its VALUE_COL column
:param test_df: pd.DataFrame
the dataframe which includes true values in its VALUE_COL column
"""
    # imports are done within the function because the function is not
    # automatically run when the tests run
import plotly
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import PREDICTED_LOWER_COL
from greykite.common.constants import PREDICTED_UPPER_COL
from greykite.common.viz.timeseries_plotting import plot_forecast_vs_actual
# splitting the ci column to create lower and upper columns
ci_df = pd.DataFrame(fut_df["y_quantile_summary"].tolist())
assert ci_df.shape[1] == 2, "ci_df must have exactly two columns"
ci_df.columns = [PREDICTED_LOWER_COL, PREDICTED_UPPER_COL]
# adding necessary columns
ci_df[ACTUAL_COL] = test_df["y"]
ci_df[PREDICTED_COL] = fut_df["y"]
ci_df[TIME_COL] = test_df[TIME_COL]
fig = plot_forecast_vs_actual(
df=ci_df,
time_col=TIME_COL,
actual_col=ACTUAL_COL,
predicted_col=PREDICTED_COL,
predicted_lower_col=PREDICTED_LOWER_COL,
predicted_upper_col=PREDICTED_UPPER_COL,
ylabel=VALUE_COL,
train_end_date=None,
title=None,
actual_points_color="red",
actual_points_size=2.0,
forecast_curve_color="blue",
actual_color_opacity=0.6,
ci_band_color="rgba(0, 0, 200, 0.4)",
ci_boundary_curve_color="rgb(56, 119, 166, 0.95)", # blue navy color with opacity of 0.95
ci_boundary_curve_width=0.5)
plotly.offline.plot(fig)
def test_forecast_silverkite_hourly(hourly_data):
"""Tests silverkite on hourly data with linear model fit"""
train_df = hourly_data["train_df"]
test_df = hourly_data["test_df"]
fut_time_num = hourly_data["fut_time_num"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"])
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
assert trained_model["x_mat"]["ct1"][0] == 0 # this should be true when origin_for_time_vars=None
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
# with normalization
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"],
normalize_method="min_max")
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
assert trained_model["x_mat"]["ct1"][0] == 0 # this should be true when origin_for_time_vars=None
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_hourly_regressor():
"""Tests silverkite with regressors and random forest fit"""
hourly_data = generate_df_with_reg_for_tests(
freq="H",
periods=24 * 500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
regressor_cols = ["regressor1", "regressor_bool", "regressor_categ"]
train_df = hourly_data["train_df"].reset_index(drop=True)
test_df = hourly_data["test_df"].reset_index(drop=True)
fut_time_num = hourly_data["fut_time_num"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
training_fraction=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5],
"seas_names": None}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"] + regressor_cols,
fit_algorithm="rf",
fit_algorithm_params={"min_samples_split": 3})
assert trained_model["ml_model"].min_samples_split == 3
# three equivalent ways of generating predictions
result1 = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=test_df[regressor_cols])
result2 = silverkite.predict_no_sim(
fut_df=test_df[[TIME_COL, VALUE_COL]],
trained_model=trained_model,
past_df=None,
new_external_regressor_df=test_df[regressor_cols])
result3 = silverkite.predict_no_sim(
fut_df=test_df[[TIME_COL, VALUE_COL] + regressor_cols],
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None)
# checks for equality of contents, ignoring row/column order
    # `df` may contain extra columns not required by `silverkite.predict_n_no_sim`,
    # and VALUE_COL is the last column in the output of `silverkite.predict_n_no_sim`,
    # but the original column order in `df` is preserved
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result1, result3, check_like=True)
err = calc_pred_err(test_df[VALUE_COL], result1[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
# checks to make sure the frequency is set properly
assert np.array_equal(result1[TIME_COL].values, test_df[TIME_COL].values)
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_hourly_lagged_regressor(lagged_regressor_dict):
"""Tests silverkite with regressors and random forest fit"""
hourly_data = generate_df_with_reg_for_tests(
freq="H",
periods=24 * 500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
train_df = hourly_data["train_df"].reset_index(drop=True)
test_df = hourly_data["test_df"].reset_index(drop=True)
fut_time_num = hourly_data["fut_time_num"]
# Fits a model that only contains lagged regressors but no regressors
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
training_fraction=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5],
"seas_names": None}),
extra_pred_cols=["dow_hr", "ct1"],
lagged_regressor_dict=lagged_regressor_dict)
lagged_regressor_cols = trained_model["lagged_regressor_cols"]
# Three equivalent ways of generating predictions
result1 = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=test_df[lagged_regressor_cols])
result2 = silverkite.predict_no_sim(
fut_df=test_df[[TIME_COL, VALUE_COL]],
trained_model=trained_model,
past_df=train_df[lagged_regressor_cols],
new_external_regressor_df=test_df[lagged_regressor_cols])
result3 = silverkite.predict_no_sim(
fut_df=test_df[[TIME_COL, VALUE_COL] + lagged_regressor_cols],
trained_model=trained_model,
past_df=train_df[lagged_regressor_cols],
new_external_regressor_df=None)
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result1, result3, check_like=True)
err = calc_pred_err(test_df[VALUE_COL], result1[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert round(err[enum.get_metric_name()], 1) == 0.8
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 1) == 2.0
# Checks to make sure the frequency is set properly
assert np.array_equal(result1[TIME_COL].values, test_df[TIME_COL].values)
# Tests when no `new_external_regressor_df` is provided
# If `min_lagged_regressor_order` (in this case 1) is greater than or equal to test_df.shape[0],
# the prediction should run without any error even without `new_external_regressor_df`
# and the following two should return identical results
result4 = silverkite.predict_no_sim(
fut_df=test_df[[TIME_COL, VALUE_COL]].head(1),
trained_model=trained_model,
past_df=train_df[lagged_regressor_cols],
new_external_regressor_df=None)
result5 = silverkite.predict_n_no_sim(
fut_time_num=1,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
assert_frame_equal(result4, result5, check_like=True)
    # Otherwise, if `min_lagged_regressor_order` is less than the forecast horizon,
    # the lagged regressor columns must be supplied; tests for the expected exception
expected_match = (
"All columns in `lagged_regressor_cols` must appear in `df`")
    # neither `fut_df` nor `new_external_regressor_df` provides the lagged regressor columns
with pytest.raises(ValueError, match=expected_match):
silverkite.predict_no_sim(
fut_df=test_df[[TIME_COL, VALUE_COL]].head(2),
trained_model=trained_model,
past_df=train_df[lagged_regressor_cols],
new_external_regressor_df=None)
def test_forecast_silverkite_freq():
"""Tests forecast_silverkite at different frequencies"""
# A wide variety of frequencies listed here:
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
frequencies = [
"B", "W", "W-SAT", "W-TUE", "M", "SM",
"MS", "SMS", "CBMS", "BM", "B", "Q",
"QS", "BQS", "BQ-AUG", "Y", "YS",
"AS-SEP", "BH", "T", "S"]
periods = 50
train_frac = 0.8
train_test_thresh_index = int(periods * train_frac * 0.8)
for freq in frequencies:
df = generate_df_for_tests(
freq=freq,
periods=50,
train_frac=0.8,
train_start_date=datetime.datetime(2018, 5, 1))
train_df = df["train_df"]
test_df = df["test_df"]
fut_time_num = df["fut_time_num"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=train_df[TIME_COL][train_test_thresh_index],
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt"],
changepoints_dict={
"method": "uniform",
"n_changepoints": 2,
"continuous_time_col": "ct1",
"growth_func": lambda x: x})
changepoint_dates = get_changepoint_dates_from_changepoints_dict(
changepoints_dict={
"method": "uniform",
"n_changepoints": 2,
"continuous_time_col": "ct1",
"growth_func": lambda x: x},
df=train_df,
time_col=TIME_COL
)
changepoint_cols = get_pattern_cols(trained_model["pred_cols"], "^changepoint")
assert len(changepoint_cols) == 2
string_format = get_changepoint_string(changepoint_dates)
assert "changepoint0" + string_format[0] in trained_model["pred_cols"]
assert "changepoint1" + string_format[1] in trained_model["pred_cols"]
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq=freq,
new_external_regressor_df=None)
        # checks that predict_no_sim agrees with predict_n_no_sim
fut_df_via_predict = silverkite.predict_no_sim(
fut_df=test_df,
trained_model=trained_model)
assert_frame_equal(
fut_df[[TIME_COL]],
fut_df_via_predict[[TIME_COL]],
check_like=True)
assert_frame_equal(
fut_df,
fut_df_via_predict,
check_like=True)
def test_forecast_silverkite_changepoints():
"""Tests forecast_silverkite on peyton manning data
(with changepoints and missing values)
"""
dl = DataLoader()
df_pt = dl.load_peyton_manning()
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=df_pt,
time_col="ts",
value_col="y",
changepoints_dict={
"method": "auto",
"yearly_seasonality_order": 6,
"resample_freq": "2D",
"actual_changepoint_min_distance": "100D",
"potential_changepoint_distance": "50D",
"no_changepoint_proportion_from_end": 0.3
}
)
# "df" preserves the original indices
assert_equal(trained_model["df"].index, df_pt.index)
# "df_dropna" drops the correct indices
assert_equal(trained_model["df_dropna"].index, df_pt.dropna().index)
changepoint_values = trained_model["normalized_changepoint_values"]
df_length = trained_model["x_mat"]["ct1"].iloc[-1]
cp_distance = timedelta(days=100) / (pd.to_datetime(df_pt["ts"].iloc[-1]) - pd.to_datetime(df_pt["ts"].iloc[0]))
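    # ``changepoint_values`` are on the normalized continuous-time (``ct1``) scale,
    # so a "100D" minimum spacing is converted to that scale by multiplying
    # ``df_length`` by the fraction of the full time span that 100 days represents.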
    # has changepoints (the checks below index into ``changepoint_values``)
    assert len(changepoint_values) > 0
# checks no change points at the end
assert changepoint_values[-1] <= df_length * 0.7
# checks change point distance is at least "100D"
min_cp_dist = min([changepoint_values[i] - changepoint_values[i - 1] for i in range(1, len(changepoint_values))])
assert min_cp_dist >= df_length * cp_distance
# checks the number of change points is consistent with the change points detected by ChangepointDetector
cd = ChangepointDetector()
res = cd.find_trend_changepoints(
df=df_pt,
time_col="ts",
value_col="y",
yearly_seasonality_order=6,
resample_freq="2D",
actual_changepoint_min_distance="100D",
potential_changepoint_distance="50D",
no_changepoint_proportion_from_end=0.3
)
changepoint_dates = res["trend_changepoints"]
assert len(changepoint_values) == len(changepoint_dates)
def test_forecast_silverkite_seasonality_changepoints():
    """Tests forecast_silverkite seasonality changepoints on the Peyton Manning data"""
dl = DataLoader()
df_pt = dl.load_peyton_manning()
silverkite = SilverkiteForecast()
# seasonality changepoints is None if dictionary is not provided
trained_model = silverkite.forecast(
df=df_pt,
time_col="ts",
value_col="y",
changepoints_dict={
"method": "auto"
},
seasonality_changepoints_dict=None
)
assert trained_model["seasonality_changepoint_dates"] is None
assert trained_model["seasonality_changepoint_result"] is None
# all test cases below include seasonality changepoint detection.
# without trend change points
trained_model = silverkite.forecast(
df=df_pt,
time_col="ts",
value_col="y",
changepoints_dict=None,
seasonality_changepoints_dict={}
)
assert trained_model["seasonality_changepoint_dates"] is not None
assert trained_model["seasonality_changepoint_result"] is not None
assert "weekly" in trained_model["seasonality_changepoint_dates"].keys()
assert "yearly" in trained_model["seasonality_changepoint_dates"].keys()
# with different seasonality change point parameters
trained_model = silverkite.forecast(
df=df_pt,
time_col="ts",
value_col="y",
changepoints_dict={
"method": "auto"
},
seasonality_changepoints_dict={
"no_changepoint_distance_from_end": "730D"
}
)
assert trained_model["seasonality_changepoint_dates"] is not None
assert trained_model["seasonality_changepoint_result"] is not None
assert "weekly" in trained_model["seasonality_changepoint_dates"].keys()
assert "yearly" in trained_model["seasonality_changepoint_dates"].keys()
no_changepoint_proportion_from_end = timedelta(days=730) / (
pd.to_datetime(df_pt["ts"].iloc[-1]) - pd.to_datetime(df_pt["ts"].iloc[0]))
last_date_to_have_changepoint = pd.to_datetime(df_pt["ts"].iloc[int(
df_pt.shape[0] * (1 - no_changepoint_proportion_from_end))])
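    # "730D" from the end is converted to a proportion of the full time span, and the
    # latest timestamp allowed to carry a seasonality changepoint is taken at the
    # corresponding row index; every detected changepoint should fall at or before it.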
for component in trained_model["seasonality_changepoint_dates"].keys():
if len(trained_model["seasonality_changepoint_dates"][component]) > 0:
assert trained_model["seasonality_changepoint_dates"][component][-1] <= last_date_to_have_changepoint
# tests forecasting the future
pred = silverkite.predict_no_sim(
fut_df=pd.DataFrame({
"ts": pd.date_range(start=df_pt["ts"].iloc[-1], periods=10, freq="D")
}),
trained_model=trained_model
)
assert pred.shape[0] == 10
assert "y" in pred.columns
def test_forecast_silverkite_hourly_changepoint_uniform(hourly_data):
"""Tests forecast_silverkite with uniform changepoints"""
train_df = hourly_data["train_df"]
test_df = hourly_data["test_df"]
fut_time_num = hourly_data["fut_time_num"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
changepoints_dict={
"method": "uniform",
"n_changepoints": 2,
"continuous_time_col": "ct1",
"growth_func": lambda x: x})
changepoint_dates = get_changepoint_dates_from_changepoints_dict(
changepoints_dict={
"method": "uniform",
"n_changepoints": 2,
"continuous_time_col": "ct1",
"growth_func": lambda x: x},
df=train_df,
time_col=TIME_COL
)
changepoint_cols = get_pattern_cols(trained_model["pred_cols"], "^changepoint")
# checks that there are two changepoints
assert len(changepoint_cols) == 2
assert "changepoint0" + pd.to_datetime(changepoint_dates[0]).strftime('_%Y_%m_%d_%H') \
in trained_model["pred_cols"]
assert "changepoint1" + pd.to_datetime(changepoint_dates[1]).strftime('_%Y_%m_%d_%H') \
in trained_model["pred_cols"]
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
    # checks that predict_no_sim agrees with predict_n_no_sim
fut_df_via_predict = silverkite.predict_no_sim(
fut_df=test_df,
trained_model=trained_model)
assert_frame_equal(
fut_df[[TIME_COL]],
fut_df_via_predict[[TIME_COL]],
check_like=True)
assert_frame_equal(
fut_df,
fut_df_via_predict,
check_like=True)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_hourly_changepoint_custom(hourly_data):
"""Tests forecast_silverkite with custom changepoints"""
train_df = hourly_data["train_df"]
test_df = hourly_data["test_df"]
fut_time_num = hourly_data["fut_time_num"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
changepoints_dict={
"method": "custom",
"dates": [train_df[TIME_COL][100],
train_df[TIME_COL][500],
train_df[TIME_COL][1000]],
"continuous_time_col": "ct1",
"growth_func": lambda x: x})
changepoint_dates = get_changepoint_dates_from_changepoints_dict(
changepoints_dict={
"method": "custom",
"dates": [train_df[TIME_COL][100],
train_df[TIME_COL][500],
train_df[TIME_COL][1000]],
"continuous_time_col": "ct1",
"growth_func": lambda x: x},
df=train_df,
time_col=TIME_COL
)
changepoint_cols = get_pattern_cols(trained_model["pred_cols"], "^changepoint")
# checks that there are three changepoints
assert len(changepoint_cols) == 3
assert "changepoint0" + pd.to_datetime(changepoint_dates[0]).strftime('_%Y_%m_%d_%H') \
in trained_model["pred_cols"]
assert "changepoint1" + pd.to_datetime(changepoint_dates[1]).strftime('_%Y_%m_%d_%H') \
in trained_model["pred_cols"]
assert "changepoint2" + pd.to_datetime(changepoint_dates[2]).strftime('_%Y_%m_%d_%H') \
in trained_model["pred_cols"]
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
    # checks that predict_no_sim agrees with predict_n_no_sim
fut_df_via_predict = silverkite.predict_no_sim(
fut_df=test_df,
trained_model=trained_model)
assert_frame_equal(
fut_df[[TIME_COL]],
fut_df_via_predict[[TIME_COL]],
check_like=True)
assert_frame_equal(
fut_df,
fut_df_via_predict,
check_like=True)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
def test_forecast_silverkite_hourly_changepoint_err(hourly_data):
"""Tests forecast_silverkite changepoint warnings and exceptions"""
train_df = hourly_data["train_df"]
silverkite = SilverkiteForecast()
with pytest.raises(
Exception,
match="changepoint method must be specified"):
silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
changepoints_dict={"n_changepoints": 2})
with pytest.raises(
NotImplementedError,
match="changepoint method.*not recognized"):
silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
changepoints_dict={"method": "not implemented"})
with pytest.warns(Warning) as record:
silverkite = SilverkiteForecast()
silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr", "changepoint1:ct_sqrt"],
changepoints_dict={
"method": "custom",
"dates": ["2048-07-01-23"],
"continuous_time_col": "ct1",
"growth_func": lambda x: x
})
assert "The following features in extra_pred_cols are removed for this training set:" \
" {'changepoint1:ct_sqrt'}." in record[0].message.args[0]
def test_forecast_silverkite_with_autoreg(hourly_data):
"""Tests forecast_silverkite autoregression"""
train_df = hourly_data["train_df"]
test_df = hourly_data["test_df"][:168].reset_index(drop=True) # one week of data for testing
test_past_df = train_df.copy()
fut_time_num = test_df.shape[0]
# we define a local function to apply `forecast_silverkite`
# with and without autoregression
def fit_forecast(
autoreg_dict=None,
test_past_df=None,
simulation_based=False):
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow"],
"period": [24.0, 7.0],
"order": [1, 1],
"seas_names": ["daily", "weekly"]}),
autoreg_dict=autoreg_dict,
simulation_based=simulation_based)
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
return {
"fut_df": fut_df,
"trained_model": trained_model}
# without autoregression
fut_df = fit_forecast(
autoreg_dict=None,
test_past_df=None)["fut_df"]
# with autoregression
autoreg_dict = {
"lag_dict": {"orders": [168]},
"agg_lag_dict": {
"orders_list": [[168, 168 * 2, 168 * 3]],
"interval_list": [(168, 168 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()}
fut_df_with_autoreg = fit_forecast(
autoreg_dict=autoreg_dict,
test_past_df=test_past_df)["fut_df"]
# without autoregression
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 1) == 5.7
# with autoregression
err = calc_pred_err(test_df[VALUE_COL], fut_df_with_autoreg[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 1) == 1.9
"""
figs_path = "~/figs/"
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=figs_path + "forecast_without_autoreg.png",
plt_title="without auto-regression")
plt_comparison_forecast_vs_observed(
fut_df=fut_df_with_autoreg,
test_df=test_df,
file_name=figs_path + "forecast_with_autoreg.png",
plt_title="with auto-regression")
"""
# with autoregression option of "auto"
forecast = fit_forecast(
autoreg_dict="auto",
test_past_df=test_past_df)
fut_df_with_autoreg = forecast["fut_df"]
trained_model = forecast["trained_model"]
autoreg_dict = trained_model["autoreg_dict"]
assert autoreg_dict["lag_dict"] == {"orders": [24, 25, 26]}
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[168, 336, 504]]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(24, 191), (192, 359)]
assert trained_model["forecast_horizon"] == 24
err = calc_pred_err(test_df[VALUE_COL], fut_df_with_autoreg[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 1) == 2.5
# with autoregression option of "auto" and simulation based
forecast = fit_forecast(
autoreg_dict="auto",
test_past_df=test_past_df,
simulation_based=True)
fut_df_with_autoreg = forecast["fut_df"]
trained_model = forecast["trained_model"]
autoreg_dict = trained_model["autoreg_dict"]
assert autoreg_dict["lag_dict"] == {"orders": [1, 2, 3]}
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[168, 336, 504]]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(1, 168), (169, 336)]
assert trained_model["forecast_horizon"] == 24
err = calc_pred_err(test_df[VALUE_COL], fut_df_with_autoreg[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 1) == 2.8
expected_match = "is not implemented"
with pytest.raises(ValueError, match=expected_match):
fit_forecast(
autoreg_dict="non-existing-method",
test_past_df=test_past_df,
simulation_based=True)
def test_forecast_silverkite_with_lagged_regressor(lagged_regressor_dict):
"""Tests forecast_silverkite with lagged regressors"""
hourly_data = generate_df_with_reg_for_tests(
freq="H",
periods=24 * 500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
regressor_cols = ["regressor1", "regressor_bool", "regressor_categ"]
train_df = hourly_data["train_df"].reset_index(drop=True)
test_df = hourly_data["test_df"].reset_index(drop=True)
fut_time_num = hourly_data["fut_time_num"]
# we define a local function to apply `forecast_silverkite`
# with and without lagged regressors
def fit_forecast_with_regressor(
regressor_cols=[],
lagged_regressor_cols=[],
lagged_regressor_dict=None,
test_past_df=None):
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow"],
"period": [24.0, 7.0],
"order": [1, 1],
"seas_names": ["daily", "weekly"]}),
extra_pred_cols=["ct1"] + regressor_cols,
lagged_regressor_dict=lagged_regressor_dict)
all_extra_cols = regressor_cols
for col in lagged_regressor_cols:
if col not in all_extra_cols:
all_extra_cols.append(col)
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=test_df[all_extra_cols])
return {
"fut_df": fut_df,
"trained_model": trained_model}
# without lagged regressors
res = fit_forecast_with_regressor(
regressor_cols=regressor_cols,
lagged_regressor_cols=[],
lagged_regressor_dict=None)
fut_df = res["fut_df"]
trained_model = res["trained_model"]
# with lagged regressors
res = fit_forecast_with_regressor(
regressor_cols=regressor_cols,
lagged_regressor_cols=regressor_cols,
lagged_regressor_dict=lagged_regressor_dict)
fut_df_with_lagged_regressor = res["fut_df"]
trained_model_with_lagged_regressor = res["trained_model"]
# with lagged regressors but no regressors
res = fit_forecast_with_regressor(
regressor_cols=[],
lagged_regressor_cols=regressor_cols,
lagged_regressor_dict=lagged_regressor_dict)
fut_df_no_regressor = res["fut_df"]
trained_model_no_regressor = res["trained_model"]
# testing errors
# without lagged regressors
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
e1 = err[enum.get_metric_name()]
assert round(e1, 1) == 2.7
# with lagged regressors
err = calc_pred_err(test_df[VALUE_COL], fut_df_with_lagged_regressor[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
e2 = err[enum.get_metric_name()]
assert e2 > 0 and e2 / e1 < 0.8
# with lagged regressors but no regressors
err = calc_pred_err(test_df[VALUE_COL], fut_df_no_regressor[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
e3 = err[enum.get_metric_name()]
assert e3 > e2
# trained models
assert trained_model["has_lagged_regressor_structure"] is False
assert trained_model_with_lagged_regressor["has_lagged_regressor_structure"] is True
assert trained_model_with_lagged_regressor["lagged_regressor_dict"] == lagged_regressor_dict
assert trained_model_with_lagged_regressor["lagged_regressor_func"] is not None
assert trained_model_with_lagged_regressor["min_lagged_regressor_order"] == 1
assert trained_model_with_lagged_regressor["max_lagged_regressor_order"] == 504
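    # The expected column lists below illustrate the naming conventions: Fourier terms
    # appear as ``sin<k>_<name>_<seas_name>`` / ``cos<k>_<name>_<seas_name>`` (e.g.
    # ``sin1_tod_daily``), individual lags as ``<regressor>_lag<order>``, and aggregated
    # lags as ``<regressor>_avglag_<orders>`` or ``<regressor>_avglag_<a>_to_<b>``.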
expected_pred_cols = [
'ct1',
'regressor1',
'regressor_bool',
'regressor_categ',
'sin1_tod_daily',
'cos1_tod_daily',
'sin1_tow_weekly',
'cos1_tow_weekly']
expected_pred_cols_with_lagged_regressor = [
'ct1',
'regressor1',
'regressor_bool',
'regressor_categ',
'sin1_tod_daily',
'cos1_tod_daily',
'sin1_tow_weekly',
'cos1_tow_weekly',
'regressor1_lag1',
'regressor1_lag168',
'regressor1_avglag_168_336_504',
'regressor1_avglag_169_to_336',
'regressor_bool_lag1',
'regressor_bool_lag168',
'regressor_bool_avglag_168_336_504',
'regressor_bool_avglag_169_to_336',
'regressor_categ_lag1',
'regressor_categ_lag168']
expected_pred_cols_no_regressor = [
'ct1',
'sin1_tod_daily',
'cos1_tod_daily',
'sin1_tow_weekly',
'cos1_tow_weekly',
'regressor1_lag1',
'regressor1_lag168',
'regressor1_avglag_168_336_504',
'regressor1_avglag_169_to_336',
'regressor_bool_lag1',
'regressor_bool_lag168',
'regressor_bool_avglag_168_336_504',
'regressor_bool_avglag_169_to_336',
'regressor_categ_lag1',
'regressor_categ_lag168']
assert trained_model["pred_cols"] == expected_pred_cols
assert trained_model_with_lagged_regressor["pred_cols"] == expected_pred_cols_with_lagged_regressor
assert trained_model_no_regressor["pred_cols"] == expected_pred_cols_no_regressor
trained_mape = trained_model["training_evaluation"]["MAPE"]
trained_mape_with_lagged_regressor = trained_model_with_lagged_regressor["training_evaluation"]["MAPE"]
trained_mape_no_regressor = trained_model_no_regressor["training_evaluation"]["MAPE"]
assert round(trained_mape, 0) == 446
assert round(trained_mape_with_lagged_regressor, 0) == 337
assert round(trained_mape_no_regressor, 0) == 315
def test_forecast_silverkite_with_true_lagged_regressor():
"""Tests efficacy of lagged regressor by a timeseries generated by a true lagged regressor"""
n = 1000
date_list = pd.date_range(
start=datetime.datetime(2018, 7, 1),
periods=n,
freq="D").tolist()
regressor = pd.Series(np.round(np.sin(np.array(range(n))), 8))
lagged_regressor = regressor.shift(3).bfill().ffill()
y = 10 + lagged_regressor
df = pd.DataFrame({
"ts": date_list,
"regressor1": regressor,
"regressor1_lag": lagged_regressor,
"y": y})
train_df = df.iloc[:800].reset_index(drop=True)
test_df = df.iloc[800:].reset_index(drop=True)
fut_time_num = test_df.shape[0]
regressor_cols = ["regressor1"]
lagged_regressor_dict = {
"regressor1": {"lag_dict": {"orders": [3]}}}
def fit_forecast_with_regressor(
regressor_cols=[],
lagged_regressor_cols=[],
lagged_regressor_dict=None):
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=None,
extra_pred_cols=["ct1"] + regressor_cols,
lagged_regressor_dict=lagged_regressor_dict)
all_extra_cols = regressor_cols
for col in lagged_regressor_cols:
if col not in all_extra_cols:
all_extra_cols.append(col)
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=test_df[all_extra_cols])
return {
"fut_df": fut_df,
"trained_model": trained_model}
# with regressors but no lagged regressors
res = fit_forecast_with_regressor(
regressor_cols=regressor_cols,
lagged_regressor_cols=[],
lagged_regressor_dict=None)
fut_df = res["fut_df"]
trained_model = res["trained_model"]
assert trained_model["pred_cols"] == ["ct1", "regressor1"]
# with lagged regressors
res = fit_forecast_with_regressor(
regressor_cols=regressor_cols,
lagged_regressor_cols=regressor_cols,
lagged_regressor_dict=lagged_regressor_dict)
fut_df_with_lagged_regressor = res["fut_df"]
trained_model_with_lagged_regressor = res["trained_model"]
assert trained_model_with_lagged_regressor["pred_cols"] == ["ct1", "regressor1", "regressor1_lag3"]
# with lagged regressors but no regressors
res = fit_forecast_with_regressor(
regressor_cols=[],
lagged_regressor_cols=regressor_cols,
lagged_regressor_dict=lagged_regressor_dict)
fut_df_no_regressor = res["fut_df"]
trained_model_no_regressor = res["trained_model"]
assert trained_model_no_regressor["pred_cols"] == ["ct1", "regressor1_lag3"]
# checks lagged regressor efficacy by comparing prediction errors
# with regressors but no lagged regressors
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
e1 = err[enum.get_metric_name()]
# with lagged regressors
err = calc_pred_err(test_df[VALUE_COL], fut_df_with_lagged_regressor[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
e2 = err[enum.get_metric_name()]
# with lagged regressors but no regressors
err = calc_pred_err(test_df[VALUE_COL], fut_df_no_regressor[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
e3 = err[enum.get_metric_name()]
assert e2 < 0.1 * e1
assert e3 < 0.1 * e1
assert e2 < e3
def test_forecast_silverkite_2min():
"""Tests silverkite on 2min data"""
data = generate_df_for_tests(
freq="2min",
periods=24 * 30 * 20,
train_frac=0.9,
train_end_date=None,
noise_std=0.1)
train_df = data["train_df"]
test_df = data["test_df"]
fut_time_num = data["fut_time_num"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5],
"seas_names": [None, "weekly", "yearly"]}),
extra_pred_cols=["ct_sqrt", "dow_hr"])
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="2min",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.5
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 1.2
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_with_weighted_col():
"""Tests silverkite on 2min data"""
data = generate_df_for_tests(
freq="1D",
periods=400,
train_frac=0.9,
train_end_date=None,
noise_std=0.1)
train_df = data["train_df"]
test_df = data["test_df"]
fut_time_num = data["fut_time_num"]
silverkite = SilverkiteForecast()
# Tests without weighted regression
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tow", "conti_year"],
"period": [7.0, 1.0],
"order": [5, 5],
"seas_names": ["weekly", "yearly"]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
fit_algorithm="ridge",
regression_weight_col=None)
assert trained_model["regression_weight_col"] is None
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="1D",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 2) == 0.21
# Tests with weighted regression
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tow", "conti_year"],
"period": [7.0, 1.0],
"order": [5, 5],
"seas_names": ["weekly", "yearly"]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
fit_algorithm="ridge",
regression_weight_col="ct1")
assert trained_model["regression_weight_col"] == "ct1"
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="1D",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
# The error is slightly smaller than before
assert round(err[enum.get_metric_name()], 2) == 0.20
def test_forecast_silverkite_2min_with_uncertainty():
"""Tests silverkite on 2min data"""
res = generate_df_for_tests(
freq="2min",
periods=24 * 50 * 30,
train_frac=0.8,
train_end_date=None,
noise_std=0.1)
train_df = res["train_df"]
test_df = res["test_df"][:24 * 30 * 7]
fut_time_num = test_df.shape[0]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 3],
"seas_names": [None, "weekly", "yearly"]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}}
)
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="2min",
new_external_regressor_df=None)
fut_df["y_true"] = test_df["y"]
fut_df["inside_95_ci"] = fut_df.apply(
lambda row: (
(row["y_true"] <= row["y_quantile_summary"][1])
and (row["y_true"] >= row["y_quantile_summary"][0])),
axis=1)
ci_coverage = 100.0 * fut_df["inside_95_ci"].mean()
assert round(ci_coverage) == 91, (
"95 percent CI coverage is not as expected (91%)")
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.5
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 1.2
def test_forecast_silverkite_simulator():
"""Tests silverkite simulator on hourly data with linear model fit"""
data = generate_df_for_tests(
freq="H",
periods=100 * 30,
train_frac=0.8,
train_end_date=None,
noise_std=0.3)
train_df = data["train_df"]
test_df = data["test_df"][:30 * 7]
fut_df = test_df.copy()
fut_df[VALUE_COL] = None
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}})
past_df = train_df[[TIME_COL, VALUE_COL]].copy()
# simulations with error
sim_df = silverkite.simulate(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
include_err=True)
np.random.seed(123)
assert sim_df[VALUE_COL].dtype == "float64"
err = calc_pred_err(test_df[VALUE_COL], sim_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert round(err[enum.get_metric_name()], 2) == 0.97
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 2) == 0.55
# simulations without errors
sim_df = silverkite.simulate(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
include_err=False)
np.random.seed(123)
assert sim_df[VALUE_COL].dtype == "float64"
err = calc_pred_err(test_df[VALUE_COL], sim_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert round(err[enum.get_metric_name()], 2) == 0.98
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 2) == 0.47
# multiple simulations
sim_df = silverkite.simulate_multi(
fut_df=fut_df,
trained_model=trained_model,
sim_num=2,
past_df=past_df,
new_external_regressor_df=None,
include_err=False)
assert sim_df[VALUE_COL].dtype == "float64"
assert sim_df.shape[0] == fut_df.shape[0] * 2
assert list(sim_df.columns) == [TIME_COL, VALUE_COL, "sim_label"]
"""
# making a plot of comparison between 10 simulations and observed
sim_num = 10
sim_labels = [f"sim{i}" for i in range(sim_num)]
colors_dict = {label: "grey" for label in sim_labels}
df_dict = {}
np.random.seed(123)
for sim_label in sim_labels:
sim_df = silverkite.simulate(
fut_df=fut_df,
trained_model=trained_model,
past_df=train_df[[TIME_COL, VALUE_COL]].copy(),
new_external_regressor_df=None,
include_err=True)
df_dict[sim_label] = sim_df
df_dict.update({"obs": test_df})
colors_dict.update({"obs": "red"})
legends_dict = {"sim1": "sim", "obs": "obs"}
    from greykite.common.viz.timeseries_plotting_mpl import plt_compare_timeseries
plt_compare_timeseries(
df_dict=df_dict,
time_col=TIME_COL,
value_col=VALUE_COL,
colors_dict=colors_dict,
legends_dict=legends_dict,
plt_title="",
linewidth=1)
from pathlib import Path
import os
import matplotlib.pyplot as plt
directory = Path(__file__).parents[6]
file_name = os.path.join(
directory,
"simulated_timeseries_vs_observed.png")
plt.savefig(file_name)
plt.close()
"""
def test_forecast_silverkite_simulator_exception():
"""Tests silverkite simulator exception catch"""
data = generate_df_for_tests(
freq="H",
periods=24 * 30,
train_frac=0.8,
train_end_date=None,
noise_std=0.3)
train_df = data["train_df"]
test_df = data["test_df"][:7]
fut_df = test_df.copy()
fut_df[VALUE_COL] = None
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"],
uncertainty_dict=None)
past_df = train_df[[TIME_COL, VALUE_COL]].copy()
# testing for Exception
expected_match = (
"Error is requested via ")
# `uncertainty_dict` is not passed to model.
# Therefore raising exception is expected.
with pytest.raises(ValueError, match=expected_match):
silverkite.simulate(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
include_err=True)
def test_forecast_silverkite_predict_via_sim():
"""Tests silverkite simulator on hourly data with linear model fit"""
data = generate_df_for_tests(
freq="H",
periods=100 * 30,
train_frac=0.8,
train_end_date=None,
noise_std=0.3)
train_df = data["train_df"]
test_df = data["test_df"][:30 * 7]
fut_df = test_df.copy()
fut_df[VALUE_COL] = None
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}})
past_df = train_df[[TIME_COL, VALUE_COL]].copy()
# predict via sim
np.random.seed(123)
fut_df = silverkite.predict_via_sim(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
sim_num=10,
include_err=True)
assert list(fut_df.columns) == [
TIME_COL,
VALUE_COL,
f"{VALUE_COL}_quantile_summary",
ERR_STD_COL]
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert round(err[enum.get_metric_name()], 2) == 0.98
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 2) == 0.48
"""
import os
from pathlib import Path
directory = Path(__file__).parents[6]
file_name = os.path.join(
directory,
"predict_silverkite_via_sim.png")
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=file_name)
# to plot CIs
plt_check_ci(fut_df=fut_df, test_df=test_df)
"""
def test_silverkite_predict():
"""Testing ``predict_silverkite``"""
data = generate_df_for_tests(
freq="D",
periods=300,
train_frac=0.8,
train_end_date=None,
noise_std=3,
remove_extra_cols=True,
autoreg_coefs=[10] * 24,
fs_coefs=[0.1, 1, 0.1],
growth_coef=2.0)
train_df = data["train_df"]
test_df = data["test_df"]
fut_df = test_df[:5].copy()
fut_df[VALUE_COL] = None
fut_df_with_gap = test_df[5:10].copy()
fut_df_with_gap[VALUE_COL] = None
fut_df_including_training = pd.concat(
[train_df, fut_df],
axis=0,
ignore_index=True)
fut_df_including_training[VALUE_COL] = None
autoreg_dict = {
"lag_dict": {"orders": list(range(7, 14))},
"agg_lag_dict": None,
"series_na_fill_func": lambda s: s.bfill().ffill()}
# These are the columns we expect to get from the predictions
expected_fut_df_cols = [
TIME_COL, VALUE_COL, f"{VALUE_COL}_quantile_summary", ERR_STD_COL]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fit_algorithm="statsmodels_ols",
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [3, 5]}),
extra_pred_cols=["ct1", "dow_hr"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}},
autoreg_dict=autoreg_dict)
# ``fut_df`` does not include training data
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": 0,
"len_after_training": 5,
"len_gap": 0}
assert predict_info["fut_df"].shape[0] == fut_df.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# ``fut_df`` includes training data
predict_info = silverkite.predict(
fut_df=fut_df_including_training,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": train_df.shape[0],
"len_after_training": 5,
"len_gap": 0}
assert predict_info["fut_df"].shape[0] == fut_df_including_training.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# ``fut_df`` has a gap
# In this case simulations will be invoked
# This is because ``min_lag_order < forecast_horizon``
predict_info = silverkite.predict(
fut_df=fut_df_with_gap,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert not predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 10
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": 0,
"len_after_training": 5,
"len_gap": 5}
assert predict_info["fut_df"].shape[0] == fut_df_with_gap.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
def test_predict_silverkite_with_regressors():
"""Testing ``predict_silverkite`` in presence of regressors"""
data = generate_df_with_reg_for_tests(
freq="D",
periods=500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
fut_time_num = 5
len_gap = 4
train_df = data["train_df"]
test_df = data["test_df"]
fut_df = test_df[:fut_time_num].reset_index(drop=True)
fut_df[VALUE_COL] = None
fut_df_with_gap = test_df[len_gap:(len_gap + fut_time_num)].copy()
fut_df_with_gap[VALUE_COL] = None
fut_df_including_training = pd.concat(
[train_df, fut_df],
axis=0,
ignore_index=True)
fut_df_including_training[VALUE_COL] = None
regressor_cols = ["regressor1", "regressor_bool", "regressor_categ"]
autoreg_dict = {
"lag_dict": {"orders": list(range(7, 14))},
"agg_lag_dict": None,
"series_na_fill_func": lambda s: s.bfill().ffill()}
# These are the columns we expect to get from the predictions
expected_fut_df_cols = [
TIME_COL, VALUE_COL, f"{VALUE_COL}_quantile_summary", ERR_STD_COL]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fit_algorithm="statsmodels_ols",
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [3, 5]}),
extra_pred_cols=["ct1", "dow_hr"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}},
autoreg_dict=autoreg_dict)
# (Case 1.a) ``fut_df`` does not include training data
# regressors passed through ``fut_df``
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == fut_time_num
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": 0,
"len_after_training": fut_time_num,
"len_gap": 0}
assert predict_info["fut_df"].shape[0] == fut_df.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# (Case 1.b) ``fut_df`` does not include training data
# regressors passed separately
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df[[TIME_COL]],
trained_model=trained_model,
past_df=None,
new_external_regressor_df=fut_df[regressor_cols].copy(),
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == fut_time_num
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": 0,
"len_after_training": fut_time_num,
"len_gap": 0}
assert predict_info["fut_df"].shape[0] == fut_df.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# (Case 2.a) ``fut_df`` includes training data.
# Regressors passed through ``fut_df``
predict_info = silverkite.predict(
fut_df=fut_df_including_training,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == fut_time_num
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": train_df.shape[0],
"len_after_training": fut_time_num,
"len_gap": 0}
assert predict_info["fut_df"].shape[0] == fut_df_including_training.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# (Case 2.b) ``fut_df`` includes training data.
# Regressors passed directly.
predict_info = silverkite.predict(
fut_df=fut_df_including_training[[TIME_COL]],
trained_model=trained_model,
past_df=None,
new_external_regressor_df=fut_df_including_training[regressor_cols].copy(),
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == fut_time_num
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": train_df.shape[0],
"len_after_training": fut_time_num,
"len_gap": 0}
assert predict_info["fut_df"].shape[0] == fut_df_including_training.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# (Case 3.a) ``fut_df`` has a gap.
# Regressors passed through ``fut_df``.
# In this case simulations will be invoked.
# This is because ``min_lag_order < forecast_horizon``.
predict_info = silverkite.predict(
fut_df=fut_df_with_gap,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert not predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == len_gap + fut_time_num
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": 0,
"len_after_training": fut_time_num,
"len_gap": len_gap}
assert predict_info["fut_df"].shape[0] == fut_df_with_gap.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
# (Case 3.b) ``fut_df`` has a gap.
# Regressors passed directly
# In this case simulations will be invoked.
# This is because ``min_lag_order < forecast_horizon``.
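# The gap rows are reconstructed in ``fut_df_info["fut_df_gap"]``; with the simple
# interpolate-then-backfill ``na_fill_func`` below, the regressor values of the first
# row after the gap are carried back over the gap, which ``expected_fut_df_gap``
# verifies at the end of this test.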
predict_info = silverkite.predict(
fut_df=fut_df_with_gap[[TIME_COL]],
trained_model=trained_model,
past_df=None,
new_external_regressor_df=fut_df_with_gap[regressor_cols].copy(),
sim_num=5,
include_err=None,
force_no_sim=False,
na_fill_func=lambda s: s.interpolate().bfill())  # a simple NA fill is used so the expected values are easy to track
assert not predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == len_gap + fut_time_num
assert predict_info["min_lag_order"] == 7
assert predict_info["fut_df_info"]["forecast_partition_summary"] == {
"len_before_training": 0,
"len_within_training": 0,
"len_after_training": fut_time_num,
"len_gap": len_gap}
assert predict_info["fut_df"].shape[0] == fut_df_with_gap.shape[0]
assert list(predict_info["fut_df"].columns) == expected_fut_df_cols
fut_df_gap = predict_info["fut_df_info"]["fut_df_gap"]
expected_time_gaps = pd.date_range(
start=train_df.tail(1)[TIME_COL].values[0] + pd.to_timedelta("1D"),
periods=len_gap,
freq="1D")
expected_fut_df_gap = pd.DataFrame({
TIME_COL: expected_time_gaps,
"regressor1": [test_df.iloc[len_gap]["regressor1"]]*len_gap,
"regressor_bool": [test_df.iloc[len_gap]["regressor_bool"]]*len_gap,
"regressor_categ": [test_df.iloc[len_gap]["regressor_categ"]]*len_gap
})
expected_fut_df_gap[TIME_COL] = pd.to_datetime(expected_fut_df_gap[TIME_COL])
assert_frame_equal(fut_df_gap, expected_fut_df_gap)
def test_predict_silverkite_with_lagged_regressors():
"""Testing ``SilverkiteForecast.predict`` in presence of lagged regressors"""
data = generate_df_with_reg_for_tests(
freq="1D",
periods=20 * 7, # short-term: 20 weeks of data
remove_extra_cols=True,
mask_test_actuals=True)
regressor_cols = ["regressor1", "regressor2", "regressor_categ"]
keep_cols = [TIME_COL, VALUE_COL] + regressor_cols
train_df = data["train_df"][keep_cols]
test_df = data["test_df"][keep_cols]
fut_df = test_df.copy()
# Specify 2 configurations of autoreg_dict
# autoreg_dict1 would need simulation in predict phase
# autoreg_dict2 does not need simulation in predict phase
autoreg_dict1 = {
"lag_dict": {"orders": [7]},
"agg_lag_dict": {
"orders_list": [[7, 7*2, 7*3]],
"interval_list": [(8, 7*2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()}
autoreg_dict2 = {
"lag_dict": {"orders": [28]},
"agg_lag_dict": {
"orders_list": [],
"interval_list": [(7*4 + 1, 7*5)]},
"series_na_fill_func": lambda s: s.bfill().ffill()}
lagged_regressor_dict = {
"regressor1": {
"lag_dict": {"orders": [1, 2, 3]},
"agg_lag_dict": {
"orders_list": [[7, 7 * 2, 7 * 3]],
"interval_list": [(8, 7 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()},
"regressor2": "auto"
}
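# "auto" leaves the lag configuration of ``regressor2`` to the library defaults;
# the resulting columns (e.g. ``regressor2_lag35``) are listed in ``expected_lag_cols`` below.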
fs_components_df = pd.DataFrame({
"name": ["tow", "conti_year"],
"period": [7.0, 1.0],
"order": [3, 0],
"seas_names": ["weekly", None]})
# Has autoregression and simulation is used in predict phase
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
fit_algorithm="linear",
fs_components_df=fs_components_df,
extra_pred_cols=regressor_cols,
autoreg_dict=autoreg_dict1,
lagged_regressor_dict=lagged_regressor_dict)
np.random.seed(123)
result1 = silverkite.predict(
fut_df=fut_df.head(10), # this is bigger than the minimal order in autoreg_dict1
trained_model=trained_model,
past_df=train_df,
new_external_regressor_df=None,
force_no_sim=False)
expected_lag_cols = [
"y_lag7",
"y_avglag_7_14_21",
"y_avglag_8_to_14",
"regressor1_lag1",
"regressor1_lag2",
"regressor1_lag3",
"regressor1_avglag_7_14_21",
"regressor1_avglag_8_to_14",
"regressor2_lag35",
"regressor2_avglag_35_42_49",
"regressor2_avglag_30_to_36"]
assert set(expected_lag_cols).issubset(trained_model["pred_cols"])
assert result1["fut_df"].shape == (10, 2)
assert result1["fut_df"].isna().sum().sum() == 0
# Has autoregression and simulation is not used in predict phase
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
fit_algorithm="linear",
fs_components_df=fs_components_df,
extra_pred_cols=regressor_cols,
autoreg_dict=autoreg_dict2,
lagged_regressor_dict=lagged_regressor_dict)
np.random.seed(123)
result2 = silverkite.predict(
fut_df=fut_df,
trained_model=trained_model,
past_df=train_df,
new_external_regressor_df=None,
force_no_sim=False)
expected_lag_cols = [
"y_lag28",
"y_avglag_29_to_35",
"regressor1_lag1",
"regressor1_lag2",
"regressor1_lag3",
"regressor1_avglag_7_14_21",
"regressor1_avglag_8_to_14",
"regressor2_lag35",
"regressor2_avglag_35_42_49",
"regressor2_avglag_30_to_36"]
assert set(expected_lag_cols).issubset(trained_model["pred_cols"])
assert result2["fut_df"].shape == (27, 2)
assert result2["fut_df"].isna().sum().sum() == 0
# No autoregression
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
fit_algorithm="linear",
fs_components_df=fs_components_df,
extra_pred_cols=regressor_cols,
autoreg_dict=None,
lagged_regressor_dict=lagged_regressor_dict)
np.random.seed(123)
result3 = silverkite.predict(
fut_df=fut_df,
trained_model=trained_model,
past_df=train_df,
new_external_regressor_df=None,
force_no_sim=False)
expected_lag_cols = [
"regressor1_lag1",
"regressor1_lag2",
"regressor1_lag3",
"regressor1_avglag_7_14_21",
"regressor1_avglag_8_to_14",
"regressor2_lag35",
"regressor2_avglag_35_42_49",
"regressor2_avglag_30_to_36"]
assert set(expected_lag_cols).issubset(trained_model["pred_cols"])
assert result3["fut_df"].shape == (27, 2)
assert result3["fut_df"].isna().sum().sum() == 0
def test_predict_silverkite_exceptions():
"""Testing ``predict_silverkite``"""
data = generate_df_for_tests(
freq="D",
periods=300,
train_frac=0.8,
train_end_date=None,
noise_std=3,
remove_extra_cols=True,
autoreg_coefs=[10] * 24,
fs_coefs=[0.1, 1, 0.1],
growth_coef=2.0)
train_df = data["train_df"]
test_df = data["test_df"]
fut_df = test_df.copy()
fut_df[VALUE_COL] = None
fut_df_with_before_training = train_df[[TIME_COL]].copy()
fut_df_with_before_training[TIME_COL] = (
fut_df_with_before_training[TIME_COL] - datetime.timedelta(days=1))
autoreg_dict = {
"lag_dict": {"orders": list(range(7, 14))},
"agg_lag_dict": None,
"series_na_fill_func": lambda s: s.bfill().ffill()}
uncertainty_dict = {
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}}
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fit_algorithm="statsmodels_ols",
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct1", "dow_hr"],
uncertainty_dict=uncertainty_dict,
autoreg_dict=autoreg_dict)
# Trains a model with uncertainty
trained_model_no_uncertainty = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fit_algorithm="statsmodels_ols",
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]}),
extra_pred_cols=["ct1", "dow_hr"],
uncertainty_dict=None,
autoreg_dict=autoreg_dict)
# Checks the exception for ``include_err=True`` when the model does not support uncertainty
expected_match = "However model does not support uncertainty. "
with pytest.raises(ValueError, match=expected_match):
silverkite.predict(
fut_df=fut_df,
trained_model=trained_model_no_uncertainty,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=True,
force_no_sim=False)
expected_match = "cannot have timestamps occurring before the training data"
with pytest.raises(ValueError, match=expected_match):
silverkite.predict(
fut_df=fut_df_with_before_training,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
expected_match = "must be a dataframe of non-zero size"
with pytest.raises(ValueError, match=expected_match):
silverkite.predict(
fut_df=fut_df.iloc[0:0],
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
expected_match = "which is what ``trained_model`` considers to be the time column"
with pytest.raises(ValueError, match=expected_match):
fut_df0 = fut_df[[TIME_COL]]
fut_df0.columns = ["dummy_ts"]
silverkite.predict(
fut_df=fut_df0,
trained_model=trained_model,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
def test_predict_silverkite_compare_various_ways():
"""Testing various ways to perform prediction using silverkite model.
Make sure predictions match when expected."""
data = generate_df_for_tests(
freq="H",
periods=24 * 300,
train_frac=0.8,
train_end_date=None,
noise_std=3,
remove_extra_cols=True,
autoreg_coefs=[10] * 24,
fs_coefs=[0.1, 1, 0.1],
growth_coef=2.0)
train_df = data["train_df"]
test_df = data["test_df"][:5]
fut_df = test_df.copy()
fut_df[VALUE_COL] = None
# With autoregression using recent lags only (min lag order = 1)
autoreg_dict_recent_lag = {
"lag_dict": {"orders": list(range(1, 3))},
"agg_lag_dict": None,
"series_na_fill_func": lambda s: s.bfill().ffill()}
# With autoregression using aggregated old lags only (min lag order = 168)
autoreg_dict_old_lag_only = {
"lag_dict": None,
"agg_lag_dict": {
"orders_list": [[168, 168 * 2]],
"interval_list": [(168, 168 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()}
silverkite = SilverkiteForecast()
def fit_silverkite(autoreg_dict):
return silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fit_algorithm="statsmodels_ols",
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [3, 5]}),
extra_pred_cols=["ct1", "dow_hr"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}},
autoreg_dict=autoreg_dict)
trained_model_old_lag_only = fit_silverkite(
autoreg_dict=autoreg_dict_old_lag_only)
trained_model_with_recent_lag = fit_silverkite(
autoreg_dict=autoreg_dict_recent_lag)
trained_model_no_autoreg = fit_silverkite(autoreg_dict=None)
# (Case 1) First the case with autoregression using old lags only.
# In this case we expect that the no-simulation approach will be used
# by ``silverkite.predict_n`` and ``predict_silverkite``,
# because ``min_lag_order`` is 168 while the forecast horizon is 5.
np.random.seed(123)
fut_df_with_ar = silverkite.predict_n_no_sim(
fut_time_num=test_df.shape[0],
trained_model=trained_model_old_lag_only,
freq="1H",
new_external_regressor_df=None)
# Directly uses ``silverkite.predict_n``, which decides internally whether simulations are needed.
# We expect the same result as above.
np.random.seed(123)
predict_info = silverkite.predict_n(
fut_time_num=test_df.shape[0],
trained_model=trained_model_old_lag_only,
freq="1H",
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 168
fut_df_with_ar_2 = predict_info["fut_df"]
# Uses ``predict_silverkite``
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df,
trained_model=trained_model_old_lag_only,
past_df=train_df[[TIME_COL, VALUE_COL]].copy(),
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 168
fut_df_with_ar_3 = predict_info["fut_df"]
# Checks the case where `past_df` is not passed
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df.copy(),
trained_model=trained_model_old_lag_only,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 168
fut_df_with_ar_4 = predict_info["fut_df"]
# We expect the four calls above to produce the exact same future predictions
assert_frame_equal(
fut_df_with_ar[[TIME_COL, VALUE_COL]],
fut_df_with_ar_2[[TIME_COL, VALUE_COL]])
assert_frame_equal(
fut_df_with_ar[[TIME_COL, VALUE_COL]],
fut_df_with_ar_3[[TIME_COL, VALUE_COL]])
assert_frame_equal(
fut_df_with_ar[[TIME_COL, VALUE_COL]],
fut_df_with_ar_4[[TIME_COL, VALUE_COL]])
assert list(fut_df_with_ar.columns) == [
TIME_COL,
VALUE_COL,
f"{VALUE_COL}_quantile_summary",
ERR_STD_COL]
# (Case 2) The case with recent (short) lags in the autoregression.
# In this case we expect that the simulation-based approach will be triggered
# by ``silverkite.predict_n`` and ``predict_silverkite``,
# because ``min_lag_order`` is 1 while the forecast horizon is 5.
np.random.seed(123)
fut_df_with_ar = silverkite.predict_n_via_sim(
fut_time_num=test_df.shape[0],
trained_model=trained_model_with_recent_lag,
freq="1H",
new_external_regressor_df=None,
sim_num=5,
include_err=None)
# Directly uses ``silverkite.predict_n``, which will use simulations.
# We expect the same result as above.
np.random.seed(123)
predict_info = silverkite.predict_n(
fut_time_num=test_df.shape[0],
trained_model=trained_model_with_recent_lag,
freq="1H",
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert not predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 1
fut_df_with_ar_2 = predict_info["fut_df"]
# Uses ``predict_silverkite``
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df.copy(),
trained_model=trained_model_with_recent_lag,
past_df=train_df[[TIME_COL, VALUE_COL]].copy(),
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert not predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 1
fut_df_with_ar_3 = predict_info["fut_df"]
# Checks the case when ``past_df`` is not passed.
np.random.seed(123)
predict_info = silverkite.predict(
fut_df=fut_df,
trained_model=trained_model_with_recent_lag,
past_df=None,
new_external_regressor_df=None,
sim_num=5,
include_err=None,
force_no_sim=False)
assert not predict_info["simulations_not_used"]
assert predict_info["fut_df_info"]["inferred_forecast_horizon"] == 5
assert predict_info["min_lag_order"] == 1
fut_df_with_ar_4 = predict_info["fut_df"]
# We expect the four calls above to produce the exact same future predictions
assert_frame_equal(
fut_df_with_ar[[TIME_COL, VALUE_COL]],
fut_df_with_ar_2[[TIME_COL, VALUE_COL]])
assert_frame_equal(
fut_df_with_ar[[TIME_COL, VALUE_COL]],
fut_df_with_ar_3[[TIME_COL, VALUE_COL]])
assert_frame_equal(
fut_df_with_ar[[TIME_COL, VALUE_COL]],
fut_df_with_ar_4[[TIME_COL, VALUE_COL]])
assert list(fut_df_with_ar.columns) == [
TIME_COL,
VALUE_COL,
f"{VALUE_COL}_quantile_summary",
ERR_STD_COL]
# (Case 3) Tests the cases with no AR
fut_df_no_ar = silverkite.predict_n_no_sim(
fut_time_num=test_df.shape[0],
trained_model=trained_model_no_autoreg,
freq="1H",
new_external_regressor_df=None)
# Directly calculated via ``silverkite.predict_n``
predict_info = silverkite.predict_n(
fut_time_num=test_df.shape[0],
trained_model=trained_model_no_autoreg,
freq="1H",
new_external_regressor_df=None)
fut_df_no_ar2 = predict_info["fut_df"]
# Directly calculated via ``predict_silverkite``
predict_info = silverkite.predict(
fut_df=fut_df.copy(),
trained_model=trained_model_no_autoreg,
past_df=train_df[[TIME_COL, VALUE_COL]].copy(),
new_external_regressor_df=None,
sim_num=10,
include_err=None,
force_no_sim=False)
fut_df_no_ar3 = predict_info["fut_df"]
# We expect the three calls above to produce the exact same future predictions
assert_frame_equal(
fut_df_no_ar[[TIME_COL, VALUE_COL]],
fut_df_no_ar2[[TIME_COL, VALUE_COL]])
assert_frame_equal(
fut_df_no_ar[[TIME_COL, VALUE_COL]],
fut_df_no_ar3[[TIME_COL, VALUE_COL]])
err = calc_pred_err(test_df[VALUE_COL], fut_df_with_ar[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] == pytest.approx(114.2, rel=1e-2)
err = calc_pred_err(test_df[VALUE_COL], fut_df_no_ar[VALUE_COL])
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] == pytest.approx(59.8, rel=1e-2)
"""
import os
from pathlib import Path
directory = Path(__file__).parents[6]
file_name = os.path.join(
directory,
"silverkite.predict_n(_via_sim.png")
plt_compare_timeseries(
df_dict={
"train data last part": train_df[-(24*60):],
"test data": test_df,
"forecast AR/sim": fut_df_with_ar_sim,
"forecast no AR": fut_df_no_ar},
time_col=TIME_COL,
value_col=VALUE_COL,
colors_dict={
"train data last part": "orange",
"test data": "red",
"forecast AR/sim": "green",
"forecast no AR": "olive"},
plt_title="")
if file_name is not None:
plt.savefig(file_name)
plt.close()
# to plot CIs
plt_check_ci(fut_df=fut_df_with_ar, test_df=test_df)
plt_check_ci(fut_df=fut_df_no_ar, test_df=test_df)
"""
def test_silverkite_predict_n_include_err_exception():
"""Testing for exception for `include_err=True` while
uncertainty is not passsed"""
data = generate_df_for_tests(
freq="H",
periods=24 * 300,
train_frac=0.8,
train_end_date=None,
noise_std=3,
remove_extra_cols=True,
autoreg_coefs=[10] * 24,
fs_coefs=[0.1, 1, 0.1],
growth_coef=2.0)
train_df = data["train_df"]
test_df = data["test_df"]
fut_df = test_df.copy()
fut_df[VALUE_COL] = None
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [0, 1]}),
extra_pred_cols=[],
uncertainty_dict=None,
autoreg_dict=None)
expected_match = "However model does not support uncertainty. "
with pytest.raises(ValueError, match=expected_match):
silverkite.predict_n(
fut_time_num=test_df.shape[0],
trained_model=trained_model,
freq="1H",
new_external_regressor_df=None,
sim_num=5,
include_err=True,
force_no_sim=False)
with pytest.raises(ValueError, match=expected_match):
silverkite.predict(
fut_df=fut_df,
trained_model=trained_model,
new_external_regressor_df=None,
sim_num=5,
include_err=True,
force_no_sim=False)
def test_forecast_silverkite_simulator_regressor():
"""Tests silverkite simulator with regressors"""
data = generate_df_with_reg_for_tests(
freq="D",
periods=500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
regressor_cols = ["regressor1", "regressor_bool", "regressor_categ"]
train_df = data["train_df"].reset_index(drop=True)
test_df = data["test_df"].reset_index(drop=True)
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
training_fraction=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [3, 5],
"seas_names": None}),
extra_pred_cols=["ct_sqrt", "dow_hr", "ct1"] + regressor_cols,
fit_algorithm="linear",
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}})
past_df = train_df[[TIME_COL, VALUE_COL]].copy()
sim_df = silverkite.simulate(
fut_df=test_df[[TIME_COL, VALUE_COL]],
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=test_df[regressor_cols],
include_err=True)
assert sim_df[VALUE_COL].dtype == "float64"
err = calc_pred_err(test_df[VALUE_COL], sim_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert round(err[enum.get_metric_name()], 2) == 0.56
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 2) == 2.83
# predict via sim
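# ``predict_via_sim`` aggregates the ``sim_num`` simulated paths into the point
# forecast plus the quantile-summary and error-std columns asserted below.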
np.random.seed(123)
fut_df = silverkite.predict_via_sim(
fut_df=test_df[[TIME_COL, VALUE_COL]],
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=test_df[regressor_cols],
sim_num=10,
include_err=True)
assert list(fut_df.columns) == [
TIME_COL,
VALUE_COL,
f"{VALUE_COL}_quantile_summary",
ERR_STD_COL]
assert sim_df[VALUE_COL].dtype == "float64"
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert round(err[enum.get_metric_name()], 2) == 0.65
enum = EvaluationMetricEnum.RootMeanSquaredError
assert round(err[enum.get_metric_name()], 2) == 2.35
"""
plt_comparison_forecast_vs_observed(
fut_df=sim_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_with_holidays_hourly():
"""Tests silverkite with holidays and seasonality interactions"""
res = generate_df_with_holidays(freq="H", periods=24 * 700)
train_df = res["train_df"]
test_df = res["test_df"]
fut_time_num = res["fut_time_num"]
# generate holidays
countries = ["US", "India"]
event_df_dict = get_holidays(countries, year_start=2015, year_end=2025)
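# Relabel each country's holidays with a single "<country>_holiday" label so that
# one event effect per country is modeled (rather than one per individual holiday).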
for country in countries:
event_df_dict[country][EVENT_DF_LABEL_COL] = country + "_holiday"
# custom seasonality names
fourier_col1 = get_fourier_col_name(
k=1,
col_name="tod",
function_name="sin",
seas_name="daily")
fourier_col2 = get_fourier_col_name(
k=1,
col_name="tod",
function_name="cos",
seas_name="daily")
fourier_col3 = get_fourier_col_name(1, "conti_year", function_name="cos")
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col="ts",
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5],
"seas_names": ["daily", "weekly", None]}),
extra_pred_cols=["ct_sqrt", "dow_hr", f"events_US*{fourier_col1}",
f"events_US*{fourier_col2}",
f"events_US*{fourier_col3}"],
daily_event_df_dict=event_df_dict)
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_with_holidays_effect():
"""Tests silverkite, modeling a separate effect per holiday
(instead of per holiday+country as in
test_forecast_silverkite_with_holidays_hourly)
"""
res = generate_df_with_holidays(freq="H", periods=24 * 700)
train_df = res["train_df"]
test_df = res["test_df"]
fut_time_num = res["fut_time_num"]
# generate holidays
countries = ["US", "India"]
holidays_to_model_separately = [
"New Year's Day",
"Christmas Day",
"Independence Day",
"Thanksgiving",
"Labor Day",
"Memorial Day",
"Veterans Day"]
event_df_dict = generate_holiday_events(
countries=countries,
holidays_to_model_separately=holidays_to_model_separately,
year_start=2015,
year_end=2025,
pre_num=0,
post_num=0)
# constant event effect at daily level
event_cols = [f"Q('events_{key}')" for key in event_df_dict.keys()]
# different hourly seasonality on weekends.
# fs_* matches the specification to "fs_components_df"
interaction_cols = cols_interact(
static_col="is_weekend",
fs_name="tod",
fs_order=3,
fs_seas_name="daily")
extra_pred_cols = ["ct_sqrt", "dow_hr"] + event_cols + interaction_cols
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col="ts",
value_col=VALUE_COL,
train_test_thresh=datetime.datetime(2019, 6, 1),
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5],
"seas_names": ["daily", "weekly", None]}),
extra_pred_cols=extra_pred_cols,
daily_event_df_dict=event_df_dict)
fut_df = silverkite.predict_n_no_sim(
fut_time_num=fut_time_num,
trained_model=trained_model,
freq="H",
new_external_regressor_df=None)
err = calc_pred_err(test_df[VALUE_COL], fut_df[VALUE_COL])
enum = EvaluationMetricEnum.Correlation
assert err[enum.get_metric_name()] > 0.3
enum = EvaluationMetricEnum.RootMeanSquaredError
assert err[enum.get_metric_name()] < 6.0
"""
plt_comparison_forecast_vs_observed(
fut_df=fut_df,
test_df=test_df,
file_name=None)
"""
def test_forecast_silverkite_train_test_thresh_error(hourly_data):
df = hourly_data["df"]
last_time_available = max(df[TIME_COL])
train_test_thresh = datetime.datetime(2020, 7, 1)
with pytest.raises(ValueError) as record:
silverkite = SilverkiteForecast()
silverkite.forecast(
df=df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=train_test_thresh,
origin_for_time_vars=None,
fs_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 0, 5]})
)
assert f"Input timestamp for the parameter 'train_test_threshold' " \
f"({train_test_thresh}) exceeds the maximum available " \
f"timestamp of the time series ({last_time_available})." \
f"Please pass a value within the range." in record[0].message.args[0]
def test_forecast_silverkite_with_imputation():
"""Tests ``forecast_silverkite`` with imputations"""
df = pd.DataFrame({
"ts": len(pd.date_range(start="1/1/2018", end="3/14/2018")),
"y": list(range(70)) + [np.nan]*3})
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
training_fraction=None,
origin_for_time_vars=None,
fs_components_df=None,
impute_dict={
"func": impute_with_lags,
"params": {"orders": [7]}})
impute_info = trained_model["impute_info"]
assert impute_info["initial_missing_num"] == 3
assert impute_info["final_missing_num"] == 0
imputed_df = impute_info["df"]
assert list(imputed_df["y"].values) == (
list(range(70)) + [63, 64, 65])
def test_forecast_silverkite_with_adjust_anomalous():
"""Tests ``forecast_silverkite`` with anomalous_data``"""
anomalous_data = generate_anomalous_data()
anomaly_df = anomalous_data["anomaly_df"]
df = anomalous_data["df"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
training_fraction=None,
origin_for_time_vars=None,
fs_components_df=None,
adjust_anomalous_dict={
"func": adjust_anomalous_data,
"params": {
"anomaly_df": anomaly_df,
"start_date_col": START_DATE_COL,
"end_date_col": END_DATE_COL,
"adjustment_delta_col": ADJUSTMENT_DELTA_COL,
"filter_by_dict": {"platform": "MOBILE"}}})
adj_df_info = trained_model["adjust_anomalous_info"]
adj_values = pd.Series([np.nan, np.nan, 2., 6., 7., 8., 6., 7., 8., 9.])
generic_test_adjust_anomalous_data(
value_col=VALUE_COL,
adj_df_info=adj_df_info,
adj_values=adj_values)
def test_silverkite_partition_fut_df():
"""Tests ``partition_fut_df``"""
freq = "1D"
data = generate_df_for_tests(
freq=freq,
periods=500,
train_frac=0.8,
train_end_date=None,
noise_std=0.1)
train_df = data["train_df"]
test_df = data["test_df"]
all_df = data["df"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [3, 3],
"seas_names": [None, "yearly"]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}}
)
# The case where ``fut_df`` is only future data and with no gaps
fut_df = test_df[[TIME_COL, VALUE_COL]]
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [True] * fut_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == 0
assert fut_df_stats["fut_df_after_training"].shape[0] == fut_df.shape[0]
assert fut_df_stats["fut_df_gap"] is None
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == fut_df.shape[0]
assert np.all(fut_df_stats["index_after_training_original"] == [True] * fut_df.shape[0])
assert fut_df_stats["missing_periods_num"] == 0
assert fut_df_stats["inferred_forecast_horizon"] == fut_df.shape[0]
# The case where ``fut_df`` is only future data and with gaps
fut_df = test_df[[TIME_COL, VALUE_COL]][2:]
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [True] * fut_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == 0
assert fut_df_stats["fut_df_after_training"].shape[0] == fut_df.shape[0]
assert fut_df_stats["fut_df_gap"].shape[0] == 2
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == fut_df.shape[0] + 2
assert np.all(fut_df_stats["index_after_training_original"] == [False] * 2 + [True] * fut_df.shape[0])
assert fut_df_stats["missing_periods_num"] == 2
assert fut_df_stats["inferred_forecast_horizon"] == fut_df.shape[0] + 2
# The case where ``fut_df`` is only part of the training data (no gaps as a result)
fut_df = train_df[[TIME_COL, VALUE_COL]][2:]
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [True] * fut_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [False] * fut_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == fut_df.shape[0]
assert fut_df_stats["fut_df_after_training"].shape[0] == 0
assert fut_df_stats["fut_df_gap"] is None
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == 0
assert fut_df_stats["index_after_training_original"] == []
assert fut_df_stats["missing_periods_num"] == 0
assert fut_df_stats["inferred_forecast_horizon"] == 0
# The case where ``fut_df`` has both training and future timestamps
# and the data has regular time increments
fut_df = all_df.copy()
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [True] * train_df.shape[0] + [False] * test_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [False] * train_df.shape[0] + [True] * test_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == train_df.shape[0]
assert fut_df_stats["fut_df_after_training"].shape[0] == test_df.shape[0]
assert fut_df_stats["fut_df_gap"] is None
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == test_df.shape[0]
assert fut_df_stats["index_after_training_original"] == [True] * test_df.shape[0]
assert fut_df_stats["missing_periods_num"] == 0
assert fut_df_stats["inferred_forecast_horizon"] == test_df.shape[0]
# The case where both training and future timestamps appear and we have a gap
# Therefore ``fut_df`` is not a regular increment series
fut_df = pd.concat(
[train_df, test_df[5:]],
axis=0,
ignore_index=True)
# The original length of the future timestamps
fut_length = test_df.shape[0] - 5
with pytest.warns(Warning) as record:
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert "does not have regular time increments" in record[0].message.args[0]
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [True] * train_df.shape[0] + [False] * fut_length)
assert np.all(fut_df_stats["index_after_training"] == [False] * train_df.shape[0] + [True] * fut_length)
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == train_df.shape[0]
assert fut_df_stats["fut_df_after_training"].shape[0] == fut_length
assert fut_df_stats["fut_df_gap"].shape[0] == 5
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == test_df.shape[0]
assert fut_df_stats["index_after_training_original"] == [False] * 5 + [True] * fut_length
assert fut_df_stats["missing_periods_num"] == 5
assert fut_df_stats["inferred_forecast_horizon"] == test_df.shape[0]
def test_partition_fut_df_monthly():
"""Tests ``partition_fut_df`` with monthly data"""
freq = "MS"
data = generate_df_for_tests(
freq=freq,
periods=60,
train_frac=0.8,
train_end_date=None,
noise_std=0.1)
train_df = data["train_df"]
test_df = data["test_df"]
all_df = data["df"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["conti_year"],
"period": [1.0],
"order": [3],
"seas_names": ["yearly"]}),
extra_pred_cols=["ct_sqrt"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}}
)
# The case where ``fut_df`` is only future data and with no gaps
fut_df = test_df[[TIME_COL, VALUE_COL]]
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600 * 31
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600 * 31
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [True] * fut_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == 0
assert fut_df_stats["fut_df_after_training"].shape[0] == fut_df.shape[0]
assert fut_df_stats["fut_df_gap"] is None
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == fut_df.shape[0]
assert np.all(fut_df_stats["index_after_training_original"] == [True] * fut_df.shape[0])
assert fut_df_stats["missing_periods_num"] == 0
assert fut_df_stats["inferred_forecast_horizon"] == fut_df.shape[0]
# The case where ``fut_df`` is only future data and with gaps
fut_df = test_df[[TIME_COL, VALUE_COL]][2:]
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600 * 31
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600 * 31
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [True] * fut_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == 0
assert fut_df_stats["fut_df_after_training"].shape[0] == fut_df.shape[0]
assert fut_df_stats["fut_df_gap"].shape[0] == 2
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == fut_df.shape[0] + 2
assert np.all(fut_df_stats["index_after_training_original"] == [False] * 2 + [True] * fut_df.shape[0])
assert fut_df_stats["missing_periods_num"] == 2
assert fut_df_stats["inferred_forecast_horizon"] == fut_df.shape[0] + 2
# The case where ``fut_df`` is only part of the training data (no gaps as a result)
fut_df = train_df[[TIME_COL, VALUE_COL]][2:]
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600 * 31
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600 * 31
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [True] * fut_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [False] * fut_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == fut_df.shape[0]
assert fut_df_stats["fut_df_after_training"].shape[0] == 0
assert fut_df_stats["fut_df_gap"] is None
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == 0
assert fut_df_stats["index_after_training_original"] == []
assert fut_df_stats["missing_periods_num"] == 0
assert fut_df_stats["inferred_forecast_horizon"] == 0
# The case where ``fut_df`` has both training and future timestamps
# and the data has consistent freq
fut_df = all_df.copy()
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600 * 31
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600 * 31
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [True] * train_df.shape[0] + [False] * test_df.shape[0])
assert np.all(fut_df_stats["index_after_training"] == [False] * train_df.shape[0] + [True] * test_df.shape[0])
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == train_df.shape[0]
assert fut_df_stats["fut_df_after_training"].shape[0] == test_df.shape[0]
assert fut_df_stats["fut_df_gap"] is None
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == test_df.shape[0]
assert fut_df_stats["index_after_training_original"] == [True] * test_df.shape[0]
assert fut_df_stats["missing_periods_num"] == 0
assert fut_df_stats["inferred_forecast_horizon"] == test_df.shape[0]
# The case where both training and future timestamps appear and we have a gap
# Therefore ``fut_df`` has a gap
fut_df = pd.concat(
[train_df, test_df[5:]],
axis=0,
ignore_index=True)
# The original length of the future timestamps
fut_length = test_df.shape[0] - 5
with pytest.warns(Warning) as record:
fut_df_stats = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
assert "does not have regular time increments" in record[0].message.args[0]
assert fut_df_stats["fut_freq_in_secs"] == 24 * 3600 * 31
assert fut_df_stats["training_freq_in_secs"] == 24 * 3600 * 31
assert np.all(fut_df_stats["index_before_training"] == [False] * fut_df.shape[0])
assert np.all(fut_df_stats["index_within_training"] == [True] * train_df.shape[0] + [False] * fut_length)
assert np.all(fut_df_stats["index_after_training"] == [False] * train_df.shape[0] + [True] * fut_length)
assert fut_df_stats["fut_df_before_training"].shape[0] == 0
assert fut_df_stats["fut_df_within_training"].shape[0] == train_df.shape[0]
assert fut_df_stats["fut_df_after_training"].shape[0] == fut_length
assert fut_df_stats["fut_df_gap"].shape[0] == 5
assert fut_df_stats["fut_df_after_training_expanded"].shape[0] == test_df.shape[0]
assert fut_df_stats["index_after_training_original"] == [False] * 5 + [True] * fut_length
assert fut_df_stats["missing_periods_num"] == 5
assert fut_df_stats["inferred_forecast_horizon"] == test_df.shape[0]
def test_partition_fut_df_exceptions():
"""Tests exceptions ``partition_fut_df``"""
freq = "1D"
data = generate_df_for_tests(
freq=freq,
periods=500,
train_frac=0.8,
train_end_date=None,
noise_std=0.1)
train_df = data["train_df"]
silverkite = SilverkiteForecast()
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
train_test_thresh=None,
origin_for_time_vars=2018,
fs_components_df=pd.DataFrame({
"name": ["tod", "conti_year"],
"period": [24.0, 1.0],
"order": [3, 3],
"seas_names": [None, "yearly"]}),
extra_pred_cols=["ct_sqrt", "dow_hr"],
uncertainty_dict={
"uncertainty_method": "simple_conditional_residuals",
"params": {
"conditional_cols": ["dow_hr"],
"quantiles": [0.025, 0.975],
"quantile_estimation_method": "normal_fit",
"sample_size_thresh": 20,
"small_sample_size_method": "std_quantiles",
"small_sample_size_quantile": 0.98}}
)
expected_match = "must be increasing in time"
with pytest.raises(ValueError, match=expected_match):
fut_df = train_df[[TIME_COL, VALUE_COL]].iloc[[3, 2, 1]]
silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
expected_match = "The most immediate time in the future is off"
with pytest.raises(ValueError, match=expected_match):
fut_df = train_df[[TIME_COL]].copy()
last_training_date = max(fut_df[TIME_COL])
t0 = last_training_date + datetime.timedelta(days=0.5)
fut_df_after_training = pd.DataFrame({TIME_COL: [t0]})
fut_df = pd.concat(
[fut_df, fut_df_after_training],
axis=0,
ignore_index=True)
silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq)
def test_predict_silverkite_with_autoreg_horizon_1(hourly_data):
"""Tests forecast_silverkite autoregression"""
train_df = hourly_data["train_df"]
silverkite = SilverkiteForecast()
# Trains model with autoregression.
trained_model = silverkite.forecast(
df=train_df,
time_col=TIME_COL,
value_col=VALUE_COL,
autoreg_dict="auto"
)
# Generates future df with horizon 1.
freq = "H"
dates = pd.date_range(
start=trained_model["last_date_for_fit"],
periods=2,
freq=freq)
dates = dates[dates > trained_model["last_date_for_fit"]] # drops values up to last_date_for_fit
fut_df = pd.DataFrame({trained_model["time_col"]: dates.tolist()})
# Makes sure ``partition_fut_df`` handles horizon 1 correctly.
res = silverkite.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
freq=freq
)
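# With a single future timestamp there are no consecutive gaps to measure,
# so the future frequency cannot be inferred and is reported as ``None``.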
assert res["fut_freq_in_secs"] is None
# Runs ``predict_silverkite`` and expects no error.
silverkite.predict(
fut_df=fut_df,
trained_model=trained_model
)
def test_build_silverkite_features():
"""A basic testing of build_silverkite_features and input validation"""
silverkite = SilverkiteForecast()
daily_event_df_dict = get_holidays(["US"], year_start=2015, year_end=2025)
# simple test
with pytest.warns(None) as record:
df = generate_df_for_tests(
freq="D",
periods=20)
explan_df = silverkite._SilverkiteForecast__build_silverkite_features(
df=df["train_df"],
time_col=TIME_COL,
origin_for_time_vars=2017,
daily_event_df_dict=daily_event_df_dict,
changepoint_values=None,
continuous_time_col=None,
growth_func=None,
fs_func=None)
assert list(explan_df[:3]["ct1"].round(4).values) == [1.4959, 1.4986, 1.5014]
assert list(explan_df[:3]["dow"].values) == [7, 1, 2]
assert list(explan_df[:3]["hour"].values) == [0, 0, 0]
assert len(record) == 0 # no warnings
# warning message for greater than daily data
with pytest.warns(Warning) as record:
df = generate_df_for_tests(
freq="W",
periods=20)
explan_df = silverkite._SilverkiteForecast__build_silverkite_features(
df=df["train_df"],
time_col=TIME_COL,
origin_for_time_vars=2017,
daily_event_df_dict=daily_event_df_dict,
changepoint_values=None,
continuous_time_col=None,
growth_func=None,
fs_func=None)
assert list(explan_df[:3]["ct1"].round(4).values) == [1.4959, 1.5151, 1.5342]
assert list(explan_df[:3]["dow"].values) == [7, 7, 7]
assert list(explan_df[:3]["hour"].values) == [0, 0, 0]
assert ("The granularity of data is larger than daily. "
"Ensure the daily events data match the timestamps" in record[0].message.args[0])
# works for a single period
df = generate_df_for_tests(
freq="W",
periods=1)
explan_df = silverkite._SilverkiteForecast__build_silverkite_features(
df=df["train_df"],
time_col=TIME_COL,
origin_for_time_vars=2017,
daily_event_df_dict=daily_event_df_dict,
changepoint_values=None,
continuous_time_col=None,
growth_func=None,
fs_func=None)
assert explan_df.shape[0] == 1
def test_build_silverkite_features2():
"""Detailed testing of build_silverkite_features
with holidays, changepoints, fourier series, regressor
"""
silverkite = SilverkiteForecast()
hourly_data = generate_df_with_reg_for_tests(
freq="H",
periods=24 * 500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
df = hourly_data["train_df"]
regressor_cols = ["regressor1", "regressor_bool", "regressor_categ"]
time_features_df = build_time_features_df(
df[TIME_COL],
conti_year_origin=2017)
changepoint_values = get_evenly_spaced_changepoints_values(
time_features_df,
continuous_time_col="ct1",
n_changepoints=2)
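# Two evenly spaced changepoints on the ``ct1`` scale; the resulting growth-basis
# columns (``changepoint0`` is checked below) become nonzero once ``ct1`` passes
# each changepoint value.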
fs_func = fourier_series_multi_fcn(
col_names=["tod", "tow", "toy"],
periods=[24.0, 7.0, 1.0],
orders=[3, 0, 5],
seas_names=None
)
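# Fourier terms: order 3 for time of day, order 0 (i.e. none) for time of week,
# and order 5 for time of year; ``sin1_tod`` is spot-checked below.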
# generate holidays
countries = ["US", "India"]
daily_event_df_dict = get_holidays(countries, year_start=2015, year_end=2025)
for country in countries:
daily_event_df_dict[country][EVENT_DF_LABEL_COL] = country + "_holiday"
explan_df = silverkite._SilverkiteForecast__build_silverkite_features(
df=df,
time_col=TIME_COL,
origin_for_time_vars=2017,
daily_event_df_dict=daily_event_df_dict,
changepoint_values=changepoint_values,
continuous_time_col="ct1",
growth_func=lambda x: x,
fs_func=fs_func)
assert list(explan_df[:3]["ct1"].round(4).values) == [1.4959, 1.4960, 1.4961]
assert list(explan_df[:3]["hour"].values) == [0, 1, 2]
assert list(explan_df[:3]["dow_hr"].values) == ["7_00", "7_01", "7_02"]
# check regressors
assert_frame_equal(explan_df[regressor_cols], df[regressor_cols])
# check change points
ind = explan_df["ct1"] > changepoint_values[0]
assert list(explan_df.loc[ind]["changepoint0"][:3].round(6).values) == (
[0.000114, 0.000228, 0.000342]), "change points data is incorrect"
# check holidays
ind = explan_df["conti_year"] >= 2019
assert list(explan_df.loc[ind][:10]["events_US"].values) == (
["US_holiday"] * 10), "holiday data is incorrect"
# check fourier series
assert list(explan_df["sin1_tod"][:3].round(6).values) == (
[0.000000, 0.258819, 0.500000]), "fourier series data is incorrect"
def test_build_autoreg_features(hourly_data):
"""Testing of build_autoreg_features with autoreg_func"""
silverkite = SilverkiteForecast()
past_df = hourly_data["train_df"]
df = hourly_data["test_df"]
df.index = pd.RangeIndex(start=10, stop=10+df.shape[0], step=1) # non-default index
autoreg_info = build_autoreg_df(
value_col="y",
lag_dict={"orders": [1, 168]},
agg_lag_dict={
"orders_list": [[168, 168 * 2, 168 * 3]],
"interval_list": [(168, 168 * 2)]},
series_na_fill_func=None) # no filling of NAs
autoreg_func = autoreg_info["build_lags_func"]
autoreg_df = silverkite._SilverkiteForecast__build_autoreg_features(
df=df,
value_col=VALUE_COL,
autoreg_func=autoreg_func,
phase="fit",
past_df=past_df)
assert_equal(autoreg_df.index, df.index)
assert None not in df.columns
expected_cols = [
"y_lag1",
"y_lag168",
"y_avglag_168_336_504",
"y_avglag_168_to_336"]
assert list(autoreg_df.columns) == expected_cols, (
"expected column names for lag data do not appear in obtained feature df")
expected_autoreg_df = pd.DataFrame({
"y_lag1": [6.0, 4.3],
"y_lag168": [0.9, 1.8],
"y_avglag_168_336_504": [3.3, 3.4],
"y_avglag_168_to_336": [3.4, 3.4]}, index=df.index[:2])
obtained_autoreg_df = autoreg_df[expected_cols][:2].round(1)
# Expected lag data must appear in the result dataframe
assert_frame_equal(expected_autoreg_df, obtained_autoreg_df)
# Expected lag1 value must come from last element of `past_df`.
# Last value in `past_df` should appear as lag1 for first value in `df`.
expected_lag1_value = round(past_df.tail(1)["y"].values[0], 1)
assert obtained_autoreg_df["y_lag1"].values[0] == expected_lag1_value
# Testing for Exception
expected_match = (
"At 'predict' phase, if autoreg_func is not None,"
" 'past_df' and 'value_col' must be provided to `build_autoreg_features`")
# value_col is None
with pytest.raises(ValueError, match=expected_match):
silverkite._SilverkiteForecast__build_autoreg_features(
df=df,
value_col=None,
autoreg_func=autoreg_func,
phase="predict",
past_df=past_df)
# past_df is None
with pytest.raises(ValueError, match=expected_match):
silverkite._SilverkiteForecast__build_autoreg_features(
df=df,
value_col=VALUE_COL,
autoreg_func=autoreg_func,
phase="predict",
past_df=None)
def test_build_lagged_regressor_features(lagged_regressor_dict):
"""Testing of build_lagged_regressor_features with lagged_regressor_func"""
hourly_data = generate_df_with_reg_for_tests(
freq="H",
periods=24 * 500,
train_start_date=datetime.datetime(2018, 7, 1),
conti_year_origin=2018)
silverkite = SilverkiteForecast()
past_df = hourly_data["train_df"]
df = hourly_data["test_df"]
df.index = pd.RangeIndex(start=10, stop=10+df.shape[0], step=1) # non-default index
regressor_cols = ["regressor1", "regressor_bool", "regressor_categ"]
lagged_regressor_components = build_autoreg_df_multi(value_lag_info_dict=lagged_regressor_dict)
lagged_regressor_func = lagged_regressor_components["autoreg_func"]
lagged_regressor_orig_col_names = lagged_regressor_components["autoreg_orig_col_names"]
assert set(lagged_regressor_orig_col_names).difference(regressor_cols) == set()
lagged_regressor_df = silverkite._SilverkiteForecast__build_lagged_regressor_features(
df=df,
lagged_regressor_cols=lagged_regressor_orig_col_names,
lagged_regressor_func=lagged_regressor_func,
phase="fit",
past_df=past_df)
assert_equal(lagged_regressor_df.index, df.index)
assert None not in df.columns
expected_cols = [
'regressor1_lag1',
'regressor1_lag168',
'regressor1_avglag_168_336_504',
'regressor1_avglag_169_to_336',
'regressor_bool_lag1',
'regressor_bool_lag168',
'regressor_bool_avglag_168_336_504',
'regressor_bool_avglag_169_to_336',
'regressor_categ_lag1',
'regressor_categ_lag168']
assert list(lagged_regressor_df.columns) == expected_cols, (
"expected column names for lag data do not appear in obtained feature df")
obtained_lagged_regressor_df = lagged_regressor_df[expected_cols][:2].round(1)
expected_lagged_regressor_df = pd.DataFrame({
"regressor1_lag1": [1.1, 0.2],
"regressor1_lag168": [1.5, 2.3],
"regressor1_avglag_168_336_504": [1.3, 1.7],
"regressor1_avglag_169_to_336": [2.1, 2.1],
"regressor_bool_lag1": [True, True],
"regressor_bool_lag168": [False, True],
"regressor_bool_avglag_168_336_504": [0.3, 0.7],
"regressor_bool_avglag_169_to_336": [0.7, 0.7],
"regressor_categ_lag1": ["c2", "c2"],
"regressor_categ_lag168": ["c3", "c3"]}, index=df.index[:2])
# Expected lag data must appear in the result dataframe
assert_frame_equal(expected_lagged_regressor_df, obtained_lagged_regressor_df)
# Expected lag1 value must come from last element of `past_df`.
# Last value in `past_df` should appear as lag1 for first value in `df`.
expected_lag1_value1 = round(past_df.tail(1)["regressor1"].values[0], 1)
expected_lag1_value2 = past_df.tail(1)["regressor_bool"].values[0]
expected_lag1_value3 = past_df.tail(1)["regressor_categ"].values[0]
assert obtained_lagged_regressor_df["regressor1_lag1"].values[0] == expected_lag1_value1
assert obtained_lagged_regressor_df["regressor_bool_lag1"].values[0] == expected_lag1_value2
assert obtained_lagged_regressor_df["regressor_categ_lag1"].values[0] == expected_lag1_value3
# Testing for Exception
expected_match = (
"At 'predict' phase, if lagged_regressor_func is not None,"
" 'past_df' and 'lagged_regressor_cols' must be provided to "
"`build_lagged_regressor_features`")
# lagged_regressor_cols is None
with pytest.raises(ValueError, match=expected_match):
silverkite._SilverkiteForecast__build_lagged_regressor_features(
df=df,
lagged_regressor_cols=None,
lagged_regressor_func=lagged_regressor_func,
phase="predict",
past_df=past_df)
# past_df is None
with pytest.raises(ValueError, match=expected_match):
silverkite._SilverkiteForecast__build_lagged_regressor_features(
df=df,
lagged_regressor_cols=regressor_cols,
lagged_regressor_func=lagged_regressor_func,
phase="predict",
past_df=None)
def test_get_default_autoreg_dict():
"""Testing ``get_default_autoreg_dict``."""
# Daily, horizon 1 days
silverkite = SilverkiteForecast()
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1,
forecast_horizon=1)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 7
assert autoreg_dict["lag_dict"]["orders"] == [1, 2, 3]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(1, 7), (8, 7*2)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[7, 7*2, 7*3]]
# Daily, horizon 3 days
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1,
forecast_horizon=3)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 7
assert autoreg_dict["lag_dict"]["orders"] == [3, 4, 5]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(3, 9), (10, 16)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[7, 7*2, 7*3]]
# Daily, horizon 7
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1,
forecast_horizon=7)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 7
assert autoreg_dict["lag_dict"]["orders"] == [7, 8, 9]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(7, 13), (14, 20)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[7, 7*2, 7*3]]
# Daily, horizon 30
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1,
forecast_horizon=30)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 35
assert autoreg_dict["lag_dict"]["orders"] == [30, 31, 32]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(30, 36), (37, 43)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[7*5, 7*6, 7*7]]
# Daily, horizon 90
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1,
forecast_horizon=90)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 91
assert autoreg_dict is None
# Daily, horizon 3 days, simulation based
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1,
forecast_horizon=3,
simulation_based=True)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 7
assert autoreg_dict["lag_dict"]["orders"] == [1, 2, 3]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(1, 7), (8, 14)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[7, 7*2, 7*3]]
# Hourly, horizon 1 hour
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1/24,
forecast_horizon=1)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 24*7
assert autoreg_dict["lag_dict"]["orders"] == [1, 2, 3]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(1, 24*7), (24*7+1, 24*7*2)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*7*2, 24*7*3]]
# Hourly, horizon 24 hours
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1/24,
forecast_horizon=24)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 24*7
assert autoreg_dict["lag_dict"]["orders"] == [24, 25, 26]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(24, 24*8-1), (24*8, 24*15-1)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*7*2, 24*7*3]]
# Hourly, horizon 24 hours, simulation based
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1/24,
forecast_horizon=24,
simulation_based=True)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 24*7
assert autoreg_dict["lag_dict"]["orders"] == [1, 2, 3]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(1, 24*7), (24*7+1, 24*7*2)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*7*2, 24*7*3]]
# Hourly, horizon 4 hours, simulation based
autoreg_info = silverkite._SilverkiteForecast__get_default_autoreg_dict(
freq_in_days=1/24,
forecast_horizon=4,
simulation_based=True)
autoreg_dict = autoreg_info["autoreg_dict"]
proper_order = autoreg_info["proper_order"]
assert proper_order == 24*7
assert autoreg_dict["lag_dict"]["orders"] == [1, 2, 3]
assert autoreg_dict["agg_lag_dict"]["interval_list"] == [(1, 24*7), (24*7+1, 24*7*2)]
assert autoreg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*7*2, 24*7*3]]
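# Summary of the default autoregression pattern exercised above (descriptive
# comment only, derived from the assertions in this test): without simulation
# the lag orders start at the forecast horizon ([h, h+1, h+2]) and the
# aggregated lags cover week-long windows starting at the horizon, while with
# ``simulation_based=True`` the lag orders revert to [1, 2, 3]; for very long
# horizons (e.g. 90 days of daily data) no default ``autoreg_dict`` is returned.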
def test_get_default_lagged_regressor_dict():
"""Testing ``get_default_lagged_regressor_dict``."""
# Hourly, horizon 1
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1/24,
forecast_horizon=1)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 24*7
assert lag_reg_dict["lag_dict"]["orders"] == [1]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(1, 24*7)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*14, 24*21]]
# Hourly, horizon 2
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1/24,
forecast_horizon=2)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 24*7
assert lag_reg_dict["lag_dict"]["orders"] == [24*7]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(2, 24*7+1)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*14, 24*21]]
# Hourly, horizon 24
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1/24,
forecast_horizon=24)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 24*7
assert lag_reg_dict["lag_dict"]["orders"] == [24*7]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(24, 24*8-1)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[24*7, 24*14, 24*21]]
# Hourly, horizon 24*8
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1/24,
forecast_horizon=24*8)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 24*14
assert lag_reg_dict["lag_dict"]["orders"] == [24*14]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(24*8, 24*15-1)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[24*14, 24*21, 24*28]]
# Hourly, horizon 24*31
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1/24,
forecast_horizon=24*31)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 24*35
assert lag_reg_dict is None
# Daily, horizon 1
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1,
forecast_horizon=1)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 7
assert lag_reg_dict["lag_dict"]["orders"] == [1]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(1, 7)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[7, 14, 21]]
# Daily, horizon 2
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1,
forecast_horizon=2)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 7
assert lag_reg_dict["lag_dict"]["orders"] == [7]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(2, 8)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[7, 14, 21]]
# Daily, horizon 7
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1,
forecast_horizon=7)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 7
assert lag_reg_dict["lag_dict"]["orders"] == [7]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(7, 13)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[7, 14, 21]]
# Daily, horizon 8
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1,
forecast_horizon=8)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 14
assert lag_reg_dict["lag_dict"]["orders"] == [14]
assert lag_reg_dict["agg_lag_dict"]["interval_list"] == [(8, 14)]
assert lag_reg_dict["agg_lag_dict"]["orders_list"] == [[14, 21, 28]]
# Daily, horizon 31
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=1,
forecast_horizon=31)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order == 35
assert lag_reg_dict is None
# Weekly, horizon 1
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=7,
forecast_horizon=1)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order is None
assert lag_reg_dict["lag_dict"]["orders"] == [1]
assert lag_reg_dict["agg_lag_dict"] is None
# Weekly, horizon 4
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=7,
forecast_horizon=4)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order is None
assert lag_reg_dict["lag_dict"]["orders"] == [4]
assert lag_reg_dict["agg_lag_dict"] is None
# Weekly, horizon 5
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=7,
forecast_horizon=5)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order is None
assert lag_reg_dict is None
# Monthly, horizon 1
silverkite = SilverkiteForecast()
lag_reg_info = silverkite._SilverkiteForecast__get_default_lagged_regressor_dict(
freq_in_days=30,
forecast_horizon=1)
lag_reg_dict = lag_reg_info["lag_reg_dict"]
proper_order = lag_reg_info["proper_order"]
assert proper_order is None
assert lag_reg_dict["lag_dict"]["orders"] == [1]
assert lag_reg_dict["agg_lag_dict"] is None
def test_normalize_changepoint_values():
silverkite = SilverkiteForecast()
df = pd.DataFrame({
"ct1": np.arange(0.01, 2.01, 0.01),
"some_col1": np.random.randn(200),
"some_col2": np.random.randn(200)
})
changepoint_values = np.array([0.88, 1.52])
# tests min_max
normalize_result = normalize_df(
df=df,
method="min_max"
)
pred_cols = normalize_result["keep_cols"]
normalize_df_func = normalize_result["normalize_df_func"]
normalized_changepoint_values = silverkite._SilverkiteForecast__normalize_changepoint_values(
changepoint_values=changepoint_values,
pred_cols=pred_cols,
continuous_time_col="ct1",
normalize_df_func=normalize_df_func
)
assert all(np.round(normalized_changepoint_values, 2) == np.array([0.44, 0.76]))
# tests statistical
normalize_result = normalize_df(
df=df,
method="statistical"
)
pred_cols = normalize_result["keep_cols"]
normalize_df_func = normalize_result["normalize_df_func"]
normalized_changepoint_values = silverkite._SilverkiteForecast__normalize_changepoint_values(
changepoint_values=changepoint_values,
pred_cols=pred_cols,
continuous_time_col="ct1",
normalize_df_func=normalize_df_func
)
assert all(np.round(normalized_changepoint_values, 2) == np.array([-0.22, 0.89]))
# tests None changepoint_values
normalized_changepoint_values = silverkite._SilverkiteForecast__normalize_changepoint_values(
changepoint_values=None,
pred_cols=pred_cols,
continuous_time_col="ct1",
normalize_df_func=normalize_df_func
)
assert normalized_changepoint_values is None
# tests None normalize function
normalized_changepoint_values = silverkite._SilverkiteForecast__normalize_changepoint_values(
changepoint_values=changepoint_values,
pred_cols=pred_cols,
continuous_time_col="ct1",
normalize_df_func=None
)
assert all(normalized_changepoint_values == changepoint_values)
def test_remove_fourier_col_with_collinearity():
silverkite = SilverkiteForecast()
fourier_cols = [
"sin1_tow_weekly",
"cos1_tow_weekly",
"sin2_tow_weekly",
"cos2_tow_weekly",
"sin3_tow_weekly",
"cos3_tow_weekly",
"sin4_tow_weekly",
"cos4_tow_weekly", # to be removed because of weekly order 3 cosine
"sin8_tow_weekly", # to be removed because weekly period is 7
"cos8_tow_weekly", # to be removed because weekly period is 7
"sin1_tom_monthly", # to be removed because of quarterly order 3
"cos1_tom_monthly", # to be removed because of quarterly order 3
"sin2_tom_monthly",
"cos2_tom_monthly",
"sin1_ct1_quarterly", # to be removed because of yearly order 4
"cos1_ct1_quarterly", # to be removed because of yearly order 4
"sin2_ct1_quarterly", # to be removed because of yearly order 8
"cos2_ct1_quarterly", # to be removed because of yearly order 8
"sin3_ct1_quarterly",
"cos3_ct1_quarterly",
"sin1_ct1_yearly",
"cos1_ct1_yearly",
"sin2_ct1_yearly",
"cos2_ct1_yearly",
"sin3_ct1_yearly",
"cos3_ct1_yearly",
"sin4_ct1_yearly",
"cos4_ct1_yearly",
"sin5_ct1_yearly",
"cos5_ct1_yearly",
"sin6_ct1_yearly",
"cos6_ct1_yearly",
"sin7_ct1_yearly",
"cos7_ct1_yearly",
"sin8_ct1_yearly",
"cos8_ct1_yearly"
]
expected_cols = [
"sin1_tow_weekly",
"cos1_tow_weekly",
"sin2_tow_weekly",
"cos2_tow_weekly",
"sin3_tow_weekly",
"cos3_tow_weekly",
"sin4_tow_weekly",
"sin2_tom_monthly",
"cos2_tom_monthly",
"sin3_ct1_quarterly",
"cos3_ct1_quarterly",
"sin1_ct1_yearly",
"cos1_ct1_yearly",
"sin2_ct1_yearly",
"cos2_ct1_yearly",
"sin3_ct1_yearly",
"cos3_ct1_yearly",
"sin4_ct1_yearly",
"cos4_ct1_yearly",
"sin5_ct1_yearly",
"cos5_ct1_yearly",
"sin6_ct1_yearly",
"cos6_ct1_yearly",
"sin7_ct1_yearly",
"cos7_ct1_yearly",
"sin8_ct1_yearly",
"cos8_ct1_yearly"
]
removed_cols = [
"sin1_ct1_quarterly",
"cos1_ct1_quarterly",
"sin2_ct1_quarterly",
"cos2_ct1_quarterly",
"sin1_tom_monthly",
"cos1_tom_monthly",
"cos4_tow_weekly",
"sin8_tow_weekly",
"cos8_tow_weekly"
]
with pytest.warns(UserWarning) as record:
cols = silverkite._SilverkiteForecast__remove_fourier_col_with_collinearity(fourier_cols)
assert f"The following Fourier series terms are removed due to collinearity:\n{removed_cols}" in record[0].message.args[0]
assert cols == expected_cols
# Tests monthly terms removal with yearly seasonality only.
fourier_cols = [
"sin1_tom_monthly", # to be removed because of yearly order 12
"cos1_tom_monthly", # to be removed because of yearly order 12
"sin2_tom_monthly",
"cos2_tom_monthly",
"sin1_ct1_yearly",
"cos1_ct1_yearly",
"sin2_ct1_yearly",
"cos2_ct1_yearly",
"sin3_ct1_yearly",
"cos3_ct1_yearly",
"sin4_ct1_yearly",
"cos4_ct1_yearly",
"sin5_ct1_yearly",
"cos5_ct1_yearly",
"sin6_ct1_yearly",
"cos6_ct1_yearly",
"sin7_ct1_yearly",
"cos7_ct1_yearly",
"sin8_ct1_yearly",
"cos8_ct1_yearly",
"sin9_ct1_yearly",
"cos9_ct1_yearly",
"sin10_ct1_yearly",
"cos10_ct1_yearly",
"sin11_ct1_yearly",
"cos11_ct1_yearly",
"sin12_ct1_yearly",
"cos12_ct1_yearly"
]
expected_cols = [
"sin2_tom_monthly",
"cos2_tom_monthly",
"sin1_ct1_yearly",
"cos1_ct1_yearly",
"sin2_ct1_yearly",
"cos2_ct1_yearly",
"sin3_ct1_yearly",
"cos3_ct1_yearly",
"sin4_ct1_yearly",
"cos4_ct1_yearly",
"sin5_ct1_yearly",
"cos5_ct1_yearly",
"sin6_ct1_yearly",
"cos6_ct1_yearly",
"sin7_ct1_yearly",
"cos7_ct1_yearly",
"sin8_ct1_yearly",
"cos8_ct1_yearly",
"sin9_ct1_yearly",
"cos9_ct1_yearly",
"sin10_ct1_yearly",
"cos10_ct1_yearly",
"sin11_ct1_yearly",
"cos11_ct1_yearly",
"sin12_ct1_yearly",
"cos12_ct1_yearly"
]
removed_cols = [
"sin1_tom_monthly",
"cos1_tom_monthly",
]
with pytest.warns(UserWarning) as record:
cols = silverkite._SilverkiteForecast__remove_fourier_col_with_collinearity(fourier_cols)
assert f"The following Fourier series terms are removed due to collinearity:\n{removed_cols}" in record[0].message.args[0]
assert cols == expected_cols
def test_remove_fourier_col_with_collinearity_and_interaction():
silverkite = SilverkiteForecast()
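    # Note: several fixtures below rely on Python's implicit concatenation of
    # adjacent string literals (no comma between them), e.g. "b:c"
    # "d:cos3_tow_weekly" is the single string "b:cd:cos3_tow_weekly"; the
    # expected outputs follow the same pattern.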
extra_pred_cols = [
"a",
"b:c"
"d:cos3_tow_weekly",
"d:cos4_tow_weekly"
"cos4_tow_weekly:cos3_tow_weekly"
]
fs_cols = [
"cos1_tow_weekly",
"cos2_tow_weekly",
"cos3_tow_weekly"
]
removed_cols = [
"d:cos4_tow_weekly"
"cos4_tow_weekly:cos3_tow_weekly"
]
with pytest.warns(UserWarning) as record:
output = silverkite._SilverkiteForecast__remove_fourier_col_with_collinearity_and_interaction(
extra_pred_cols=extra_pred_cols,
fs_cols=fs_cols
)
assert (f"The following interaction terms are removed:\n{removed_cols}\n"
f"due to the removal of the corresponding Fourier series terms."
in record[0].message.args[0])
expected_output = [
"a",
"b:c"
"d:cos3_tow_weekly"
]
assert output == expected_output
| 37.710301
| 130
| 0.657414
|
2b07c02968b627100df8ed249950c0b4bb15146f
| 57
|
py
|
Python
|
netxlib/linux/cpu.py
|
vargyropoulos/netxlib
|
c0f05edf2e7800353a6628beca8dc661b05e885e
|
[
"MIT"
] | null | null | null |
netxlib/linux/cpu.py
|
vargyropoulos/netxlib
|
c0f05edf2e7800353a6628beca8dc661b05e885e
|
[
"MIT"
] | null | null | null |
netxlib/linux/cpu.py
|
vargyropoulos/netxlib
|
c0f05edf2e7800353a6628beca8dc661b05e885e
|
[
"MIT"
] | null | null | null |
# TODO: write a CPU check (runnable from inside a container)
# and possibly report the system load
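# A minimal sketch of the intended check (illustrative only; the function name
# and return format are assumptions, not part of netxlib's existing API).
# os.getloadavg() reads the kernel's 1/5/15-minute load averages; inside a
# container this reflects the whole host, not just the container's cgroup.
import os
def system_load():
    """Return the 1, 5 and 15 minute load averages as a dict."""
    one, five, fifteen = os.getloadavg()
    return {"load_1m": one, "load_5m": five, "load_15m": fifteen}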
| 19
| 36
| 0.736842
|
2589a18d11c4307461adcd39755d2673671e66b1
| 1,845
|
py
|
Python
|
train_agent.py
|
turalnovruzov/tictactoe-ai
|
d07939ea4e27320cdda28d36bfc50bf89c3b1833
|
[
"MIT"
] | null | null | null |
train_agent.py
|
turalnovruzov/tictactoe-ai
|
d07939ea4e27320cdda28d36bfc50bf89c3b1833
|
[
"MIT"
] | null | null | null |
train_agent.py
|
turalnovruzov/tictactoe-ai
|
d07939ea4e27320cdda28d36bfc50bf89c3b1833
|
[
"MIT"
] | null | null | null |
import argparse
from qagent import QAgent
def check_episodes(episodes):
"""
Checks the validity of the episodes command line argument
"""
episodes = int(episodes)
if episodes < 0:
raise argparse.ArgumentTypeError(f'{episodes} is an invalid episodes value')
return episodes
def check_alpha(alpha):
"""
Checks the validity of the alpha command line argument
"""
alpha = float(alpha)
if alpha <= 0:
raise argparse.ArgumentTypeError(f'{alpha} is an invalid alpha value')
return alpha
def check_epsilon(epsilon):
"""
Checks the validity of the epsilon command line argument
"""
epsilon = float(epsilon)
if not (0 <= epsilon <= 1):
raise argparse.ArgumentTypeError(f'{epsilon} is an invalid epsilon value')
return epsilon
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument('episodes', type=check_episodes, help='Number of games to train.')
parser.add_argument('-f', type=str, default='Q.pkl', metavar='FILEPATH', dest='filepath',
help='Full or relative path of a file in which the agent is (to be) stored. Defaults to \"Q.pkl\"')
parser.add_argument('-l', dest='load', action='store_true', help='Load agent from file.')
parser.add_argument('-a', type=check_alpha, dest='alpha', metavar='ALPHA', default=0.5,
                    help='Learning rate. Must be a float strictly greater than 0. Defaults to 0.5.')
parser.add_argument('-e', type=check_epsilon, dest='epsilon', metavar='EPSILON', default=0.1,
                    help='Epsilon randomness value. Must be a float between 0 and 1, inclusive. Defaults to 0.1.')
args = parser.parse_args()
agent = QAgent(alpha=args.alpha, epsilon=args.epsilon)
if args.load:
agent.load(args.filepath)
agent.train(args.episodes)
agent.save(args.filepath)
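# Example invocation (illustrative values; the file name is arbitrary):
#   python train_agent.py 50000 -f Q.pkl -a 0.5 -e 0.1
# Add -l to load and continue training an agent previously saved to that file.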
| 34.811321
| 119
| 0.686721
|
cf4fc4f0f7309c2f727842e07f319798c4a6de35
| 2,973
|
py
|
Python
|
q1.py
|
GeoffNN/RL_TP1
|
46b95f8b7c462bf132a7b4480ef1c641ce584479
|
[
"Unlicense"
] | null | null | null |
q1.py
|
GeoffNN/RL_TP1
|
46b95f8b7c462bf132a7b4480ef1c641ce584479
|
[
"Unlicense"
] | null | null | null |
q1.py
|
GeoffNN/RL_TP1
|
46b95f8b7c462bf132a7b4480ef1c641ce584479
|
[
"Unlicense"
] | null | null | null |
from scipy.stats import bernoulli, randint
import numpy as np
class TreeCut:
def __init__(self, growth_param=5, replanting_cost=30, linear_wood_value=10,
maintenance_cost=3, max_height=40, proba_of_dying=.1,
sappling_height=1, gamma=1. / (1 + 0.05)):
self.growth_param = growth_param
self.replanting_cost = replanting_cost
self.linear_wood_value = linear_wood_value
self.maintenance_cost = maintenance_cost
self.max_height = max_height
self.proba_of_dying = proba_of_dying
self.sappling_height = sappling_height
self.gamma = gamma
self.states = range(self.max_height + 1)
self.number_of_actions = 2
self.death = 0
self.dead_index = 0
self.no_cut = 0
self.cut = 1
self.dynamics, self.reward = self.tree_MDP()
def tree_sim(self, cur_state, action):
        if cur_state == self.death:
            if action == self.cut:
next_state = self.sappling_height
reward = -self.replanting_cost
else:
next_state = self.death
reward = 0
else:
            if action == self.cut:
next_state = self.sappling_height
reward = self.linear_wood_value * cur_state - self.replanting_cost
else:
tree_is_dying = bernoulli.rvs(self.proba_of_dying)
if tree_is_dying:
next_state = self.death
reward = -self.maintenance_cost
else:
next_state = randint.rvs(cur_state, self.max_height + 1)
reward = -self.maintenance_cost
return next_state, reward
def tree_MDP(self):
dynamics = np.zeros((self.max_height + 1, self.max_height + 1, self.number_of_actions))
rewards = np.zeros((self.max_height + 1, self.number_of_actions))
dynamics[:, self.sappling_height, self.cut] = np.array([1] * (self.max_height + 1))
dynamics[self.dead_index, self.dead_index, self.no_cut] = 1
dynamics[self.max_height, self.max_height, self.no_cut] = 1 - self.proba_of_dying
dynamics[1:, self.dead_index, self.no_cut] = self.proba_of_dying
for cur_state in range(1, self.max_height):
for next_state in range(cur_state, self.max_height + 1):
dynamics[cur_state, next_state, self.no_cut] = (1 - self.proba_of_dying) * 1. / (
self.max_height - cur_state + 1)
rewards[self.dead_index, :] = [0, -self.replanting_cost]
rewards[1:, self.no_cut] = [-self.maintenance_cost for k in range(self.max_height)]
rewards[1:, self.cut] = [self.linear_wood_value * cur_state - self.replanting_cost for cur_state in
range(1, self.max_height + 1)]
return dynamics, rewards
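# Usage sketch (illustrative; not part of the original exercise script):
#   env = TreeCut()
#   next_state, reward = env.tree_sim(cur_state=10, action=env.cut)
#   P, R = env.dynamics, env.reward   # shapes (41, 41, 2) and (41, 2) for max_height=40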
| 43.086957
| 108
| 0.592667
|
a1ba1e846fb9586ecbf0ccdddc9a44d5403dcf97
| 2,033
|
py
|
Python
|
model-optimizer/extensions/front/onnx/rnn_ext.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
model-optimizer/extensions/front/onnx/rnn_ext.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
model-optimizer/extensions/front/onnx/rnn_ext.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from extensions.ops.RNN import RNN
from mo.front.extractor import FrontExtractorOp
from mo.front.onnx.extractors.utils import onnx_attr
class RNNFrontExtractor(FrontExtractorOp):
op = 'RNN'
enabled = True
@classmethod
def extract(cls, node):
direction = onnx_attr(node, 'direction', 's', b'forward').decode().lower()
activation_alpha = onnx_attr(node, 'activation_alpha', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
activation_beta = onnx_attr(node, 'activation_beta', 'floats',
default=None, dst_type=lambda x: np.array(x, dtype=np.float32))
activations = onnx_attr(node, 'activations', 'strings',
default=['tanh', 'tanh'] if direction == 'bidirectional' else ['tanh'],
dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
clip = onnx_attr(node, 'clip', 'f', default=None)
        # PyTorch generates ONNX bidirectional RNN models with only one activation, so duplicate it for both directions
if direction == 'bidirectional' and len(activations) == 1:
activations.append(activations[0])
attrs = {
'batch_dim': 1,
'sequence_dim': 0,
'blobs_wrb': True,
'has_num_directions': True,
'num_layers': 1,
'format': 'onnx',
'multilayers': False,
'gate_order': [0],
# ONNX attrs
'activation_alpha': activation_alpha,
'activation_beta': activation_beta,
'activations': activations,
'clip': clip,
'direction': direction,
'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),
}
RNN.update_node_stat(node, attrs)
return cls.enabled
| 38.358491
| 116
| 0.586326
|
27fc36fd128eabd72d201a101b2c634a5ba15ece
| 48
|
py
|
Python
|
math/freecodecamp/program.py
|
spideynolove/Other-repo
|
34066f177994415d031183ab9dd219d787e6e13a
|
[
"MIT"
] | null | null | null |
math/freecodecamp/program.py
|
spideynolove/Other-repo
|
34066f177994415d031183ab9dd219d787e6e13a
|
[
"MIT"
] | null | null | null |
math/freecodecamp/program.py
|
spideynolove/Other-repo
|
34066f177994415d031183ab9dd219d787e6e13a
|
[
"MIT"
] | null | null | null |
import numpy as np
print("Hello freecodecamp!")
| 16
| 28
| 0.770833
|
891a7a6e323babc58f18690310620efbf32108fd
| 383
|
py
|
Python
|
lab/migrations/0015_labcode_test.py
|
bernardobgam/edtech_experiment
|
88a64b925b6692261649418260a0bdf7b4a5a9d1
|
[
"MIT"
] | null | null | null |
lab/migrations/0015_labcode_test.py
|
bernardobgam/edtech_experiment
|
88a64b925b6692261649418260a0bdf7b4a5a9d1
|
[
"MIT"
] | 8
|
2020-06-05T23:56:56.000Z
|
2022-03-12T00:02:52.000Z
|
lab/migrations/0015_labcode_test.py
|
bernardobgam/edtech_experiment
|
88a64b925b6692261649418260a0bdf7b4a5a9d1
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.1 on 2019-09-08 00:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lab', '0014_labprogress_receipt'),
]
operations = [
migrations.AddField(
model_name='labcode',
name='test',
field=models.BooleanField(default=False),
),
]
| 20.157895
| 53
| 0.5953
|
0aa5963dfc4cc777826f7f7daa4ff69ff518541f
| 6,846
|
py
|
Python
|
scripts/fast_parameters.py
|
divadnauj-GB/nvbitfi
|
8f7e48a4ffd3f8e819c5fc0891fb62422080aa15
|
[
"Apache-2.0"
] | null | null | null |
scripts/fast_parameters.py
|
divadnauj-GB/nvbitfi
|
8f7e48a4ffd3f8e819c5fc0891fb62422080aa15
|
[
"Apache-2.0"
] | null | null | null |
scripts/fast_parameters.py
|
divadnauj-GB/nvbitfi
|
8f7e48a4ffd3f8e819c5fc0891fb62422080aa15
|
[
"Apache-2.0"
] | null | null | null |
# Set parameters for fault injection (FI).
# This is an easier way to set all SASSIFI parameters; it is equivalent to setting them in specific_param.py.
from os import environ
benchmark = environ['BENCHMARK']
NVBITFI_HOME = environ['NVBITFI_HOME']
THRESHOLD_JOBS = int(environ['FAULTS'])
ADDITIONAL_PARAMETERS = environ['ADDITIONAL_PARAMETERS']
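# Expected environment (values below are illustrative, not defaults):
#   export NVBITFI_HOME=/path/to/nvbitfi
#   export BENCHMARK=hotspot          # must match a key of all_apps below
#   export FAULTS=1000                # number of fault-injection jobs (THRESHOLD_JOBS)
#   export ADDITIONAL_PARAMETERS=""   # extra arguments forwarded to run.sh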
all_apps = {
'simple_add': [
NVBITFI_HOME + '/test-apps/simple_add', # workload directory
'simple_add', # binary name
NVBITFI_HOME + '/test-apps/simple_add/', # path to the binary file
2, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'lava_mp': [
NVBITFI_HOME + '/test-apps/lava_mp', # workload directory
'lava_mp', # binary name
NVBITFI_HOME + '/test-apps/lava_mp/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'gemm': [
NVBITFI_HOME + '/test-apps/gemm', # workload directory
'gemm', # binary name
NVBITFI_HOME + '/test-apps/gemm/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'bfs': [
NVBITFI_HOME + '/test-apps/bfs', # workload directory
'cudaBFS', # binary name
NVBITFI_HOME + '/test-apps/bfs/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'accl': [
NVBITFI_HOME + '/test-apps/accl', # workload directory
'cudaACCL', # binary name
NVBITFI_HOME + '/test-apps/accl/', # path to the binary file
1, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'mergesort': [
NVBITFI_HOME + '/test-apps/mergesort', # workload directory
'mergesort', # binary name
NVBITFI_HOME + '/test-apps/mergesort/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'quicksort': [
NVBITFI_HOME + '/test-apps/quicksort', # workload directory
'quicksort', # binary name
NVBITFI_HOME + '/test-apps/quicksort/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'hotspot': [
NVBITFI_HOME + '/test-apps/hotspot', # workload directory
'hotspot', # binary name
NVBITFI_HOME + '/test-apps/hotspot/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'darknet_v2': [
NVBITFI_HOME + '/test-apps/darknet_v2', # workload directory
'darknet_v2', # binary name
NVBITFI_HOME + '/test-apps/darknet_v2/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'darknet_v3': [
NVBITFI_HOME + '/test-apps/darknet_v3', # workload directory
'darknet_v3_single', # binary name
NVBITFI_HOME + '/test-apps/darknet_v3/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'darknet_rubens': [
NVBITFI_HOME + '/test-apps/darknet_rubens', # workload directory
'darknet', # binary name
NVBITFI_HOME + '/test-apps/darknet_rubens/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'gaussian': [
NVBITFI_HOME + '/test-apps/gaussian', # workload directory
'cudaGaussian', # binary name
NVBITFI_HOME + '/test-apps/gaussian/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'lud': [
NVBITFI_HOME + '/test-apps/lud', # workload directory
'cudaLUD', # binary name
NVBITFI_HOME + '/test-apps/lud/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'nw': [
NVBITFI_HOME + '/test-apps/nw', # workload directory
'nw', # binary name
NVBITFI_HOME + '/test-apps/nw/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'cfd': [
NVBITFI_HOME + '/test-apps/cfd', # workload directory
'cfd', # binary name
NVBITFI_HOME + '/test-apps/cfd/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'trip_hotspot': [
NVBITFI_HOME + '/test-apps/trip_hotspot', # workload directory
'trip_hotspot', # binary name
NVBITFI_HOME + '/test-apps/trip_hotspot/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'trip_mxm': [
NVBITFI_HOME + '/test-apps/trip_mxm', # workload directory
'trip_mxm', # binary name
NVBITFI_HOME + '/test-apps/trip_mxm/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'trip_lava': [
NVBITFI_HOME + '/test-apps/trip_lava', # workload directory
'trip_lava', # binary name
NVBITFI_HOME + '/test-apps/trip_lava/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'darknet_lenet': [
NVBITFI_HOME + '/test-apps/darknet_lenet', # workload directory
'darknet', # binary name
NVBITFI_HOME + '/test-apps/darknet_lenet/', # path to the binary file
3, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
'py_faster_rcnn': [
NVBITFI_HOME + '/test-apps/py_faster_rcnn', # workload directory
'py_faster_rcnn.py', # binary name
'/home/carol/radiation-benchmarks/src/cuda/py-faster-rcnn/', # path to the binary file
5, # expected runtime
ADDITIONAL_PARAMETERS,
],
'trip_micro': [
NVBITFI_HOME + '/test-apps/trip_micro', # workload directory
# 'cuda_micro-add_single', # binary name
# 'cuda_micro-mul_single', # binary name
'cuda_micro-fma_single', # binary name
NVBITFI_HOME + '/test-apps/trip_micro/', # path to the binary file
20, # expected runtime
ADDITIONAL_PARAMETERS # additional parameters to the run.sh
],
}
apps = {benchmark: all_apps[benchmark]}
| 37.823204
| 106
| 0.620508
|
1e2ba857ed3f3b3cd8f0476b85731a6405079248
| 36,365
|
py
|
Python
|
visitor-counter/venv/Lib/site-packages/flask_sqlalchemy/__init__.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
visitor-counter/venv/Lib/site-packages/flask_sqlalchemy/__init__.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | 8
|
2020-06-06T01:02:06.000Z
|
2022-03-12T00:24:13.000Z
|
visitor-counter/venv/Lib/site-packages/flask_sqlalchemy/__init__.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
flask_sqlalchemy
~~~~~~~~~~~~~~~~
Adds basic SQLAlchemy support to your application.
:copyright: (c) 2014 by Armin Ronacher, Daniel Neuhäuser.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
import os
import sys
from timeit import default_timer
import warnings
from math import ceil
from operator import itemgetter
from threading import Lock
import sqlalchemy
from flask import _app_ctx_stack, abort, current_app, request
from flask.signals import Namespace
from sqlalchemy import event, inspect, orm
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.session import Session as SessionBase
from flask_sqlalchemy.model import Model
from ._compat import itervalues, string_types, to_str, xrange
from .model import DefaultMeta
__version__ = '2.3.2'
# the best timer function for the platform
time_func = default_timer
_signals = Namespace()
models_committed = _signals.signal('models-committed')
before_models_committed = _signals.signal('before-models-committed')
def _make_table(db):
def _make_table(*args, **kwargs):
if len(args) > 1 and isinstance(args[1], db.Column):
args = (args[0], db.metadata) + args[1:]
info = kwargs.pop('info', None) or {}
info.setdefault('bind_key', None)
kwargs['info'] = info
return sqlalchemy.Table(*args, **kwargs)
return _make_table
def _set_default_query_class(d, cls):
if 'query_class' not in d:
d['query_class'] = cls
def _wrap_with_default_query_class(fn, cls):
@functools.wraps(fn)
def newfn(*args, **kwargs):
_set_default_query_class(kwargs, cls)
if "backref" in kwargs:
backref = kwargs['backref']
if isinstance(backref, string_types):
backref = (backref, {})
_set_default_query_class(backref[1], cls)
return fn(*args, **kwargs)
return newfn
def _include_sqlalchemy(obj, cls):
for module in sqlalchemy, sqlalchemy.orm:
for key in module.__all__:
if not hasattr(obj, key):
setattr(obj, key, getattr(module, key))
# Note: obj.Table does not attempt to be a SQLAlchemy Table class.
obj.Table = _make_table(obj)
obj.relationship = _wrap_with_default_query_class(obj.relationship, cls)
obj.relation = _wrap_with_default_query_class(obj.relation, cls)
obj.dynamic_loader = _wrap_with_default_query_class(
obj.dynamic_loader, cls)
obj.event = event
class _DebugQueryTuple(tuple):
statement = property(itemgetter(0))
parameters = property(itemgetter(1))
start_time = property(itemgetter(2))
end_time = property(itemgetter(3))
context = property(itemgetter(4))
@property
def duration(self):
return self.end_time - self.start_time
def __repr__(self):
return '<query statement="%s" parameters=%r duration=%.03f>' % (
self.statement, self.parameters, self.duration)
def _calling_context(app_path):
frm = sys._getframe(1)
while frm.f_back is not None:
name = frm.f_globals.get('__name__')
if name and (name == app_path or name.startswith(app_path + '.')):
funcname = frm.f_code.co_name
return '%s:%s (%s)' % (frm.f_code.co_filename, frm.f_lineno,
funcname)
frm = frm.f_back
return '<unknown>'
class SignallingSession(SessionBase):
"""The signalling session is the default session that Flask-SQLAlchemy
uses. It extends the default session system with bind selection and
modification tracking.
If you want to use a different session you can override the
:meth:`SQLAlchemy.create_session` function.
.. versionadded:: 2.0
.. versionadded:: 2.1
The `binds` option was added, which allows a session to be joined
to an external transaction.
"""
def __init__(self, db, autocommit=False, autoflush=True, **options):
#: The application that this session belongs to.
self.app = app = db.get_app()
track_modifications = app.config['SQLALCHEMY_TRACK_MODIFICATIONS']
bind = options.pop('bind', None) or db.engine
binds = options.pop('binds', db.get_binds(app))
if track_modifications is None or track_modifications:
_SessionSignalEvents.register(self)
SessionBase.__init__(self,
autocommit=autocommit,
autoflush=autoflush,
bind=bind,
binds=binds,
**options)
def get_bind(self, mapper=None, clause=None):
# mapper is None if someone tries to just get a connection
if mapper is not None:
info = getattr(mapper.mapped_table, 'info', {})
bind_key = info.get('bind_key')
if bind_key is not None:
state = get_state(self.app)
return state.db.get_engine(self.app, bind=bind_key)
return SessionBase.get_bind(self, mapper, clause)
class _SessionSignalEvents(object):
@classmethod
def register(cls, session):
if not hasattr(session, '_model_changes'):
session._model_changes = {}
event.listen(session, 'before_flush', cls.record_ops)
event.listen(session, 'before_commit', cls.record_ops)
event.listen(session, 'before_commit', cls.before_commit)
event.listen(session, 'after_commit', cls.after_commit)
event.listen(session, 'after_rollback', cls.after_rollback)
@classmethod
def unregister(cls, session):
if hasattr(session, '_model_changes'):
del session._model_changes
event.remove(session, 'before_flush', cls.record_ops)
event.remove(session, 'before_commit', cls.record_ops)
event.remove(session, 'before_commit', cls.before_commit)
event.remove(session, 'after_commit', cls.after_commit)
event.remove(session, 'after_rollback', cls.after_rollback)
@staticmethod
def record_ops(session, flush_context=None, instances=None):
try:
d = session._model_changes
except AttributeError:
return
        for targets, operation in ((session.new, 'insert'),
                                   (session.dirty, 'update'),
                                   (session.deleted, 'delete')):
for target in targets:
state = inspect(target)
key = state.identity_key if state.has_identity else id(target)
d[key] = (target, operation)
@staticmethod
def before_commit(session):
try:
d = session._model_changes
except AttributeError:
return
if d:
before_models_committed.send(session.app, changes=list(d.values()))
@staticmethod
def after_commit(session):
try:
d = session._model_changes
except AttributeError:
return
if d:
models_committed.send(session.app, changes=list(d.values()))
d.clear()
@staticmethod
def after_rollback(session):
try:
d = session._model_changes
except AttributeError:
return
d.clear()
class _EngineDebuggingSignalEvents(object):
"""Sets up handlers for two events that let us track the execution time of
queries."""
def __init__(self, engine, import_name):
self.engine = engine
self.app_package = import_name
def register(self):
event.listen(self.engine, 'before_cursor_execute',
self.before_cursor_execute)
event.listen(self.engine, 'after_cursor_execute',
self.after_cursor_execute)
def before_cursor_execute(self, conn, cursor, statement, parameters,
context, executemany):
if current_app:
            context._query_start_time = time_func()
def after_cursor_execute(self, conn, cursor, statement, parameters,
context, executemany):
if current_app:
try:
queries = _app_ctx_stack.top.sqlalchemy_queries
except AttributeError:
queries = _app_ctx_stack.top.sqlalchemy_queries = []
queries.append(
_DebugQueryTuple(
                    (statement, parameters, context._query_start_time,
                     time_func(), _calling_context(self.app_package))))
def get_debug_queries():
"""In debug mode Flask-SQLAlchemy will log all the SQL queries sent
to the database. This information is available until the end of request
which makes it possible to easily ensure that the SQL generated is the
one expected on errors or in unittesting. If you don't want to enable
the DEBUG mode for your unittests you can also enable the query
recording by setting the ``'SQLALCHEMY_RECORD_QUERIES'`` config variable
to `True`. This is automatically enabled if Flask is in testing mode.
The value returned will be a list of named tuples with the following
attributes:
`statement`
The SQL statement issued
`parameters`
The parameters for the SQL statement
`start_time` / `end_time`
Time the query started / the results arrived. Please keep in mind
that the timer function used depends on your platform. These
values are only useful for sorting or comparing. They do not
necessarily represent an absolute timestamp.
`duration`
Time the query took in seconds
`context`
A string giving a rough estimation of where in your application
query was issued. The exact format is undefined so don't try
to reconstruct filename or function name.
"""
return getattr(_app_ctx_stack.top, 'sqlalchemy_queries', [])
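# Usage sketch (illustrative; the threshold and log message are arbitrary):
# inspect the recorded queries at the end of a request, e.g. in an
# after_request handler, using the attributes documented above.
#   for q in get_debug_queries():
#       if q.duration >= 0.5:
#           app.logger.warning('Slow query: %s (%.3fs) at %s',
#                              q.statement, q.duration, q.context)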
class Pagination(object):
"""Internal helper class returned by :meth:`BaseQuery.paginate`. You
can also construct it from any other SQLAlchemy query object if you are
working with other libraries. Additionally it is possible to pass `None`
as query object in which case the :meth:`prev` and :meth:`next` will
no longer work.
"""
def __init__(self, query, page, per_page, total, items):
#: the unlimited query object that was used to create this
#: pagination object.
self.query = query
#: the current page number (1 indexed)
self.page = page
#: the number of items to be displayed on a page.
self.per_page = per_page
#: the total number of items matching the query
self.total = total
#: the items for the current page
self.items = items
@property
def pages(self):
"""The total number of pages"""
if self.per_page == 0:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
assert self.query is not None, 'a query object is required ' \
'for this method to work'
return self.query.paginate(self.page - 1, self.per_page, error_out)
@property
def prev_num(self):
"""Number of the previous page."""
if not self.has_prev:
return None
return self.page - 1
@property
def has_prev(self):
"""True if a previous page exists"""
return self.page > 1
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
assert self.query is not None, 'a query object is required ' \
'for this method to work'
return self.query.paginate(self.page + 1, self.per_page, error_out)
@property
def has_next(self):
"""True if a next page exists."""
return self.page < self.pages
@property
def next_num(self):
"""Number of the next page"""
if not self.has_next:
return None
return self.page + 1
def iter_pages(self,
left_edge=2,
left_current=2,
right_current=5,
right_edge=2):
"""Iterates over the page numbers in the pagination. The four
parameters control the thresholds how many numbers should be produced
from the sides. Skipped page numbers are represented as `None`.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>…</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and \
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
class BaseQuery(orm.Query):
"""SQLAlchemy :class:`~sqlalchemy.orm.query.Query` subclass with convenience methods for querying in a web application.
This is the default :attr:`~Model.query` object used for models, and exposed as :attr:`~SQLAlchemy.Query`.
Override the query class for an individual model by subclassing this and setting :attr:`~Model.query_class`.
"""
def get_or_404(self, ident):
"""Like :meth:`get` but aborts with 404 if not found instead of returning ``None``."""
rv = self.get(ident)
if rv is None:
abort(404)
return rv
def first_or_404(self):
"""Like :meth:`first` but aborts with 404 if not found instead of returning ``None``."""
rv = self.first()
if rv is None:
abort(404)
return rv
def paginate(self,
page=None,
per_page=None,
error_out=True,
max_per_page=None):
"""Returns ``per_page`` items from page ``page``.
If ``page`` or ``per_page`` are ``None``, they will be retrieved from
the request query. If ``max_per_page`` is specified, ``per_page`` will
be limited to that value. If there is no request or they aren't in the
query, they default to 1 and 20 respectively.
When ``error_out`` is ``True`` (default), the following rules will
cause a 404 response:
* No items are found and ``page`` is not 1.
* ``page`` is less than 1, or ``per_page`` is negative.
* ``page`` or ``per_page`` are not ints.
When ``error_out`` is ``False``, ``page`` and ``per_page`` default to
1 and 20 respectively.
Returns a :class:`Pagination` object.
"""
if request:
if page is None:
try:
page = int(request.args.get('page', 1))
except (TypeError, ValueError):
if error_out:
abort(404)
page = 1
if per_page is None:
try:
per_page = int(request.args.get('per_page', 20))
except (TypeError, ValueError):
if error_out:
abort(404)
per_page = 20
else:
if page is None:
page = 1
if per_page is None:
per_page = 20
if max_per_page is not None:
per_page = min(per_page, max_per_page)
if page < 1:
if error_out:
abort(404)
else:
page = 1
if per_page < 0:
if error_out:
abort(404)
else:
per_page = 20
items = self.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
abort(404)
# No need to count if we're on the first page and there are fewer
# items than we expected.
if page == 1 and len(items) < per_page:
total = len(items)
else:
total = self.order_by(None).count()
return Pagination(self, page, per_page, total, items)
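    # Usage sketch (``User`` is a hypothetical model; values are arbitrary):
    #   page_obj = User.query.order_by(User.username).paginate(page=2, per_page=20)
    #   page_obj.items, page_obj.total, page_obj.has_next, page_obj.next_num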
class _QueryProperty(object):
def __init__(self, sa):
self.sa = sa
def __get__(self, obj, type):
try:
mapper = orm.class_mapper(type)
if mapper:
return type.query_class(mapper, session=self.sa.session())
except UnmappedClassError:
return None
def _record_queries(app):
if app.debug:
return True
rq = app.config['SQLALCHEMY_RECORD_QUERIES']
if rq is not None:
return rq
return bool(app.config.get('TESTING'))
class _EngineConnector(object):
def __init__(self, sa, app, bind=None):
self._sa = sa
self._app = app
self._engine = None
self._connected_for = None
self._bind = bind
self._lock = Lock()
def get_uri(self):
if self._bind is None:
return self._app.config['SQLALCHEMY_DATABASE_URI']
binds = self._app.config.get('SQLALCHEMY_BINDS') or ()
assert self._bind in binds, \
'Bind %r is not specified. Set it in the SQLALCHEMY_BINDS ' \
'configuration variable' % self._bind
return binds[self._bind]
def get_engine(self):
with self._lock:
uri = self.get_uri()
echo = self._app.config['SQLALCHEMY_ECHO']
if (uri, echo) == self._connected_for:
return self._engine
info = make_url(uri)
options = {'convert_unicode': True}
self._sa.apply_pool_defaults(self._app, options)
self._sa.apply_driver_hacks(self._app, info, options)
if echo:
options['echo'] = echo
self._engine = rv = sqlalchemy.create_engine(info, **options)
if _record_queries(self._app):
_EngineDebuggingSignalEvents(self._engine,
self._app.import_name).register()
self._connected_for = (uri, echo)
return rv
def get_state(app):
"""Gets the state for the application"""
assert 'sqlalchemy' in app.extensions, \
'The sqlalchemy extension was not registered to the current ' \
'application. Please make sure to call init_app() first.'
return app.extensions['sqlalchemy']
class _SQLAlchemyState(object):
"""Remembers configuration for the (db, app) tuple."""
def __init__(self, db):
self.db = db
self.connectors = {}
class SQLAlchemy(object):
"""This class is used to control the SQLAlchemy integration to one
or more Flask applications. Depending on how you initialize the
object it is usable right away or will attach as needed to a
Flask application.
There are two usage modes which work very similarly. One is binding
the instance to a very specific Flask application::
app = Flask(__name__)
db = SQLAlchemy(app)
The second possibility is to create the object once and configure the
application later to support it::
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
db.init_app(app)
return app
The difference between the two is that in the first case methods like
:meth:`create_all` and :meth:`drop_all` will work all the time but in
the second case a :meth:`flask.Flask.app_context` has to exist.
By default Flask-SQLAlchemy will apply some backend-specific settings
to improve your experience with them. As of SQLAlchemy 0.6 SQLAlchemy
will probe the library for native unicode support. If it detects
unicode it will let the library handle that, otherwise do that itself.
Sometimes this detection can fail in which case you might want to set
``use_native_unicode`` (or the ``SQLALCHEMY_NATIVE_UNICODE`` configuration
key) to ``False``. Note that the configuration key overrides the
value you pass to the constructor.
This class also provides access to all the SQLAlchemy functions and classes
from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules. So you can
declare models like this::
class User(db.Model):
username = db.Column(db.String(80), unique=True)
pw_hash = db.Column(db.String(80))
You can still use :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` directly, but
note that Flask-SQLAlchemy customizations are available only through an
instance of this :class:`SQLAlchemy` class. Query classes default to
:class:`BaseQuery` for `db.Query`, `db.Model.query_class`, and the default
query_class for `db.relationship` and `db.backref`. If you use these
interfaces through :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` directly,
the default query class will be that of :mod:`sqlalchemy`.
.. admonition:: Check types carefully
Don't perform type or `isinstance` checks against `db.Table`, which
emulates `Table` behavior but is not a class. `db.Table` exposes the
`Table` interface, but is a function which allows omission of metadata.
The ``session_options`` parameter, if provided, is a dict of parameters
to be passed to the session constructor. See :class:`~sqlalchemy.orm.session.Session`
for the standard options.
.. versionadded:: 0.10
The `session_options` parameter was added.
.. versionadded:: 0.16
`scopefunc` is now accepted on `session_options`. It allows specifying
a custom function which will define the SQLAlchemy session's scoping.
.. versionadded:: 2.1
The `metadata` parameter was added. This allows for setting custom
naming conventions among other, non-trivial things.
.. versionadded:: 3.0
The `query_class` parameter was added, to allow customisation
of the query class, in place of the default of :class:`BaseQuery`.
The `model_class` parameter was added, which allows a custom model
class to be used in place of :class:`Model`.
.. versionchanged:: 3.0
Utilise the same query class across `session`, `Model.query` and `Query`.
"""
#: Default query class used by :attr:`Model.query` and other queries.
#: Customize this by passing ``query_class`` to :func:`SQLAlchemy`.
#: Defaults to :class:`BaseQuery`.
Query = None
def __init__(self,
app=None,
use_native_unicode=True,
session_options=None,
metadata=None,
query_class=BaseQuery,
model_class=Model):
self.use_native_unicode = use_native_unicode
self.Query = query_class
self.session = self.create_scoped_session(session_options)
self.Model = self.make_declarative_base(model_class, metadata)
self._engine_lock = Lock()
self.app = app
_include_sqlalchemy(self, query_class)
if app is not None:
self.init_app(app)
@property
def metadata(self):
"""The metadata associated with ``db.Model``."""
return self.Model.metadata
def create_scoped_session(self, options=None):
"""Create a :class:`~sqlalchemy.orm.scoping.scoped_session`
on the factory from :meth:`create_session`.
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function. If it's not provided, Flask's app
context stack identity is used. This will ensure that sessions are
created and removed with the request/response cycle, and should be fine
in most cases.
:param options: dict of keyword arguments passed to session class in
``create_session``
"""
if options is None:
options = {}
scopefunc = options.pop('scopefunc', _app_ctx_stack.__ident_func__)
options.setdefault('query_cls', self.Query)
return orm.scoped_session(self.create_session(options),
scopefunc=scopefunc)
def create_session(self, options):
"""Create the session factory used by :meth:`create_scoped_session`.
The factory **must** return an object that SQLAlchemy recognizes as a session,
or registering session events may raise an exception.
Valid factories include a :class:`~sqlalchemy.orm.session.Session`
class or a :class:`~sqlalchemy.orm.session.sessionmaker`.
The default implementation creates a ``sessionmaker`` for :class:`SignallingSession`.
:param options: dict of keyword arguments passed to session class
"""
return orm.sessionmaker(class_=SignallingSession, db=self, **options)
def make_declarative_base(self, model, metadata=None):
"""Creates the declarative base that all models will inherit from.
:param model: base model class (or a tuple of base classes) to pass
to :func:`~sqlalchemy.ext.declarative.declarative_base`. Or a class
returned from ``declarative_base``, in which case a new base class
is not created.
:param: metadata: :class:`~sqlalchemy.MetaData` instance to use, or
none to use SQLAlchemy's default.
        .. versionchanged:: 2.3.0
``model`` can be an existing declarative base in order to support
complex customization such as changing the metaclass.
"""
if not isinstance(model, DeclarativeMeta):
model = declarative_base(cls=model,
name='Model',
metadata=metadata,
metaclass=DefaultMeta)
# if user passed in a declarative base and a metaclass for some reason,
# make sure the base uses the metaclass
if metadata is not None and model.metadata is not metadata:
model.metadata = metadata
if not getattr(model, 'query_class', None):
model.query_class = self.Query
model.query = _QueryProperty(self)
return model
def init_app(self, app):
"""This callback can be used to initialize an application for the
use with this database setup. Never use a database in the context
of an application not initialized that way or connections will
leak.
"""
if ('SQLALCHEMY_DATABASE_URI' not in app.config
and 'SQLALCHEMY_BINDS' not in app.config):
warnings.warn(
'Neither SQLALCHEMY_DATABASE_URI nor SQLALCHEMY_BINDS is set. '
'Defaulting SQLALCHEMY_DATABASE_URI to "sqlite:///:memory:".')
app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
app.config.setdefault('SQLALCHEMY_BINDS', None)
app.config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)
app.config.setdefault('SQLALCHEMY_ECHO', False)
app.config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)
app.config.setdefault('SQLALCHEMY_POOL_SIZE', None)
app.config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)
app.config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)
app.config.setdefault('SQLALCHEMY_MAX_OVERFLOW', None)
app.config.setdefault('SQLALCHEMY_COMMIT_ON_TEARDOWN', False)
track_modifications = app.config.setdefault(
'SQLALCHEMY_TRACK_MODIFICATIONS', None)
if track_modifications is None:
warnings.warn(
FSADeprecationWarning(
'SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and '
'will be disabled by default in the future. Set it to True '
'or False to suppress this warning.'))
app.extensions['sqlalchemy'] = _SQLAlchemyState(self)
@app.teardown_appcontext
def shutdown_session(response_or_exc):
if app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']:
if response_or_exc is None:
self.session.commit()
self.session.remove()
return response_or_exc
def apply_pool_defaults(self, app, options):
def _setdefault(optionkey, configkey):
value = app.config[configkey]
if value is not None:
options[optionkey] = value
_setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')
_setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')
_setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')
_setdefault('max_overflow', 'SQLALCHEMY_MAX_OVERFLOW')
def apply_driver_hacks(self, app, info, options):
"""This method is called before engine creation and used to inject
driver specific hacks into the options. The `options` parameter is
a dictionary of keyword arguments that will then be used to call
the :func:`sqlalchemy.create_engine` function.
The default implementation provides some saner defaults for things
like pool sizes for MySQL and sqlite. Also it injects the setting of
`SQLALCHEMY_NATIVE_UNICODE`.
"""
if info.drivername.startswith('mysql'):
info.query.setdefault('charset', 'utf8')
if info.drivername != 'mysql+gaerdbms':
options.setdefault('pool_size', 10)
options.setdefault('pool_recycle', 7200)
elif info.drivername == 'sqlite':
pool_size = options.get('pool_size')
detected_in_memory = False
if info.database in (None, '', ':memory:'):
detected_in_memory = True
from sqlalchemy.pool import StaticPool
options['poolclass'] = StaticPool
if 'connect_args' not in options:
options['connect_args'] = {}
options['connect_args']['check_same_thread'] = False
# we go to memory and the pool size was explicitly set
# to 0 which is fail. Let the user know that
if pool_size == 0:
raise RuntimeError('SQLite in memory database with an '
'empty queue not possible due to data '
'loss.')
# if pool size is None or explicitly set to 0 we assume the
# user did not want a queue for this sqlite connection and
# hook in the null pool.
elif not pool_size:
from sqlalchemy.pool import NullPool
options['poolclass'] = NullPool
# if it's not an in memory database we make the path absolute.
if not detected_in_memory:
info.database = os.path.join(app.root_path, info.database)
unu = app.config['SQLALCHEMY_NATIVE_UNICODE']
if unu is None:
unu = self.use_native_unicode
if not unu:
options['use_native_unicode'] = False
@property
def engine(self):
"""Gives access to the engine. If the database configuration is bound
to a specific application (initialized with an application) this will
always return a database connection. If however the current application
is used this might raise a :exc:`RuntimeError` if no application is
active at the moment.
"""
return self.get_engine()
def make_connector(self, app=None, bind=None):
"""Creates the connector for a given state and bind."""
return _EngineConnector(self, self.get_app(app), bind)
def get_engine(self, app=None, bind=None):
"""Returns a specific engine."""
app = self.get_app(app)
state = get_state(app)
with self._engine_lock:
connector = state.connectors.get(bind)
if connector is None:
connector = self.make_connector(app, bind)
state.connectors[bind] = connector
return connector.get_engine()
def get_app(self, reference_app=None):
"""Helper method that implements the logic to look up an
application."""
if reference_app is not None:
return reference_app
if current_app:
return current_app._get_current_object()
if self.app is not None:
return self.app
raise RuntimeError(
'No application found. Either work inside a view function or push'
' an application context. See'
' http://flask-sqlalchemy.pocoo.org/contexts/.')
def get_tables_for_bind(self, bind=None):
"""Returns a list of all tables relevant for a bind."""
result = []
for table in itervalues(self.Model.metadata.tables):
if table.info.get('bind_key') == bind:
result.append(table)
return result
def get_binds(self, app=None):
"""Returns a dictionary with a table->engine mapping.
This is suitable for use of sessionmaker(binds=db.get_binds(app)).
"""
app = self.get_app(app)
binds = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
retval = {}
for bind in binds:
engine = self.get_engine(app, bind)
tables = self.get_tables_for_bind(bind)
retval.update(dict((table, engine) for table in tables))
return retval
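# Added usage sketch (not in the original source), following the docstring
# above; `db` and `app` are illustrative names for an extension instance and
# a Flask application:
#
#     from sqlalchemy.orm import sessionmaker
#     Session = sessionmaker(binds=db.get_binds(app))
#
# Each table declared with a 'bind_key' is thereby routed to the engine of
# its configured bind.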
def _execute_for_all_tables(self, app, bind, operation, skip_tables=False):
app = self.get_app(app)
if bind == '__all__':
binds = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
elif isinstance(bind, string_types) or bind is None:
binds = [bind]
else:
binds = bind
for bind in binds:
extra = {}
if not skip_tables:
tables = self.get_tables_for_bind(bind)
extra['tables'] = tables
op = getattr(self.Model.metadata, operation)
op(bind=self.get_engine(app, bind), **extra)
def create_all(self, bind='__all__', app=None):
"""Creates all tables.
.. versionchanged:: 0.12
Parameters were added
"""
self._execute_for_all_tables(app, bind, 'create_all')
def drop_all(self, bind='__all__', app=None):
"""Drops all tables.
.. versionchanged:: 0.12
Parameters were added
"""
self._execute_for_all_tables(app, bind, 'drop_all')
def reflect(self, bind='__all__', app=None):
"""Reflects tables from the database.
.. versionchanged:: 0.12
Parameters were added
"""
self._execute_for_all_tables(app, bind, 'reflect', skip_tables=True)
def __repr__(self):
return '<%s engine=%r>' % (self.__class__.__name__, self.engine.url
if self.app or current_app else None)
class _BoundDeclarativeMeta(DefaultMeta):
def __init__(cls, name, bases, d):
warnings.warn(FSADeprecationWarning(
'"_BoundDeclarativeMeta" has been renamed to "DefaultMeta". The'
' old name will be removed in 3.0.'),
stacklevel=3)
super(_BoundDeclarativeMeta, cls).__init__(name, bases, d)
class FSADeprecationWarning(DeprecationWarning):
pass
warnings.simplefilter('always', FSADeprecationWarning)
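# Added usage sketch (not part of the original module). It assumes the
# extension class defined earlier in this file is the usual `SQLAlchemy`
# class and that Flask is installed; the model and names below are
# illustrative only.
if __name__ == "__main__":  # pragma: no cover
    from flask import Flask

    _app = Flask(__name__)
    _app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
    _app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    _db = SQLAlchemy(_app)

    class Note(_db.Model):
        id = _db.Column(_db.Integer, primary_key=True)
        text = _db.Column(_db.String(80))

    with _app.app_context():
        _db.create_all()
        _db.session.add(Note(text="hello"))
        _db.session.commit()
        print(Note.query.count())  # -> 1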
| 36.621349
| 123
| 0.612705
|
a305eaaef0b2c4406156c21bb56dd19b134c118c
| 104
|
py
|
Python
|
DescriptiveStatistics/mode.py
|
sv549/statscalc
|
338be993adf33f8e835990a67f401214931d4c3f
|
[
"MIT"
] | null | null | null |
DescriptiveStatistics/mode.py
|
sv549/statscalc
|
338be993adf33f8e835990a67f401214931d4c3f
|
[
"MIT"
] | null | null | null |
DescriptiveStatistics/mode.py
|
sv549/statscalc
|
338be993adf33f8e835990a67f401214931d4c3f
|
[
"MIT"
] | null | null | null |
import statistics
class Mode:
@staticmethod
def mode(num):
return statistics.mode(num)
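# Added example (not part of the original file). statistics.mode returns the
# most common value; for multimodal data it raises StatisticsError on
# Python < 3.8 and returns the first mode encountered on 3.8+.
if __name__ == "__main__":
    print(Mode.mode([1, 2, 2, 3]))  # -> 2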
| 14.857143
| 35
| 0.673077
|
db9e094810abeaf242cc10be1c67b6019ec23a5a
| 132
|
py
|
Python
|
code/otherStrats/kjhSkewedRandom2.py
|
kjh618/carykh-prisoners-dilemma-tournament
|
e44821eeaf4c6d824e0278370d51fab76adf543a
|
[
"MIT"
] | 1
|
2021-07-01T10:41:28.000Z
|
2021-07-01T10:41:28.000Z
|
code/otherStrats/kjhSkewedRandom2.py
|
kjh618/carykh-prisoners-dilemma-tournament
|
e44821eeaf4c6d824e0278370d51fab76adf543a
|
[
"MIT"
] | null | null | null |
code/otherStrats/kjhSkewedRandom2.py
|
kjh618/carykh-prisoners-dilemma-tournament
|
e44821eeaf4c6d824e0278370d51fab76adf543a
|
[
"MIT"
] | null | null | null |
import random
def strategy(history, memory):
if random.random() < 0.2:
return 1, None
else:
return 0, None
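# Added note: this strategy ignores `history` and `memory` entirely and plays
# move 1 with probability 0.2 and move 0 otherwise; interpreting 1 as
# "cooperate" and 0 as "defect" is the usual convention in this tournament,
# but that mapping is an assumption here, not something stated in this file.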
| 16.5
| 30
| 0.590909
|
d916438cc9417c23078abb93f65f74db9f37a4ae
| 9,925
|
py
|
Python
|
datalad/support/tests/test_fileinfo.py
|
christinerogers/datalad
|
8b91f3767b45371e213aa7ade146a290a13c00f2
|
[
"MIT"
] | null | null | null |
datalad/support/tests/test_fileinfo.py
|
christinerogers/datalad
|
8b91f3767b45371e213aa7ade146a290a13c00f2
|
[
"MIT"
] | null | null | null |
datalad/support/tests/test_fileinfo.py
|
christinerogers/datalad
|
8b91f3767b45371e213aa7ade146a290a13c00f2
|
[
"MIT"
] | null | null | null |
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test file info getters"""
import os.path as op
import datalad.utils as ut
from datalad.tests.utils import (
assert_dict_equal,
assert_equal,
assert_in,
assert_not_in,
assert_raises,
known_failure_githubci_win,
with_tempfile,
)
from datalad.distribution.dataset import Dataset
from datalad.support.gitrepo import GitRepo
from datalad.tests.utils import (
assert_repo_status,
get_convoluted_situation,
)
@known_failure_githubci_win
@with_tempfile
def test_get_content_info(path):
repo = GitRepo(path)
assert_equal(repo.get_content_info(), {})
# an invalid reference causes an exception
assert_raises(ValueError, repo.get_content_info, ref='HEAD')
ds = get_convoluted_situation(path)
repopath = ds.repo.pathobj
assert_equal(ds.repo.pathobj, repopath)
assert_equal(ds.pathobj, ut.Path(path))
# verify general rules on fused info records that are incrementally
# assembled: for git content info, amended with annex info on 'HEAD'
# (to get the last committed stage and with it possibly vanished
# content), and lastly annex info wrt to the present worktree, to
# also get info on added/staged content
# this fuses the info reported from
# - git ls-files
# - git annex findref HEAD
# - git annex find --include '*'
for f, r in ds.repo.annexstatus().items():
if f.match('*_untracked'):
assert(r.get('gitshasum', None) is None)
if f.match('*_deleted'):
assert(not f.exists() and not f.is_symlink())
if f.match('subds_*'):
assert(r['type'] == ('dataset' if r.get('gitshasum', None) else 'directory'))
if f.match('file_*'):
# which one exactly depends on many things
assert_in(r['type'], ('file', 'symlink'))
if f.match('file_ingit*'):
assert(r['type'] == 'file')
elif '.datalad' not in f.parts and not f.match('.git*') and \
r.get('gitshasum', None) and not f.match('subds*'):
# this should be known to annex, one way or another
# regardless of whether things are deleted or staged
# or anything in between
assert_in('key', r, f)
assert_in('keyname', r, f)
assert_in('backend', r, f)
assert_in('bytesize', r, f)
# no duplication with path
assert_not_in('file', r, f)
# query full untracked report
res = ds.repo.get_content_info()
assert_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)
assert_not_in(repopath.joinpath('dir_untracked'), res)
# query for compact untracked report
res = ds.repo.get_content_info(untracked='normal')
assert_not_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)
assert_in(repopath.joinpath('dir_untracked'), res)
# query no untracked report
res = ds.repo.get_content_info(untracked='no')
assert_not_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)
assert_not_in(repopath.joinpath('dir_untracked'), res)
# git status integrity
status = ds.repo.status()
for t in ('subds', 'file'):
for s in ('untracked', 'added', 'deleted', 'clean',
'ingit_clean', 'dropped_clean', 'modified',
'ingit_modified'):
for l in ('', ut.PurePosixPath('subdir', '')):
if t == 'subds' and 'ingit' in s or 'dropped' in s:
# invalid combination
continue
if t == 'subds' and s == 'deleted':
# same as subds_unavailable -> clean
continue
p = repopath.joinpath(l, '{}_{}'.format(t, s))
assert p.match('*_{}'.format(status[p]['state'])), p
if t == 'subds':
assert_in(status[p]['type'], ('dataset', 'directory'), p)
else:
assert_in(status[p]['type'], ('file', 'symlink'), p)
# git annex status integrity
annexstatus = ds.repo.annexstatus()
for t in ('file',):
for s in ('untracked', 'added', 'deleted', 'clean',
'ingit_clean', 'dropped_clean', 'modified',
'ingit_modified'):
for l in ('', ut.PurePosixPath('subdir', '')):
p = repopath.joinpath(l, '{}_{}'.format(t, s))
if s in ('untracked', 'ingit_clean', 'ingit_modified'):
# annex knows nothing about these things
assert_not_in('key', annexstatus[p])
continue
assert_in('key', annexstatus[p])
# dear future,
# if the next one fails, git-annex might have changed the
# nature of the path that are being reported by
# `annex find --json`
# when this was written `hashir*` was a native path, but
# `file` was a POSIX path
assert_equal(annexstatus[p]['has_content'], 'dropped' not in s)
# check the different subds evaluation modes
someds = Dataset(ds.pathobj / 'subds_modified' / 'someds')
dirtyds_path = someds.pathobj / 'dirtyds'
assert_not_in(
'state',
someds.repo.status(eval_submodule_state='no')[dirtyds_path]
)
assert_equal(
'clean',
someds.repo.status(eval_submodule_state='commit')[dirtyds_path]['state']
)
assert_equal(
'modified',
someds.repo.status(eval_submodule_state='full')[dirtyds_path]['state']
)
@with_tempfile
def test_compare_content_info(path):
# TODO remove when `create` is RF to return the new Dataset
ds = Dataset(path).create()
assert_repo_status(path)
# for a clean repo HEAD and worktree query should yield identical results
# minus a 'bytesize' report that is readily available for HEAD, but would
# not a stat call per file for the worktree, and is not done ATM
wt = ds.repo.get_content_info(ref=None)
assert_dict_equal(
wt,
{f: {k: v for k, v in p.items() if k != 'bytesize'}
for f, p in ds.repo.get_content_info(ref='HEAD').items()}
)
@with_tempfile
def test_subds_path(path):
# a dataset with a subdataset with a file, all neatly tracked
ds = Dataset(path).create()
subds = ds.create('sub')
assert_repo_status(path)
with (subds.pathobj / 'some.txt').open('w') as f:
f.write(u'test')
ds.save(recursive=True)
assert_repo_status(path)
# querying the toplevel dataset repo for a subdspath should
# report the subdataset record in the dataset
# (unlike `git status`, which is silent for subdataset paths),
# but definitely not report the subdataset as deleted
# https://github.com/datalad/datalad-revolution/issues/17
stat = ds.repo.status(paths=[op.join('sub', 'some.txt')])
assert_equal(list(stat.keys()), [subds.repo.pathobj])
assert_equal(stat[subds.repo.pathobj]['state'], 'clean')
@with_tempfile
def test_report_absent_keys(path):
ds = Dataset(path).create()
# create an annexed file
testfile = ds.pathobj / 'dummy'
testfile.write_text(u'nothing')
ds.save()
# present in a full report and in a partial report
# based on worktree of HEAD ref
for ai in (
ds.repo.get_content_annexinfo(eval_availability=True),
ds.repo.get_content_annexinfo(
paths=['dummy'],
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
paths=['dummy'],
eval_availability=True)):
assert_in(testfile, ai)
assert_equal(ai[testfile]['has_content'], True)
# drop the key, not available anywhere else
ds.drop('dummy', check=False)
# does not change a thing, except the key is gone
for ai in (
ds.repo.get_content_annexinfo(eval_availability=True),
ds.repo.get_content_annexinfo(
paths=['dummy'],
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
paths=['dummy'],
eval_availability=True)):
assert_in(testfile, ai)
assert_equal(ai[testfile]['has_content'], False)
@with_tempfile
def test_annexinfo_init(path):
ds = Dataset(path).create()
foo = ds.pathobj / "foo"
foo_cont = b"foo content"
foo.write_bytes(foo_cont)
bar = ds.pathobj / "bar"
bar.write_text(u"bar content")
ds.save()
# Custom init limits report, with original dict getting updated.
cinfo_custom_init = ds.repo.get_content_annexinfo(
init={foo: {"bytesize": 0,
"this-is-surely-only-here": "right?"}})
assert_not_in(bar, cinfo_custom_init)
assert_in(foo, cinfo_custom_init)
assert_equal(cinfo_custom_init[foo]["bytesize"], len(foo_cont))
assert_equal(cinfo_custom_init[foo]["this-is-surely-only-here"],
"right?")
# "git" injects get_content_info() values.
cinfo_init_git = ds.repo.get_content_annexinfo(init="git")
assert_in("gitshasum", cinfo_init_git[foo])
# init=None, on the other hand, does not.
cinfo_init_none = ds.repo.get_content_annexinfo(init=None)
assert_in(foo, cinfo_init_none)
assert_in(bar, cinfo_init_none)
assert_not_in("gitshasum", cinfo_init_none[foo])
| 38.173077
| 87
| 0.605239
|
f4c2a42c0d069841009e30e8a8b799327c8ef1c3
| 6,963
|
py
|
Python
|
get_translations.py
|
nomad-vagabond/BookDict
|
e2f17ef0db32854ad7b9fabe6a2179cc88376d40
|
[
"MIT"
] | null | null | null |
get_translations.py
|
nomad-vagabond/BookDict
|
e2f17ef0db32854ad7b9fabe6a2179cc88376d40
|
[
"MIT"
] | null | null | null |
get_translations.py
|
nomad-vagabond/BookDict
|
e2f17ef0db32854ad7b9fabe6a2179cc88376d40
|
[
"MIT"
] | null | null | null |
import json, random, time, re, os
import lingvo_api
_sleeptimes = [1,3,10,30,60]
def translate_words_recursive(words, dumpfile, srclang='En', dstlang='Uk', stime_idx=0):
if stime_idx > 4:
return
translations = []
delayed = []
stimes = [st for st in _sleeptimes[:stime_idx+1]]
for i, word in enumerate(words):
print(i, word)
if i == 300:
print("Sleeping for 1 minute...")
time.sleep(60)
time.sleep(0.5)
translation = lingvo_api.get_translation(word, srclang=srclang, dstlang=dstlang)
if type(translation) is int:
print('Server says:', translation)
if translation == 429:
for j, stime in enumerate(stimes):
print("waiting for %d seconds" %stime)
time.sleep(stime)
translation = lingvo_api.get_translation(word, srclang=srclang,
dstlang=dstlang)
if type(translation) is int:
print('Server says:', translation)
if translation == 429 and j == len(stimes)-1:
delayed.append(word)
else:
# translations.append(translation)
trans_json = json.dumps(translation,
indent=4, ensure_ascii=False)
with open(dumpfile, 'a') as transdump:
transdump.write(trans_json + ',\n')
break
else:
trans_json = json.dumps(translation, indent=4, ensure_ascii=False)
with open(dumpfile, 'a') as transdump:
transdump.write(trans_json + ',\n')
print("len(delayed):", len(delayed))
translate_words_recursive(delayed, dumpfile, srclang=srclang, dstlang=dstlang,
stime_idx=stime_idx+1)
def load_words(wordsfile, store_familiar=True,
famfile="./vocabulary/familiar/familiar_words.rst"):
unfamiliar = []
familiar = []
if store_familiar:
try:
with open(famfile) as fam:
familiar = fam.readlines()
except: pass
with open(wordsfile) as words_file:
words = words_file.readlines()
for word in words:
if word[:2] == '..':
familiar.append(word[3:])
else:
unfamiliar.append(word.strip('\n'))
if store_familiar:
familiar = sorted(list(set(familiar)))
with open(famfile, 'w') as fam:
for famword in familiar:
fam.write(famword)
print("Total number of unfamiliar words in book is %d" %len(unfamiliar))
return unfamiliar
def translate(words, dumpfile, srclang='En', dstlang='Uk'):
with open(dumpfile, 'w') as transdump:
transdump.write('')
# translate_words(words[:1000])
translate_words_recursive(words, dumpfile, srclang=srclang, dstlang=dstlang)
with open(dumpfile, 'r+') as transdump:
tolist = "[\n" + transdump.read()[:-2] + "\n]"
transdump.seek(0, 0)
transdump.write(tolist)
def build_dictionary(dumpfile, dictfile, blocknum=9, sortwords='a', insertline=True):
with open(dumpfile) as transdump:
translations = json.loads(transdump.read())
items = []
for item in translations:
line = item['Heading'] + " - " + item['Translation']
items.append(line)
items = list(set(items))
if sortwords == 'a':
items = sorted(items)
elif sortwords == 'r':
random.shuffle(items)
if insertline:
addchar = '\n\n'
else:
addchar = ''
if not blocknum:
blocknum = len(items) + 1
bn = 1
with open(dictfile, 'w') as worddict:
i = j = 0
blockwords = []
for line in items:
word_trans = line.split(' - ')
word = "**" + word_trans[0] + "**"
if len(blockwords) == 0:
worddict.write("## Block " + str(bn) + "\n\n")
blockwords.append(word)
worddict.write(word + ' - ' + word_trans[1] + addchar)
if (i == blocknum) or (j == len(items)-1):
# if j == 3:
worddict.write('---\n\n')
random.shuffle(blockwords)
for bword in blockwords:
worddict.write(bword + addchar)
worddict.write('---\n\n')
i = 0
blockwords = []
bn += 1
i += 1
j += 1
print("Dictionary is successfully built.")
if __name__ == '__main__':
# print('Enter path to book file. (Currently only .epub format is supported)')
# book = input()
wordsfile = "./vocabulary/Dragon's Egg.rst"
words = load_words(wordsfile=wordsfile, store_familiar=True)
bookname = re.findall(r'.*/(.*)\.', wordsfile)[0]
dumpfile = './vocabulary/' + bookname + ".json"
if os.path.exists(dumpfile):
print("""Translations are already recieved.
If you would like to recieve them again delete
<bookname>.json file first.""")
else:
random.shuffle(words)
# print(words)
# print(len(words))
translate(words, dumpfile, srclang='En', dstlang='Ru')
dictfile = './vocabulary/' + bookname + ".md"
build_dictionary(dumpfile, dictfile, sortwords='r')
# DEPRECATED
# def translate_words(words, wordnum=0, dumpfile="translations.txt"):
# translations = []
# _sleeptimes = [3,10,30,60]
# if wordnum:
# stop = wordnum
# else:
# stop = len(words) - wordnum
# # print("stop:", stop)
# for i, word in enumerate(words):
# if i == stop:
# break
# translation = lingvo_api.get_translation(word)
# if type(translation) is int:
# print(translation)
# if translation == 429:
# for stime in _sleeptimes:
# time.sleep(stime)
# translation = lingvo_api.get_translation(word)
# if type(translation) is not int:
# break
# translations.append(translation)
# with io.open(dumpfile, 'w', encoding='utf8') as transdump:
# translations_json = json.dumps(translations, sort_keys=True,
# indent=4, ensure_ascii=False)
# transdump.write(translations_json)
# # with io.open(dumpfile, 'w', encoding='utf8') as transdump:
# # for item in translations:
# # transdump.write(translations_json)
| 33.315789
| 89
| 0.516588
|
c82d5ed3d1a674e3cb8da2971712b339e678778f
| 4,285
|
py
|
Python
|
isovar/nucleotide_counts.py
|
carnivorouspeanut/isovar_comp
|
74fcc12ef52d08eb4cfa85bdcda8903970babbda
|
[
"Apache-2.0"
] | null | null | null |
isovar/nucleotide_counts.py
|
carnivorouspeanut/isovar_comp
|
74fcc12ef52d08eb4cfa85bdcda8903970babbda
|
[
"Apache-2.0"
] | null | null | null |
isovar/nucleotide_counts.py
|
carnivorouspeanut/isovar_comp
|
74fcc12ef52d08eb4cfa85bdcda8903970babbda
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import numpy as np
from .dna import (
dna_nucleotide_to_index,
index_to_dna_nucleotide,
)
from .read_helpers import (
make_prefix_suffix_pairs,
get_single_allele_from_reads,
)
def nucleotide_counts(variant_reads):
"""
Count the number of times {A, C, T, G} occur at each position to the
left and right of the variant.
Parameters
----------
variant_reads : list of AlleleRead objects
Expected to all contain the same variant allele.
Returns a tuple with the following elements:
- a matrix with four rows and as many columns as the sum of the longest
prefix preceding the variant, the longest suffix after the variant and
the number of variant nucleotides.
- the column indices for the variant nucleotides
"""
variant_seq = get_single_allele_from_reads(variant_reads)
prefix_suffix_pairs = make_prefix_suffix_pairs(variant_reads)
n_reads = len(prefix_suffix_pairs)
max_prefix_length = max(len(p) for (p, _) in prefix_suffix_pairs)
max_suffix_length = max(len(s) for (_, s) in prefix_suffix_pairs)
n_variant_nucleotides = len(variant_seq)
n_cols = max_prefix_length + max_suffix_length + n_variant_nucleotides
counts = np.zeros((4, n_cols), dtype=int)
variant_column_indices = []
# first fill in the variant nucleotide counts, since they'll
# be invariant across all the supporting reads
for i, nucleotide in enumerate(variant_seq):
variant_col_idx = max_prefix_length + i
variant_column_indices.append(variant_col_idx)
row_idx = dna_nucleotide_to_index[nucleotide]
counts[row_idx, variant_col_idx] = n_reads
for p, s in prefix_suffix_pairs:
for i, prefix_col_idx in enumerate(range(
max_prefix_length - len(p),
max_prefix_length)):
row_idx = dna_nucleotide_to_index[p[i]]
counts[row_idx, prefix_col_idx] += 1
for i, suffix_col_idx in enumerate(range(
max_prefix_length + n_variant_nucleotides,
max_prefix_length + n_variant_nucleotides + len(s))):
row_idx = dna_nucleotide_to_index[s[i]]
counts[row_idx, suffix_col_idx] += 1
return counts, variant_column_indices
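# Added worked illustration (not in the original): if the longest prefix
# across reads is 3 nt, the variant allele is a single nucleotide and the
# longest suffix is 2 nt, then `counts` has shape (4, 3 + 1 + 2) == (4, 6),
# `variant_column_indices` == [3], and column 3 holds `n_reads` in the row of
# the variant base and zeros elsewhere.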
def most_common_nucleotides(partitioned_read_sequences):
"""
Find the most common nucleotide at each offset to the left and
right of a variant.
Parameters
----------
partitioned_read_sequences : list of tuples
Each tuple has three elements:
- sequence before mutant nucleotides
- mutant nucleotides
- sequence after mutant nucleotides
Returns a tuple with the following elements:
- nucleotide sequence from most common nucleotide at each offset
relative to the variant
- an array of counts indicating how many reads supported this nucleotide
- an array of counts for all the *other* nucleotides at that position
"""
counts, variant_column_indices = nucleotide_counts(
partitioned_read_sequences)
max_count_per_column = counts.max(axis=0)
assert len(max_count_per_column) == counts.shape[1]
max_nucleotide_index_per_column = np.argmax(counts, axis=0)
assert len(max_nucleotide_index_per_column) == counts.shape[1]
nucleotides = [
index_to_dna_nucleotide[idx]
for idx in max_nucleotide_index_per_column
]
other_nucleotide_counts = counts.sum(axis=0) - max_count_per_column
return "".join(nucleotides), max_count_per_column, other_nucleotide_counts
| 37.920354
| 80
| 0.711785
|
a778c6d74e22a7ddaae5415945df773f99194e15
| 16,799
|
py
|
Python
|
sample_apps/python/QueryExample.py
|
jmgray24/amazon-timestream-tools
|
7c7b80b56c9435cc00e029d8c59d6c19c19b3ad3
|
[
"MIT-0"
] | 143
|
2020-10-01T03:18:02.000Z
|
2022-03-28T18:17:17.000Z
|
sample_apps/python/QueryExample.py
|
jmgray24/amazon-timestream-tools
|
7c7b80b56c9435cc00e029d8c59d6c19c19b3ad3
|
[
"MIT-0"
] | 36
|
2020-10-02T17:31:09.000Z
|
2022-03-13T18:45:31.000Z
|
sample_apps/python/QueryExample.py
|
jmgray24/amazon-timestream-tools
|
7c7b80b56c9435cc00e029d8c59d6c19c19b3ad3
|
[
"MIT-0"
] | 106
|
2020-10-01T13:46:36.000Z
|
2022-03-28T18:17:10.000Z
|
from Constant import DATABASE_NAME, TABLE_NAME, ONE_GB_IN_BYTES
class QueryExample:
HOSTNAME = "host-24Gju"
def __init__(self, client):
self.client = client
self.paginator = client.get_paginator('query')
# See records ingested into this table so far
SELECT_ALL = f"SELECT * FROM {DATABASE_NAME}.{TABLE_NAME}"
# 1. Find the average, p90, p95, and p99 CPU utilization for a specific EC2 host over the past 2 hours.
QUERY_1 = f"""
SELECT region, az, hostname, BIN(time, 15s) AS binned_timestamp,
ROUND(AVG(measure_value::double), 2) AS avg_cpu_utilization,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.9), 2) AS p90_cpu_utilization,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.95), 2) AS p95_cpu_utilization,
ROUND(APPROX_PERCENTILE(measure_value::double, 0.99), 2) AS p99_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND hostname = '{HOSTNAME}'
AND time > ago(2h)
GROUP BY region, hostname, az, BIN(time, 15s)
ORDER BY binned_timestamp ASC
"""
# 2. Identify EC2 hosts with CPU utilization that is higher by 10% or more compared to the average
# CPU utilization of the entire fleet for the past 2 hours.
QUERY_2 = f"""
WITH avg_fleet_utilization AS (
SELECT COUNT(DISTINCT hostname) AS total_host_count, AVG(measure_value::double) AS fleet_avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND time > ago(2h)
), avg_per_host_cpu AS (
SELECT region, az, hostname, AVG(measure_value::double) AS avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY region, az, hostname
)
SELECT region, az, hostname, avg_cpu_utilization, fleet_avg_cpu_utilization
FROM avg_fleet_utilization, avg_per_host_cpu
WHERE avg_cpu_utilization > 1.1 * fleet_avg_cpu_utilization
ORDER BY avg_cpu_utilization DESC
"""
# 3. Find the average CPU utilization binned at 30 second intervals for a specific EC2 host over the past 2 hours.
QUERY_3 = f"""
SELECT BIN(time, 30s) AS binned_timestamp, ROUND(AVG(measure_value::double), 2) AS avg_cpu_utilization, hostname
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND hostname = '{HOSTNAME}'
AND time > ago(2h)
GROUP BY hostname, BIN(time, 30s)
ORDER BY binned_timestamp ASC
"""
# 4. Find the average CPU utilization binned at 30 second intervals for a specific EC2 host over the past 2 hours,
# filling in the missing values using linear interpolation.
QUERY_4 = f"""
WITH binned_timeseries AS (
SELECT hostname, BIN(time, 30s) AS binned_timestamp,
ROUND(AVG(measure_value::double), 2) AS avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND hostname = '{HOSTNAME}'
AND time > ago(2h)
GROUP BY hostname, BIN(time, 30s)
), interpolated_timeseries AS (
SELECT hostname,
INTERPOLATE_LINEAR(
CREATE_TIME_SERIES(binned_timestamp, avg_cpu_utilization),
SEQUENCE(min(binned_timestamp), max(binned_timestamp), 15s)) AS interpolated_avg_cpu_utilization
FROM binned_timeseries
GROUP BY hostname
)
SELECT time, ROUND(value, 2) AS interpolated_cpu
FROM interpolated_timeseries
CROSS JOIN UNNEST(interpolated_avg_cpu_utilization)
"""
# 5. Find the average CPU utilization binned at 30 second intervals for a specific EC2 host over the past 2 hours,
# filling in the missing values using interpolation based on the last observation carried forward.
QUERY_5 = f"""
WITH binned_timeseries AS (
SELECT hostname, BIN(time, 30s) AS binned_timestamp,
ROUND(AVG(measure_value::double), 2) AS avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND hostname = '{HOSTNAME}'
AND time > ago(2h)
GROUP BY hostname, BIN(time, 30s)
), interpolated_timeseries AS (
SELECT hostname,
INTERPOLATE_LOCF(
CREATE_TIME_SERIES(binned_timestamp, avg_cpu_utilization),
SEQUENCE(min(binned_timestamp), max(binned_timestamp), 15s)) AS interpolated_avg_cpu_utilization
FROM binned_timeseries
GROUP BY hostname
)
SELECT time, ROUND(value, 2) AS interpolated_cpu
FROM interpolated_timeseries
CROSS JOIN UNNEST(interpolated_avg_cpu_utilization)
"""
# 6. Find the average CPU utilization binned at 30 second intervals for a specific EC2 host over the past 2 hours,
# filling in the missing values using interpolation based on a constant value.
QUERY_6 = f"""
WITH binned_timeseries AS (
SELECT hostname, BIN(time, 30s) AS binned_timestamp,
ROUND(AVG(measure_value::double), 2) AS avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND hostname = '{HOSTNAME}'
AND time > ago(2h)
GROUP BY hostname, BIN(time, 30s)
), interpolated_timeseries AS (
SELECT hostname,
INTERPOLATE_FILL(
CREATE_TIME_SERIES(binned_timestamp, avg_cpu_utilization),
SEQUENCE(min(binned_timestamp),
max(binned_timestamp), 15s), 10.0) AS interpolated_avg_cpu_utilization
FROM binned_timeseries
GROUP BY hostname
)
SELECT time, ROUND(value, 2) AS interpolated_cpu
FROM interpolated_timeseries
CROSS JOIN UNNEST(interpolated_avg_cpu_utilization)
"""
# 7. Find the average CPU utilization binned at 30 second intervals for a specific EC2 host over the past 2 hours,
# filling in the missing values using cubic spline interpolation.
QUERY_7 = f"""
WITH binned_timeseries AS (
SELECT hostname, BIN(time, 30s) AS binned_timestamp,
ROUND(AVG(measure_value::double), 2) AS avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND hostname = '{HOSTNAME}'
AND time > ago(2h)
GROUP BY hostname, BIN(time, 30s)
), interpolated_timeseries AS (
SELECT hostname,
INTERPOLATE_SPLINE_CUBIC(
CREATE_TIME_SERIES(binned_timestamp, avg_cpu_utilization),
SEQUENCE(min(binned_timestamp), max(binned_timestamp), 15s)) AS interpolated_avg_cpu_utilization
FROM binned_timeseries
GROUP BY hostname
)
SELECT time, ROUND(value, 2) AS interpolated_cpu
FROM interpolated_timeseries
CROSS JOIN UNNEST(interpolated_avg_cpu_utilization)
"""
# 8. Find the average CPU utilization binned at 30 second intervals for all EC2 hosts over the past 2 hours,
# filling in the missing values using last observation carry forward interpolation.
QUERY_8 = f"""
WITH per_host_min_max_timestamp AS (
SELECT hostname, min(time) as min_timestamp, max(time) as max_timestamp
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY hostname
), interpolated_timeseries AS (
SELECT m.hostname,
INTERPOLATE_LOCF(
CREATE_TIME_SERIES(time, measure_value::double),
SEQUENCE(MIN(ph.min_timestamp), MAX(ph.max_timestamp), 1s)) as interpolated_avg_cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME} m
INNER JOIN per_host_min_max_timestamp ph ON m.hostname = ph.hostname
WHERE measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY m.hostname
)
SELECT hostname, AVG(cpu_utilization) AS avg_cpu_utilization
FROM interpolated_timeseries
CROSS JOIN UNNEST(interpolated_avg_cpu_utilization) AS t (time, cpu_utilization)
GROUP BY hostname
ORDER BY avg_cpu_utilization DESC
"""
# 9. Find the percentage of measurements with CPU utilization above 70% for a specific EC2 host
# over the past 2 hours, filling in the missing values using linear interpolation.
QUERY_9 = f"""
WITH time_series_view AS (
SELECT INTERPOLATE_LINEAR(
CREATE_TIME_SERIES(time, ROUND(measure_value::double,2)),
SEQUENCE(min(time), max(time), 10s)) AS cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE hostname = '{HOSTNAME}'
AND measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY hostname
)
SELECT FILTER(cpu_utilization, x -> x.value > 70.0) AS cpu_above_threshold,
REDUCE(FILTER(cpu_utilization, x -> x.value > 70.0), 0, (s, x) -> s + 1, s -> s)
AS count_cpu_above_threshold,
ROUND(REDUCE(cpu_utilization, CAST(ROW(0, 0) AS ROW(count_high BIGINT, count_total BIGINT)),
(s, x) -> CAST(ROW(s.count_high + IF(x.value > 70.0, 1, 0), s.count_total + 1)
AS ROW(count_high BIGINT, count_total BIGINT)),
s -> IF(s.count_total = 0, NULL, CAST(s.count_high AS DOUBLE) / s.count_total)), 4)
AS fraction_cpu_above_threshold
FROM time_series_view
"""
# 10. List the measurements with CPU utilization lower than 75% for a specific EC2 host over the past 2 hours,
# filling in the missing values using linear interpolation.
QUERY_10 = f"""
WITH time_series_view AS (
SELECT min(time) AS oldest_time,
INTERPOLATE_LINEAR(CREATE_TIME_SERIES(time, ROUND(measure_value::double, 2)),
SEQUENCE(min(time), max(time), 10s)) AS cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE hostname = '{HOSTNAME}'
AND measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY hostname
)
SELECT FILTER(cpu_utilization, x -> x.value < 75 AND x.time > oldest_time + 1m)
FROM time_series_view
"""
# 11. Find the total number of measurements with a CPU utilization of 0% for a specific EC2 host
# over the past 2 hours, filling in the missing values using linear interpolation.
QUERY_11 = f"""
WITH time_series_view AS (
SELECT INTERPOLATE_LINEAR(CREATE_TIME_SERIES(time, ROUND(measure_value::double, 2)),
SEQUENCE(min(time), max(time), 10s)) AS cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE hostname = '{HOSTNAME}'
AND measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY hostname
)
SELECT REDUCE(cpu_utilization, DOUBLE '0.0', (s, x) -> s + 1, s -> s) AS count_cpu
FROM time_series_view
"""
# 12. Find the average CPU utilization for a specific EC2 host over the past 2 hours,
# filling in the missing values using linear interpolation.
QUERY_12 = f"""
WITH time_series_view AS (
SELECT INTERPOLATE_LINEAR(CREATE_TIME_SERIES(time, ROUND(measure_value::double, 2)),
SEQUENCE(min(time), max(time), 10s)) AS cpu_utilization
FROM {DATABASE_NAME}.{TABLE_NAME}
WHERE hostname = '{HOSTNAME}'
AND measure_name = 'cpu_utilization'
AND time > ago(2h)
GROUP BY hostname
)
SELECT REDUCE(cpu_utilization, CAST(ROW(0.0, 0) AS ROW(sum DOUBLE, count INTEGER)),
(s, x) -> CAST(ROW(x.value + s.sum, s.count + 1) AS ROW(sum DOUBLE, count INTEGER)),
s -> IF(s.count = 0, NULL, s.sum / s.count)) AS avg_cpu
FROM time_series_view
"""
queries = [QUERY_1, QUERY_2, QUERY_3, QUERY_4, QUERY_5, QUERY_6, QUERY_7,
QUERY_8, QUERY_9, QUERY_10, QUERY_11, QUERY_12]
def run_all_queries(self):
for query_id in range(len(self.queries)):
print("Running query [%d] : [%s]" % (query_id + 1, self.queries[query_id]))
self.run_query(self.queries[query_id])
def run_query(self, query_string):
try:
page_iterator = self.paginator.paginate(QueryString=query_string)
for page in page_iterator:
self._parse_query_result(page)
except Exception as err:
print("Exception while running query:", err)
def _parse_query_result(self, query_result):
query_status = query_result["QueryStatus"]
progress_percentage = query_status["ProgressPercentage"]
print(f"Query progress so far: {progress_percentage}%")
bytes_scanned = float(query_status["CumulativeBytesScanned"]) / ONE_GB_IN_BYTES
print(f"Data scanned so far: {bytes_scanned} GB")
bytes_metered = float(query_status["CumulativeBytesMetered"]) / ONE_GB_IN_BYTES
print(f"Data metered so far: {bytes_metered} GB")
column_info = query_result['ColumnInfo']
print("Metadata: %s" % column_info)
print("Data: ")
for row in query_result['Rows']:
print(self._parse_row(column_info, row))
def _parse_row(self, column_info, row):
data = row['Data']
row_output = []
for j in range(len(data)):
info = column_info[j]
datum = data[j]
row_output.append(self._parse_datum(info, datum))
return "{%s}" % str(row_output)
def _parse_datum(self, info, datum):
if datum.get('NullValue', False):
return "%s=NULL" % info['Name'],
column_type = info['Type']
# If the column is of TimeSeries Type
if 'TimeSeriesMeasureValueColumnInfo' in column_type:
return self._parse_time_series(info, datum)
# If the column is of Array Type
elif 'ArrayColumnInfo' in column_type:
array_values = datum['ArrayValue']
return "%s=%s" % (info['Name'], self._parse_array(info['Type']['ArrayColumnInfo'], array_values))
# If the column is of Row Type
elif 'RowColumnInfo' in column_type:
row_column_info = info['Type']['RowColumnInfo']
row_values = datum['RowValue']
return self._parse_row(row_column_info, row_values)
# If the column is of Scalar Type
else:
return self._parse_column_name(info) + datum['ScalarValue']
def _parse_time_series(self, info, datum):
time_series_output = []
for data_point in datum['TimeSeriesValue']:
time_series_output.append("{time=%s, value=%s}"
% (data_point['Time'],
self._parse_datum(info['Type']['TimeSeriesMeasureValueColumnInfo'],
data_point['Value'])))
return "[%s]" % str(time_series_output)
def _parse_array(self, array_column_info, array_values):
array_output = []
for datum in array_values:
array_output.append(self._parse_datum(array_column_info, datum))
return "[%s]" % str(array_output)
def run_query_with_multiple_pages(self, limit):
query_with_limit = self.SELECT_ALL + " LIMIT " + str(limit)
print("Starting query with multiple pages : " + query_with_limit)
self.run_query(query_with_limit)
def cancel_query(self):
print("Starting query: " + self.SELECT_ALL)
result = self.client.query(QueryString=self.SELECT_ALL)
print("Cancelling query: " + self.SELECT_ALL)
try:
self.client.cancel_query(QueryId=result['QueryId'])
print("Query has been successfully cancelled")
except Exception as err:
print("Cancelling query failed:", err)
@staticmethod
def _parse_column_name(info):
if 'Name' in info:
return info['Name'] + "="
else:
return ""
| 45.898907
| 121
| 0.61843
|
31d8af533c5c69ec3d9046807a13c6a98041aeaf
| 2,238
|
py
|
Python
|
src/Application/PythonScriptModule/pymodules_old/simiangrid/auth.py
|
antont/tundra
|
5c9b0a3957071f08ab425dff701cdbb34f9e1868
|
[
"Apache-2.0"
] | null | null | null |
src/Application/PythonScriptModule/pymodules_old/simiangrid/auth.py
|
antont/tundra
|
5c9b0a3957071f08ab425dff701cdbb34f9e1868
|
[
"Apache-2.0"
] | null | null | null |
src/Application/PythonScriptModule/pymodules_old/simiangrid/auth.py
|
antont/tundra
|
5c9b0a3957071f08ab425dff701cdbb34f9e1868
|
[
"Apache-2.0"
] | null | null | null |
#httplib was ok and httplib2 especially had nice api, but they don't work thru proxies and stuff
#-- curl is the most robust thing
#import httplib
import curl #a high level wrapper over pycurl bindings
import json
import hashlib #only 'cause has a hardcoded pwd here now - for real this comes from connection or launcher
try:
import naali
except ImportError:
naali = None #so that can test standalone too, without Naali
else:
import circuits
class SimiangridAuthentication(circuits.BaseComponent):
pass #put disconnecting to on_exit here to not leave old versions while reloading
url = "http://localhost/Grid/"
c = curl.Curl()
def simiangrid_auth(url, username, md5hex):
params = {'RequestMethod': 'AuthorizeIdentity',
'Identifier': username,
'Type': 'md5hash',
'Credential': md5hex}
rdata = c.post(url, params)
print rdata
r = json.loads(rdata)
#http://code.google.com/p/openmetaverse/wiki/AuthorizeIdentity
success = r.get('Success', False)
#NOTE: docs say reply should have Success:false upon failure.
#however in my test run it doesn't - there is just the Message about missing/invalid creds
#this code works for that too.
return success
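# Added note: simiangrid_auth() POSTs an AuthorizeIdentity request carrying
# the username and the md5 hex digest of the password, and reports success
# based on the "Success" field of the JSON reply (defaulting to False).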
def on_connect(conn_id, userconn):
print userconn.GetLoginData()
username = userconn.GetProperty("username")
username = username.replace('_', ' ') #XXX HACK: tundra login doesn't allow spaces, whereas simiangrid frontend demands them
pwd = userconn.GetProperty("password")
md5hex = hashlib.md5(pwd).hexdigest()
success = simiangrid_auth(url, username, md5hex)
print "Authentication success:", success, "for", conn_id, userconn
if not success:
userconn.DenyConnection()
if naali is not None:
s = naali.server
if s.IsAboutToStart():
s.connect("UserAboutToConnect(int, UserConnection*)", on_connect)
print "simiangrid/auth.py running on server - hooked to authorize connections"
else:
on_connect(17, {'username': "Lady Tron",
'password': "They only want you when you're seventeen"})
"""
{ "Success":true, "UserID":"fe5f5ac3-7b28-4276-ae50-133db72040f0" }
Authentication success: True
"""
| 33.909091
| 128
| 0.693476
|
98e0c84112a2ae32aee84a4cb48cdfe7b569ef38
| 4,646
|
py
|
Python
|
tests/test_helper.py
|
yiskw713/pytorch_template
|
cfff5da9bd87da81ecbe05bb53397c5414a63163
|
[
"MIT"
] | 10
|
2020-11-20T05:51:14.000Z
|
2021-12-07T00:49:10.000Z
|
tests/test_helper.py
|
pqy000/plant-recognition
|
40028c213b4ba3fbb20de35d12252e136e40b5bf
|
[
"MIT"
] | 4
|
2021-01-25T15:42:25.000Z
|
2021-05-25T00:05:11.000Z
|
tests/test_helper.py
|
pqy000/plant-recognition
|
40028c213b4ba3fbb20de35d12252e136e40b5bf
|
[
"MIT"
] | 2
|
2020-11-26T07:16:45.000Z
|
2021-09-22T02:45:31.000Z
|
import copy
import numpy as np
import pytest
import torch
import torch.optim as optim
from pytest_mock import MockFixture
from torchvision import transforms
from src.libs.dataset import get_dataloader
from src.libs.helper import do_one_iteration, evaluate, train
from src.libs.loss_fn import get_criterion
from src.libs.models import get_model
@pytest.fixture()
def sample():
img = torch.randn(2, 3, 112, 112)
class_id = torch.tensor([0, 1]).long()
label = ["daisy", "dandelion"]
return {"img": img, "class_id": class_id, "label": label}
@pytest.fixture()
def model_optimizer():
model = get_model("resnet18", 5)
optimizer = optim.Adam(model.parameters(), lr=0.0003)
return (model, optimizer)
@pytest.fixture()
def criterion():
return get_criterion()
def test_do_one_iteration1(sample, model_optimizer, criterion):
# check iteration for training
model, optimizer = model_optimizer
original_model = copy.deepcopy(model)
batch_size, loss, acc1, gt, pred = do_one_iteration(
sample, model, criterion, "cpu", "train", optimizer
)
assert batch_size == 2
assert loss > 0
assert 0 <= acc1 <= 100.0
assert np.all(gt == np.array([0, 1]))
assert pred.shape == (2,)
# check if models have the same weights
# https://discuss.pytorch.org/t/check-if-models-have-same-weights/4351
for key_item1, key_item2 in zip(
model.state_dict().items(), original_model.state_dict().items()
):
# if the weights are completely identical, training does not work.
assert not torch.equal(key_item1[1], key_item2[1])
def test_do_one_iteration2(sample, model_optimizer, criterion):
# check iteration for evaluation
model, optimizer = model_optimizer
original_model = copy.deepcopy(model)
model.eval()
batch_size, loss, acc1, gt, pred = do_one_iteration(
sample, model, criterion, "cpu", "evaluate"
)
assert batch_size == 2
assert loss > 0
assert 0 <= acc1 <= 100.0
assert np.all(gt == np.array([0, 1]))
assert pred.shape == (2,)
# check if models have the same weights
# https://discuss.pytorch.org/t/check-if-models-have-same-weights/4351
for key_item1, key_item2 in zip(
model.state_dict().items(), original_model.state_dict().items()
):
# if the weights are completely identical, training does not work.
assert torch.equal(key_item1[1], key_item2[1])
def test_do_one_iteration3(sample, model_optimizer, criterion):
model, optimizer = model_optimizer
with pytest.raises(ValueError):
do_one_iteration(sample, model, criterion, "cpu", "test")
with pytest.raises(ValueError):
do_one_iteration(sample, model, criterion, "cpu", "train")
def test_train(mocker: MockFixture, model_optimizer, criterion):
model, optimizer = model_optimizer
mocker.patch("src.libs.helper.do_one_iteration").return_value = (
2,
0.1,
50.0,
np.array([0, 1]),
np.array([1, 1]),
)
loader = get_dataloader(
"pytest",
"train",
batch_size=2,
shuffle=False,
num_workers=1,
pin_memory=False,
drop_last=True,
transform=transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
]
),
)
# make small dataset
loader.dataset.df = loader.dataset.df[:10]
loss, acc1, f1s = train(
loader, model, criterion, optimizer, 0, "cpu", interval_of_progress=1
)
assert model.training
assert loss == 0.1
assert acc1 == 50.0
assert 0 <= f1s <= 1.0
def test_evaluate(mocker: MockFixture, model_optimizer, criterion):
model, _ = model_optimizer
mocker.patch("src.libs.helper.do_one_iteration").return_value = (
2,
0.1,
50.0,
np.array([0, 1]),
np.array([1, 1]),
)
loader = get_dataloader(
"pytest",
"test",
batch_size=2,
shuffle=False,
num_workers=1,
pin_memory=False,
drop_last=False,
transform=transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
]
),
)
# make small dataset
loader.dataset.df = loader.dataset.df[:10]
n_classes = loader.dataset.get_n_classes()
loss, acc1, f1s, c_matrix = evaluate(loader, model, criterion, "cpu")
assert not model.training
assert loss == 0.1
assert acc1 == 50.0
assert 0 <= f1s <= 1.0
assert c_matrix.shape == (n_classes, n_classes)
| 26.855491
| 77
| 0.632587
|
51eb618624f4c40d4755f6f0ba09568d41cab000
| 2,665
|
py
|
Python
|
commands/chat.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 2
|
2022-02-21T17:55:03.000Z
|
2022-02-22T06:25:04.000Z
|
commands/chat.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 3
|
2022-02-09T18:18:29.000Z
|
2022-03-07T08:15:54.000Z
|
commands/chat.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 1
|
2022-03-07T08:10:59.000Z
|
2022-03-07T08:10:59.000Z
|
#######################
# Dennis MUD #
# chat.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
from lib.color import *
NAME = "chat"
CATEGORIES = ["messaging"]
SPECIAL_ALIASES = ['#']
USAGE = "chat <message>"
DESCRIPTION = """Send a message to the general chat.
General chat messages are seen by all online users who have chat enabled and are not ignoring you.
You must also have chat enabled to send a message.
Wizards cannot be ignored.
Ex. `chat Hello everyone!`
Ex2. `#Hello everyone!`"""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argmin=1):
return False
# Make sure chat is enabled.
if not console.user["chat"]["enabled"]:
console.msg("{0}: Chat must be enabled first.".format(NAME))
return False
# Send our message to all users who have chat enabled and aren't ignoring us.
for u in console.router.users:
if console.router.users[u]["console"].user and console.router.users[u]["console"].user["chat"]["enabled"]:
if not console.user["name"] in console.router.users[u]["console"].user["chat"]["ignored"]:
console.router.users[u]["console"].msg(mcolor(CBMAG,"(Chat) " + console.user["name"] + ": " + ' '.join(args),console.router.users[u]["console"].user["colors"]))
if CONFIG["ircgateway"]["enabled"]:
ircmsg="(Chat) " + console.user["name"] + ": " + ' '.join(args)
console.router.f.p.say(CONFIG["ircgateway"]["channel"],ircmsg)
# Finished.
return True
| 41.640625
| 176
| 0.677298
|
691f0fc5d2d2d8ff4451f536dd0082e4fc03141d
| 4,849
|
py
|
Python
|
src/assets/handle/TSSStageHandle.py
|
5trobl/oaisys
|
e52e3b0e64e5c4f57963e4aabb07946930f62299
|
[
"MIT"
] | null | null | null |
src/assets/handle/TSSStageHandle.py
|
5trobl/oaisys
|
e52e3b0e64e5c4f57963e4aabb07946930f62299
|
[
"MIT"
] | 1
|
2021-11-22T15:42:37.000Z
|
2021-11-22T15:42:37.000Z
|
src/assets/handle/TSSStageHandle.py
|
5trobl/oaisys
|
e52e3b0e64e5c4f57963e4aabb07946930f62299
|
[
"MIT"
] | null | null | null |
# blender imports
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSStageHandle(TSSBase):
"""docstring for TSSStageHandle"""
def __init__(self):
super(TSSStageHandle, self).__init__()
# class vars ###################################################################################################
self._stage_list = [] # list of stage [list]
self._stage_obj_list = [] # list of stage nodes [list]
self._stage_dict = {} # dict of stages [dict]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all stages ############################################################################################
for stage in self._stage_obj_list:
# reset sensor
stage.reset_module()
# maybe obsolete in future versions
del stage
##################################################################################### end of reset all stages #
self.reset_base()
self._stage_list = []
self._stage_obj_list = []
self._stage_dict = {}
def create(self,materials):
""" create function
Args:
materials: list of all materials [list]
Returns:
None
"""
self._create_stages(cfg=self._cfg["STAGES"],
general_cfg=self._cfg["GENERAL"],
materials=materials)
def update_after_meshes(self):
""" update mesh function
Args:
None
Returns:
None
"""
for stage in self._stage_obj_list:
stage.update_after_meshes()
def _create_stages(self,cfg,general_cfg,materials):
""" create function
Args:
cfg: list of stage cfgs [list]
general_cfg: general cfg [dict]
materials: list of all materials [list]
Returns:
None
"""
for ii, stage in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.stages." + stage["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, stage["type"])
_stage = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_stage.set_general_cfg(cfg=general_cfg)
# save name of stage
stage["stageParams"]['name'] = stage["name"]
# update stage cfg
_stage.update_cfg(cfg=stage["stageParams"])
# create material
_stage.create()
# return desired material
_material = _stage.get_desired_material()
############################################################### end of set pass params and create pass #
if _material:
if _material in materials:
_stage.apply_material(material=materials[_material])
else:
raise Exception("Material not found!")
# add pass to list
self._stage_obj_list.append(_stage)
self._stage_list.append(_stage.get_stage())
self._stage_dict[stage["name"]]=_stage.get_stage()
except ImportError:
# manage import error
raise Exception("Cannot add stage")
return -1
return 0
def get_stages(self):
""" get all stages
Args:
None
Returns:
list of stage [list]
"""
return self._stage_list
def get_stage_objs(self):
""" get all stage objects
Args:
None
Returns:
list of stage objects [list]
"""
return self._stage_obj_list
def get_stage_dict(self):
""" get all stage dict
Args:
None
Returns:
list of stage dict [dict]
"""
return self._stage_dict
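# Added note: _create_stages() expects self._cfg["STAGES"] to be a list of
# dicts shaped like {"type": <class name under src.assets.stages>,
# "name": <stage name>, "stageParams": {...}}; the concrete stage class names
# are project specific and are not shown in this file.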
| 30.689873
| 120
| 0.427098
|
80bf0cff254853870b23c1c734f115e68731333c
| 5,501
|
py
|
Python
|
esrally/utils/net.py
|
tomcallahan/rally
|
4d05fa88ea0920ec1f3178c3705201a53f6420db
|
[
"Apache-2.0"
] | null | null | null |
esrally/utils/net.py
|
tomcallahan/rally
|
4d05fa88ea0920ec1f3178c3705201a53f6420db
|
[
"Apache-2.0"
] | null | null | null |
esrally/utils/net.py
|
tomcallahan/rally
|
4d05fa88ea0920ec1f3178c3705201a53f6420db
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import certifi
import urllib3
import urllib.error
from esrally import exceptions
__HTTP = None
logger = logging.getLogger("rally.net")
def init():
global __HTTP
proxy_url = os.getenv("http_proxy")
if proxy_url and len(proxy_url) > 0:
logger.info("Rally connects via proxy URL [%s] to the Internet (picked up from the environment variable [http_proxy])." % proxy_url)
__HTTP = urllib3.ProxyManager(proxy_url, cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
else:
logger.info("Rally connects directly to the Internet (no proxy support).")
__HTTP = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
class Progress:
def __init__(self, msg, accuracy=0):
from esrally.utils import console
self.p = console.progress()
# if we don't show a decimal sign, the maximum width is 3 (max value is 100 (%)). Else it's 3 + 1 (for the decimal point)
# plus the accuracy that the user requested.
total_width = 3 if accuracy == 0 else 4 + accuracy
# sample formatting string: [%5.1f%%] for an accuracy of 1
self.percent_format = "[%%%d.%df%%%%]" % (total_width, accuracy)
self.msg = msg
def __call__(self, bytes_read, bytes_total):
from esrally.utils import convert
completed = bytes_read / bytes_total
total_as_mb = convert.bytes_to_human_string(bytes_total)
self.p.print("%s (%s total size)" % (self.msg, total_as_mb), self.percent_format % (completed * 100))
def finish(self):
self.p.finish()
def download(url, local_path, expected_size_in_bytes=None, progress_indicator=None):
"""
Downloads a single file from a URL to the provided local path.
:param url: The remote URL specifying one file that should be downloaded. May be either a HTTP or HTTPS URL.
:param local_path: The local file name of the file that should be downloaded.
:param expected_size_in_bytes: The expected file size in bytes if known. It will be used to verify that all data have been downloaded.
:param progress_indicator A callable that can be use to report progress to the user. It is expected to take two parameters
``bytes_read`` and ``total_bytes``. If not provided, no progress is shown. Note that ``total_bytes`` is derived from
the ``Content-Length`` header and not from the parameter ``expected_size_in_bytes``.
"""
tmp_data_set_path = local_path + ".tmp"
try:
with __http().request("GET", url, preload_content=False, retries=10,
timeout=urllib3.Timeout(connect=45, read=240)) as r, open(tmp_data_set_path, "wb") as out_file:
if r.status > 299:
raise urllib.error.HTTPError(url, r.status, "", None, None)
# noinspection PyBroadException
try:
size_from_content_header = int(r.getheader("Content-Length"))
except BaseException:
size_from_content_header = None
chunk_size = 2 ** 16
bytes_read = 0
for chunk in r.stream(chunk_size):
out_file.write(chunk)
bytes_read += len(chunk)
if progress_indicator and size_from_content_header:
progress_indicator(bytes_read, size_from_content_header)
except BaseException:
if os.path.isfile(tmp_data_set_path):
os.remove(tmp_data_set_path)
raise
else:
download_size = os.path.getsize(tmp_data_set_path)
if expected_size_in_bytes is not None and download_size != expected_size_in_bytes:
if os.path.isfile(tmp_data_set_path):
os.remove(tmp_data_set_path)
raise exceptions.DataError("Download of [%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected. Please retry." %
(local_path, download_size, expected_size_in_bytes))
os.rename(tmp_data_set_path, local_path)
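# Added usage sketch (illustrative only - the URL and local path are made up):
#
#     progress = Progress("Downloading corpus", accuracy=1)
#     download("https://example.org/data.tar.bz2", "/tmp/data.tar.bz2",
#              progress_indicator=progress)
#     progress.finish()
#
# Any callable accepting (bytes_read, bytes_total) can serve as the indicator.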
def retrieve_content_as_string(url):
with __http().request("GET", url, timeout=urllib3.Timeout(connect=45, read=240)) as response:
return response.read().decode("utf-8")
def has_internet_connection():
try:
# We connect to Github anyway later on so we use that to avoid touching too much different remote endpoints.
probing_url = "https://github.com/"
logger.debug("Checking for internet connection against [%s]" % probing_url)
# We do a HTTP request here to respect the HTTP proxy setting. If we'd open a plain socket connection we circumvent the
# proxy and erroneously conclude we don't have an Internet connection.
response = __http().request("GET", probing_url, timeout=2.0)
status = response.status
logger.debug("Probing result is HTTP status [%s]" % str(status))
return status == 200
except BaseException:
return False
def __http():
if not __HTTP:
init()
return __HTTP
def resolve(hostname_or_ip):
if hostname_or_ip and hostname_or_ip.startswith("127"):
return hostname_or_ip
import socket
addrinfo = socket.getaddrinfo(hostname_or_ip, 22, 0, 0, socket.IPPROTO_TCP)
for family, socktype, proto, canonname, sockaddr in addrinfo:
# we're interested in the IPv4 address
if family == socket.AddressFamily.AF_INET:
ip, _ = sockaddr
if ip[:3] != "127":
return ip
return None
| 42.643411
| 140
| 0.662061
|
9aee3dc438168bc2e52ef58786d1e6a60f5a2420
| 12,209
|
py
|
Python
|
facenet_pytorch/models/utils/detect_face.py
|
ashvah/facial-attributes-recognition
|
1d0ecef47f68dfa8673d479ea585a0873c2bdcfd
|
[
"MIT"
] | 1
|
2022-03-02T09:02:56.000Z
|
2022-03-02T09:02:56.000Z
|
facenet_pytorch/models/utils/detect_face.py
|
ashvah/facial-attributes-recognition
|
1d0ecef47f68dfa8673d479ea585a0873c2bdcfd
|
[
"MIT"
] | null | null | null |
facenet_pytorch/models/utils/detect_face.py
|
ashvah/facial-attributes-recognition
|
1d0ecef47f68dfa8673d479ea585a0873c2bdcfd
|
[
"MIT"
] | null | null | null |
import torch
from torch.nn.functional import interpolate
from torchvision.transforms import functional as F
from torchvision.ops.boxes import batched_nms
from PIL import Image
import numpy as np
import os
import math
# OpenCV is optional, but required if using numpy arrays instead of PIL
try:
    import cv2
except ImportError:
    pass
def fixed_batch_process(im_data, model):
    # Process the input in fixed-size chunks to keep peak GPU memory bounded.
    batch_size = 512
out = []
for i in range(0, len(im_data), batch_size):
batch = im_data[i:(i + batch_size)]
out.append(model(batch))
return tuple(torch.cat(v, dim=0) for v in zip(*out))
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
if isinstance(imgs, np.ndarray):
imgs = torch.as_tensor(imgs.copy(), device=device)
if isinstance(imgs, torch.Tensor):
imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0)
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs.copy(), device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
scale_picks = []
all_i = 0
offset = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
pick = batched_nms(boxes_scale[:, :4], boxes_scale[:, 4], image_inds_scale, 0.5)
scale_picks.append(pick + offset)
offset += boxes_scale.shape[0]
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0)
scale_picks = torch.cat(scale_picks, dim=0)
# NMS within each scale + image
boxes, image_inds = boxes[scale_picks], image_inds[scale_picks]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
        # Equivalent to out = rnet(im_data), but run in fixed-size batches to avoid GPU out-of-memory errors.
out = fixed_batch_process(im_data, rnet)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0, 5, 2, device=device)
if len(boxes) > 0:
y, ey, x, ex = pad(boxes, w, h)
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (48, 48)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
        # Equivalent to out = onet(im_data), but run in fixed-size batches to avoid GPU out-of-memory errors.
out = fixed_batch_process(im_data, onet)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
out2 = out[2].permute(1, 0)
score = out2[1, :]
points = out1
ipass = score > threshold[2]
points = points[:, ipass]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
w_i = boxes[:, 2] - boxes[:, 0] + 1
h_i = boxes[:, 3] - boxes[:, 1] + 1
points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
points = torch.stack((points_x, points_y)).permute(2, 1, 0)
boxes = bbreg(boxes, mv)
# NMS within each image using "Min" strategy
# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
boxes = boxes.cpu().numpy()
points = points.cpu().numpy()
image_inds = image_inds.cpu()
batch_boxes = []
batch_points = []
for b_i in range(batch_size):
b_i_inds = np.where(image_inds == b_i)
batch_boxes.append(boxes[b_i_inds].copy())
batch_points.append(points[b_i_inds].copy())
batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
return batch_boxes, batch_points
def bbreg(boundingbox, reg):
if reg.shape[1] == 1:
reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
stride = 2
cellsize = 12
reg = reg.permute(1, 0, 2, 3)
mask = probs >= thresh
mask_inds = mask.nonzero()
image_inds = mask_inds[:, 0]
score = probs[mask]
reg = reg[:, mask].permute(1, 0)
bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
q1 = ((stride * bb + 1) / scale).floor()
q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0].copy()
y1 = boxes[:, 1].copy()
x2 = boxes[:, 2].copy()
y2 = boxes[:, 3].copy()
s = scores
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx]).copy()
yy1 = np.maximum(y1[i], y1[idx]).copy()
xx2 = np.minimum(x2[i], x2[idx]).copy()
yy2 = np.minimum(y2[i], y2[idx]).copy()
w = np.maximum(0.0, xx2 - xx1 + 1).copy()
h = np.maximum(0.0, yy2 - yy1 + 1).copy()
inter = w * h
if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[:counter].copy()
return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
device = boxes.device
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=device)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
boxes_for_nms = boxes_for_nms.cpu().numpy()
scores = scores.cpu().numpy()
keep = nms_numpy(boxes_for_nms, scores, threshold, method)
return torch.as_tensor(keep, dtype=torch.long, device=device)
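# Illustrative sketch only (not part of the original module): two fully overlapping
# boxes assigned to different image indices are both kept, because the per-index
# offset described above prevents them from suppressing each other.
def _batched_nms_offset_demo():
    boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 10.0, 10.0]])
    scores = torch.tensor([0.9, 0.8])
    idxs = torch.tensor([0, 1])
    return batched_nms_numpy(boxes, scores, idxs, 0.5, "Union")  # keeps both boxes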
def pad(boxes, w, h):
boxes = boxes.trunc().int().cpu().numpy()
x = boxes[:, 0]
y = boxes[:, 1]
ex = boxes[:, 2]
ey = boxes[:, 3]
x[x < 1] = 1
y[y < 1] = 1
ex[ex > w] = w
ey[ey > h] = h
return y, ey, x, ex
def rerec(bboxA):
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = torch.max(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)
return bboxA
def imresample(img, sz):
im_data = interpolate(img, size=sz, mode="area")
return im_data
def crop_resize(img, box, image_size):
if isinstance(img, np.ndarray):
img = img[box[1]:box[3], box[0]:box[2]]
out = cv2.resize(
img,
(image_size, image_size),
interpolation=cv2.INTER_AREA
).copy()
elif isinstance(img, torch.Tensor):
img = img[box[1]:box[3], box[0]:box[2]]
out = imresample(
img.permute(2, 0, 1).unsqueeze(0).float(),
(image_size, image_size)
).byte().squeeze(0).permute(1, 2, 0)
else:
out = img.crop(box).copy().resize((image_size, image_size), Image.BILINEAR)
return out
def save_img(img, path):
if isinstance(img, np.ndarray):
cv2.imwrite(path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
else:
img.save(path)
def get_size(img):
if isinstance(img, (np.ndarray, torch.Tensor)):
return img.shape[1::-1]
else:
return img.size
def extract_face(img, box, image_size=160, margin=0, save_path=None):
"""Extract face + margin from PIL Image given bounding box.
Arguments:
img {PIL.Image} -- A PIL Image.
box {numpy.ndarray} -- Four-element bounding box.
image_size {int} -- Output image size in pixels. The image will be square.
margin {int} -- Margin to add to bounding box, in terms of pixels in the final image.
Note that the application of the margin differs slightly from the davidsandberg/facenet
repo, which applies the margin to the original image before resizing, making the margin
dependent on the original image size.
save_path {str} -- Save path for extracted face image. (default: {None})
Returns:
torch.tensor -- tensor representing the extracted face.
"""
margin = [
margin * (box[2] - box[0]) / (image_size - margin),
margin * (box[3] - box[1]) / (image_size - margin),
]
raw_image_size = get_size(img)
box = [
int(max(box[0] - margin[0] / 2, 0)),
int(max(box[1] - margin[1] / 2, 0)),
int(min(box[2] + margin[0] / 2, raw_image_size[0])),
int(min(box[3] + margin[1] / 2, raw_image_size[1])),
]
face = crop_resize(img, box, image_size)
if save_path is not None:
os.makedirs(os.path.dirname(save_path) + "/", exist_ok=True)
save_img(face, save_path)
face = F.to_tensor(np.float32(face))
return face
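def _extract_face_example():
    # Illustrative sketch only (the file name and box values are hypothetical): crop a
    # single face from a PIL image using the bounding-box convention documented above.
    img = Image.open("person.jpg")
    box = [50, 40, 210, 220]  # hypothetical [x1, y1, x2, y2] in pixels
    return extract_face(img, box, image_size=160, margin=20)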
| 32.298942
| 99
| 0.572037
|
8c11e3c8ecf97965b022398c3d9ee90fd33eebff
| 1,666
|
py
|
Python
|
src/library/forms.py
|
klown/clusive
|
3c89dae967dfe190c2cdf269915c6ace968d1bfb
|
[
"bzip2-1.0.6"
] | null | null | null |
src/library/forms.py
|
klown/clusive
|
3c89dae967dfe190c2cdf269915c6ace968d1bfb
|
[
"bzip2-1.0.6"
] | null | null | null |
src/library/forms.py
|
klown/clusive
|
3c89dae967dfe190c2cdf269915c6ace968d1bfb
|
[
"bzip2-1.0.6"
] | null | null | null |
import logging
from django import forms
from library.models import Book
from roster.models import Period, ClusiveUser
logger = logging.getLogger(__name__)
class UploadForm(forms.Form):
file = forms.FileField(label='File')
class MetadataForm(forms.ModelForm):
cover = forms.FileField(required=False, label='Choose new image...')
cover.widget.attrs.update({'accept': 'image/*'})
use_orig_cover = forms.BooleanField(label='Use this', required=False, initial=False)
use_orig_cover.widget.attrs.update({'class': 'usethis-cover'})
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.label_suffix = ''
class Meta:
model = Book
fields = ['title', 'author', 'description']
widgets = {
'title': forms.TextInput(attrs={'placeholder': 'Content title'}),
'author': forms.TextInput(attrs={'placeholder': 'Author of the content'}),
'description': forms.Textarea(attrs={'placeholder': 'Provide a brief description to show on the Library page.'}),
}
class PeriodModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, period):
return period.name
class ShareForm(forms.Form):
periods = PeriodModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
queryset=Period.objects.all(),
required=False)
def __init__(self, *args, **kwargs):
clusive_user : ClusiveUser
clusive_user = kwargs.pop('user')
super().__init__(*args, **kwargs)
periods = clusive_user.periods.all()
self.fields['periods'].queryset = periods
| 30.290909
| 125
| 0.669268
|
5ebb9069060617a57ae7350b891eed25887f7d32
| 2,568
|
py
|
Python
|
invitation/migrations/0002_auto_20200211_2020.py
|
tiagocordeiro/django-easy-party
|
6e10a0e325e2cbc90426f48546f37bc60b0ec6b7
|
[
"MIT"
] | null | null | null |
invitation/migrations/0002_auto_20200211_2020.py
|
tiagocordeiro/django-easy-party
|
6e10a0e325e2cbc90426f48546f37bc60b0ec6b7
|
[
"MIT"
] | 184
|
2020-01-31T19:43:31.000Z
|
2022-03-18T16:09:50.000Z
|
invitation/migrations/0002_auto_20200211_2020.py
|
tiagocordeiro/django-easy-party
|
6e10a0e325e2cbc90426f48546f37bc60b0ec6b7
|
[
"MIT"
] | 1
|
2020-01-29T17:51:47.000Z
|
2020-01-29T17:51:47.000Z
|
# Generated by Django 3.0.3 on 2020-02-11 23:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('invitation', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='InviteCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Nome')),
('description', models.TextField(verbose_name='Descrição')),
],
options={
'verbose_name': 'categoria',
'verbose_name_plural': 'categorias',
},
),
migrations.CreateModel(
name='InviteTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('active', models.BooleanField(default=True, verbose_name='ativo')),
('name', models.CharField(max_length=50, verbose_name='Nome')),
('description', models.TextField(blank=True, null=True, verbose_name='Descrição')),
('background_image', models.ImageField(blank=True, upload_to='', verbose_name='Background')),
('title_font', models.FileField(blank=True, upload_to='', verbose_name='Fonte do título')),
('body_font', models.FileField(blank=True, upload_to='', verbose_name='Fonte do corpo')),
('title_text', models.CharField(default='Título', max_length=100, verbose_name='Título')),
('body_text', models.TextField(default='Corpo do texto', verbose_name='Corpo do texto')),
('invite_data', models.TextField(blank=True, null=True, verbose_name='invite_data')),
('categories', models.ManyToManyField(to='invitation.InviteCategory')),
],
options={
'verbose_name': 'modelo',
'verbose_name_plural': 'modelos',
},
),
migrations.AddField(
model_name='invite',
name='invite_template',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='invitation.InviteTemplate'),
),
]
| 47.555556
| 136
| 0.593847
|
9758bf355c05862b26bd3e91b0c0f1c77f1e1de4
| 11,566
|
py
|
Python
|
src/interventions_labeling_lib/compared_terms_finder.py
|
MariyaIvanina/usaid_data_processing
|
a57d51f83f4ffbe36368589c6ccb1238d6390699
|
[
"Apache-2.0"
] | 3
|
2021-09-20T10:07:07.000Z
|
2021-12-11T13:33:40.000Z
|
src/interventions_labeling_lib/compared_terms_finder.py
|
MariyaIvanina/usaid_data_processing
|
a57d51f83f4ffbe36368589c6ccb1238d6390699
|
[
"Apache-2.0"
] | null | null | null |
src/interventions_labeling_lib/compared_terms_finder.py
|
MariyaIvanina/usaid_data_processing
|
a57d51f83f4ffbe36368589c6ccb1238d6390699
|
[
"Apache-2.0"
] | null | null | null |
import nltk
from allennlp.predictors.predictor import Predictor
from text_processing import text_normalizer
import re
import spacy
from interventions_labeling_lib import hearst_pattern_finder
from interventions_labeling_lib import hyponym_statistics
import os
import pickle
from text_processing import concepts_merger
nlp = spacy.load('en_core_web_sm')
class ComparedTermsFinder:
def __init__(self,search_engine_inverted_index, abbreviation_resolver, folder_to_save_temp_res):
self.predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
self.filter_words = ["compare","compares","compared","comparing"]
self.hyp_stat = hyponym_statistics.HyponymStatistics({},search_engine_inverted_index, abbreviation_resolver,{},{})
self.hearst_pattern = hearst_pattern_finder.HearstPatterns(for_finding_comparison = True)
self.folder_to_save_temp_res = folder_to_save_temp_res
def find_compared_terms_in_sentence(self, parse_sentence):
for pair in parse_sentence:
if text_normalizer.contain_verb_form(pair["verb"].lower(), self.filter_words):
first_part = ""
second_part = ""
verb = ""
description = pair["description"]
for m in re.finditer("\[(ARG|V).*?:.*?\]", description):
if "ARG2" in m.group(0):
second_part = m.group(0).split(":")[1][:-1].strip()
if "ARG1" in m.group(0):
first_part = m.group(0).split(":")[1][:-1].strip()
return first_part, second_part, pair["verb"], pair["description"]
return "", "","",""
def break_phrase_into_parts(self, text):
parts = text.split("and")
first_part, second_part = "", ""
if len(parts[0].split()) == 1 and len(parts[1].split()) > 1:
first_part = parts[0].strip() + " " + " ".join(parts[1].split()[1:]).strip()
second_part = parts[1].strip()
elif len(parts[1].split()) == 1:
first_part = parts[0].strip()
second_part = parts[1].strip()
return first_part, second_part
def clean_pattern_words(self, text):
return self.hyp_stat.clean_concept(self.hearst_pattern.clean_hyponym_term(text.replace("NP_","").replace("_"," ")))
def clean_result(self, text, search_engine_inverted_index):
cleaned_text = self.hearst_pattern.clean_hyponym_term(text)
if len(cleaned_text.split()) < 4:
return self.hyp_stat.clean_concept(cleaned_text)
np_text = self.hearst_pattern.replace_np_sequences(text).replace("_of_", " of NP_")
for phr in nlp(np_text).noun_chunks:
cleaned_text = self.clean_pattern_words(phr.text)
if cleaned_text == "":
continue
freq_percent = len(search_engine_inverted_index.find_articles_with_keywords([cleaned_text],threshold=1.0, extend_with_abbreviations=False))/search_engine_inverted_index.total_articles_number
if len(set([tag.text for tag in phr.rights]).intersection(set(["of", "in"]))) > 0 and freq_percent > 0.01:
continue
if freq_percent < 0.01:
return cleaned_text
for phr in nlp(np_text).noun_chunks:
cleaned_text = self.clean_pattern_words(phr.text)
if cleaned_text != "":
return cleaned_text
return cleaned_text
def find_raw_compared_items_sentence_parsing(self, articles_df, search_engine_inverted_index):
res_all = {}
all_articles = search_engine_inverted_index.find_articles_with_keywords(self.filter_words, threshold = 1.0, extend_with_abbreviations = False)
cnt = 0
for i in all_articles:
try:
if i not in res_all:
res_all[i] = []
cnt += 1
if cnt % 500 == 0:
print("%d artciles processed"%cnt)
sentences = [text_normalizer.remove_accented_chars(articles_df["title"].values[i].lower()\
if articles_df["title"].values[i].isupper() else articles_df["title"].values[i])]
sentences.extend(nltk.sent_tokenize(text_normalizer.remove_accented_chars(articles_df["abstract"].values[i])))
for sentence in sentences:
if text_normalizer.has_word_in_sentence(sentence, self.filter_words):
parse_sentence = self.predictor.predict(sentence=sentence)["verbs"]
first_part, second_part, verb, description = self.find_compared_terms_in_sentence(parse_sentence)
for v in parse_sentence:
res = {}
for m in re.finditer("\[(ARG|V).*?:.*?\]", v["description"]):
tag = m.group(0).split(":")[0][1:].strip()
tag_text = m.group(0).split(":")[1][:-1].strip()
if tag not in res:
res[tag] = []
res[tag].append(tag_text)
for tag in res:
for arg in res[tag]:
if verb in arg and tag != "V" and (first_part == "" or first_part not in arg or (first_part in arg and nlp(arg.split()[0])[0].tag_ == "IN")):
search_tag = "ARG1"
if tag == "ARGV-TMP":
search_tag = "ARG0"
if re.search("ARG\d+", tag):
search_tag = "ARG" + str(int(re.search("\d+", tag).group(0))-1)
if search_tag in res:
first_part = res[search_tag][0]
break
if first_part == "":
for m in re.finditer("\[(ARG|V).*?:.*?\]", description):
if "ARG1" in m.group(0):
first_part = m.group(0).split(":")[1][:-1].strip()
break
if (" and " in first_part and second_part == "") or (" and " in second_part and first_part == ""):
first_part, second_part = self.break_phrase_into_parts(first_part + second_part)
res_all[i].append((sentence, first_part, second_part))
except KeyboardInterrupt:
raise
except Exception as err:
print("error occured for %d article"%i)
print(err)
return res_all
def clean_found_compared_items(self, res_all, search_engine_inverted_index):
res_all_cleaned = {}
for i in res_all:
res_all_cleaned[i] = []
for part in res_all[i]:
res_all_cleaned[i].append((self.clean_result(part[1], search_engine_inverted_index).strip(), self.clean_result(part[2], search_engine_inverted_index).strip()))
return res_all_cleaned
def fill_compared_items(self, articles_df, search_engine_inverted_index):
if not os.path.exists(self.folder_to_save_temp_res):
os.makedirs(self.folder_to_save_temp_res)
if os.path.exists(os.path.join(self.folder_to_save_temp_res, "res_all.pickle")):
res_all = pickle.load(open(os.path.join(self.folder_to_save_temp_res, "res_all.pickle"), "rb"))
else:
res_all = self.find_raw_compared_items_sentence_parsing(articles_df, search_engine_inverted_index)
pickle.dump(res_all, open(os.path.join(self.folder_to_save_temp_res, "res_all.pickle"),"wb"))
if os.path.exists(os.path.join(self.folder_to_save_temp_res, "res_all_cleaned.pickle")):
res_all_cleaned = pickle.load(open(os.path.join(self.folder_to_save_temp_res, "res_all_cleaned.pickle"), "rb"))
else:
res_all_cleaned = self.clean_found_compared_items(res_all, search_engine_inverted_index)
pickle.dump(res_all_cleaned, open(os.path.join(self.folder_to_save_temp_res, "res_all_cleaned.pickle"),"wb"))
if os.path.exists(os.path.join(self.folder_to_save_temp_res, "res_all_patterns.pickle")):
res_all_patterns = pickle.load(open(os.path.join(self.folder_to_save_temp_res, "res_all_patterns.pickle"),"rb"))
else:
res_all_patterns = self.find_compared_items_via_patterns(articles_df, search_engine_inverted_index)
pickle.dump(res_all_patterns, open(os.path.join(self.folder_to_save_temp_res, "res_all_patterns.pickle"),"wb"))
common_res = self.merge_results(res_all_patterns, res_all_cleaned, search_engine_inverted_index)
articles_df["compared_terms"] = ""
for i in range(len(articles_df)):
articles_df["compared_terms"].values[i] = []
if i in res_all_cleaned:
for term in res_all_cleaned[i]:
articles_df["compared_terms"].values[i].extend([t.strip() for t in term if t.strip() != ""])
if i in res_all_patterns:
for term in res_all_patterns[i]:
if term.strip() != "":
articles_df["compared_terms"].values[i].append(term.strip())
return articles_df
def merge_results(self, res_all_patterns, res_all_cleaned, search_engine_inverted_index):
common_res = {}
for i in res_all_cleaned:
for term in res_all_cleaned[i]:
if i not in common_res:
common_res[i] = []
common_res[i].extend([t.strip() for t in term if t.strip() != ""])
for i in res_all_patterns:
for term in res_all_patterns[i]:
if i not in common_res:
common_res[i] = []
if term.strip() != "":
common_res[i].append(term.strip())
_concepts_merger = concepts_merger.ConceptsMerger(5)
for i in common_res:
for term in common_res[i]:
_concepts_merger.add_item_to_dict(term, i)
_concepts_merger.merge_concepts(search_engine_inverted_index)
for i in common_res:
new_list = []
for term in common_res[i]:
new_list.append(_concepts_merger.new_mapping[term] if term in _concepts_merger.new_mapping else term)
common_res[i] = new_list
return common_res
def find_compared_items_via_patterns(self, articles_df, search_engine_inverted_index):
res_all = {}
cnt = 0
for i in search_engine_inverted_index.find_articles_with_keywords(["comparison"], threshold = 1.0, extend_with_abbreviations = False):
cnt += 1
if cnt %500 == 0:
print("%d articles processed"%cnt)
if i not in res_all:
title = text_normalizer.remove_accented_chars(articles_df["title"].values[i].lower()\
if articles_df["title"].values[i].isupper() else articles_df["title"].values[i])
abstract = text_normalizer.remove_accented_chars(articles_df["abstract"].values[i])
res_all[i] = [self.hyp_stat.clean_concept(expr) for expr in self.hearst_pattern.find_compared_items(title + " . " + abstract)]
return res_all
| 55.07619
| 202
| 0.5894
|
089c0b65db07ff5f470083b045737b16426f4f24
| 1,060
|
py
|
Python
|
python/loanStreetClient.py
|
McGuire00/LoanStreet
|
8a87e7de87e09e2181cb8fd50f15d1d65d631d87
|
[
"MIT"
] | null | null | null |
python/loanStreetClient.py
|
McGuire00/LoanStreet
|
8a87e7de87e09e2181cb8fd50f15d1d65d631d87
|
[
"MIT"
] | null | null | null |
python/loanStreetClient.py
|
McGuire00/LoanStreet
|
8a87e7de87e09e2181cb8fd50f15d1d65d631d87
|
[
"MIT"
] | null | null | null |
import requests
# home page
# http://localhost:3000/
def get_home_page(link):
"""performs a GET request to the / endpoint """
# returns message: "Welcome"
get = requests.get(link)
print(get.text)
# get_home_page("http://localhost:3000/")
def get_all_loans(link):
""" performs a GET request to the /api/loans endpoint """
get = requests.get(link)
print(get.json())
def get_loan_by_id(link):
"""performs a GET request to the /api/loans/:{insert loanId here} endpoint """
get = requests.get(link)
print(get.text)
def update_loan_by_id(link, data):
"""performs a PUT request to the /api/loans/:{insert loanId here} endpoint """
put = requests.put(link, json=data)
print(put.text)
def delete_loan_by_id(link):
"""performs a DELETE request to the /api/loans/:{insert loanId here} endpoint """
delete = requests.delete(link)
print(delete.text)
def submit_new_loan(link, data):
"""performs a POST request to the / endpoint """
post = requests.post(link, json=data)
print(post.text)
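# Illustrative usage sketch (not part of the original client): the endpoints below are
# taken from the docstrings above; the base URL and the loan id are assumed placeholders.
def example_calls():
    base = "http://localhost:3000"
    get_home_page(base + "/")
    get_all_loans(base + "/api/loans")
    get_loan_by_id(base + "/api/loans/1")  # "1" is a hypothetical loanId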
| 23.555556
| 85
| 0.667925
|
fb7b9d68d63444d6433a979e0b4ab9f5b4ec151e
| 16,764
|
py
|
Python
|
mcpi_e/minecraft.py
|
stoneskin/mcpi_e
|
9c9bd1c1c20f8b117ecb97af9893f251237d2329
|
[
"MIT"
] | 2
|
2020-05-23T18:20:34.000Z
|
2021-10-09T09:09:13.000Z
|
mcpi_e/minecraft.py
|
stoneskin/mcpi_e
|
9c9bd1c1c20f8b117ecb97af9893f251237d2329
|
[
"MIT"
] | null | null | null |
mcpi_e/minecraft.py
|
stoneskin/mcpi_e
|
9c9bd1c1c20f8b117ecb97af9893f251237d2329
|
[
"MIT"
] | 2
|
2021-03-07T10:40:23.000Z
|
2021-11-07T14:36:40.000Z
|
from .connection import Connection
from .vec3 import Vec3
from .event import BlockEvent, ChatEvent, ProjectileEvent
from .entity import Entity
from .block import Block
import math
from .util import flatten
import sys
from .logger import *
import mcpi_e.settings as settings
""" Minecraft PI low level api v0.1_1
Note: many methods have the parameter *arg. This solution makes it
simple to allow different types, and variable number of arguments.
The actual magic is a mix of flatten_parameters() and __iter__. Example:
A Cube class could implement __iter__ to work in Minecraft.setBlocks(c, id).
(Because of this, it's possible to "erase" arguments. CmdPlayer removes
entityId, by injecting [] that flattens to nothing)
@author: Aron Nieminen, Mojang AB"""
""" Updated to include functionality provided by RaspberryJuice:
- getBlocks()
- getDirection()
- getPitch()
- getRotation()
- getPlayerEntityId()
- pollChatPosts()
- setSign()
- spawnEntity()
- getEntities()
- removeEntity()
- removeEntityType()
"""
def intFloor(*args):
return [int(math.floor(x)) for x in flatten(args)]
class CmdPositioner:
"""Methods for setting and getting positions"""
def __init__(self, connection, packagePrefix):
self.conn = connection
self.pkg = packagePrefix
def getPos(self, id):
"""Get entity position (entityId:int) => Vec3"""
s = self.conn.sendReceive(self.pkg + b".getPos", id)
return Vec3(*list(map(float, s.split(","))))
def setPos(self, id, *args):
"""Set entity position (entityId:int, x,y,z)"""
self.conn.send(self.pkg + b".setPos", id, args)
def getTilePos(self, id):
"""Get entity tile position (entityId:int) => Vec3"""
s = self.conn.sendReceive(self.pkg + b".getTile", id)
return Vec3(*list(map(int, s.split(","))))
def setTilePos(self, id, *args):
"""Set entity tile position (entityId:int) => Vec3"""
self.conn.send(self.pkg + b".setTile", id, intFloor(*args))
def setDirection(self, id, *args):
"""Set entity direction (entityId:int, x,y,z)"""
self.conn.send(self.pkg + b".setDirection", id, args)
def getDirection(self, id):
"""Get entity direction (entityId:int) => Vec3"""
s = self.conn.sendReceive(self.pkg + b".getDirection", id)
return Vec3(*map(float, s.split(",")))
def setRotation(self, id, yaw):
"""Set entity rotation (entityId:int, yaw)"""
self.conn.send(self.pkg + b".setRotation", id, yaw)
def getRotation(self, id):
"""get entity rotation (entityId:int) => float"""
return float(self.conn.sendReceive(self.pkg + b".getRotation", id))
def setPitch(self, id, pitch):
"""Set entity pitch (entityId:int, pitch)"""
self.conn.send(self.pkg + b".setPitch", id, pitch)
def getPitch(self, id):
"""get entity pitch (entityId:int) => float"""
return float(self.conn.sendReceive(self.pkg + b".getPitch", id))
def setting(self, setting, status):
"""Set a player setting (setting, status). keys: autojump"""
self.conn.send(self.pkg + b".setting", setting, 1 if bool(status) else 0)
class CmdEntity(CmdPositioner):
"""Methods for entities"""
def __init__(self, connection):
CmdPositioner.__init__(self, connection, b"entity")
def getName(self, id):
"""Get the list name of the player with entity id => [name:str]
Also can be used to find name of entity if entity is not a player."""
return self.conn.sendReceive(b"entity.getName", id)
def getEntities(self, id, distance=10, typeId=-1):
"""Return a list of entities near entity (playerEntityId:int, distanceFromPlayerInBlocks:int, typeId:int) => [[entityId:int,entityTypeId:int,entityTypeName:str,posX:float,posY:float,posZ:float]]"""
"""If distanceFromPlayerInBlocks:int is not specified then default 10 blocks will be used"""
s = self.conn.sendReceive(b"entity.getEntities", id, distance, typeId)
entities = [e for e in s.split("|") if e]
return [ [int(n.split(",")[0]), int(n.split(",")[1]), n.split(",")[2], float(n.split(",")[3]), float(n.split(",")[4]), float(n.split(",")[5])] for n in entities]
def removeEntities(self, id, distance=10, typeId=-1):
"""Remove entities all entities near entity (playerEntityId:int, distanceFromPlayerInBlocks:int, typeId:int, ) => (removedEntitiesCount:int)"""
"""If distanceFromPlayerInBlocks:int is not specified then default 10 blocks will be used"""
return int(self.conn.sendReceive(b"entity.removeEntities", id, distance, typeId))
def pollBlockHits(self, *args):
"""Only triggered by sword => [BlockEvent]"""
s = self.conn.sendReceive(b"entity.events.block.hits", intFloor(args))
events = [e for e in s.split("|") if e]
return [BlockEvent.Hit(*list(map(int, e.split(",")))) for e in events]
def pollChatPosts(self, *args):
"""Triggered by posts to chat => [ChatEvent]"""
s = self.conn.sendReceive(b"entity.events.chat.posts", intFloor(args))
events = [e for e in s.split("|") if e]
return [ChatEvent.Post(int(e[:e.find(",")]), e[e.find(",") + 1:]) for e in events]
def pollProjectileHits(self, *args):
"""Only triggered by projectiles => [BlockEvent]"""
s = self.conn.sendReceive(b"entity.events.projectile.hits", intFloor(args))
events = [e for e in s.split("|") if e]
results = []
for e in events:
info = e.split(",")
results.append(ProjectileEvent.Hit(
int(info[0]),
int(info[1]),
int(info[2]),
int(info[3]),
info[4],
info[5]))
return results
def clearEvents(self, *args):
"""Clear the entities events"""
self.conn.send(b"entity.events.clear", intFloor(args))
class CmdPlayer(CmdPositioner):
"""Methods for the host (Raspberry Pi) player"""
def __init__(self, connection,playerId):
CmdPositioner.__init__(self, connection, b"player")
self.conn = connection
self.playerId=playerId
def getPos(self):
return CmdPositioner.getPos(self, self.playerId)
def setPos(self, *args):
return CmdPositioner.setPos(self, self.playerId, args)
def getTilePos(self):
return CmdPositioner.getTilePos(self, self.playerId)
def setTilePos(self, *args):
return CmdPositioner.setTilePos(self, self.playerId, args)
def setDirection(self, *args):
return CmdPositioner.setDirection(self, self.playerId, args)
def getDirection(self):
return CmdPositioner.getDirection(self, self.playerId)
def setRotation(self, yaw):
return CmdPositioner.setRotation(self,self.playerId, yaw)
def getRotation(self):
return CmdPositioner.getRotation(self, self.playerId)
def setPitch(self, pitch):
return CmdPositioner.setPitch(self, self.playerId, pitch)
def getPitch(self):
return CmdPositioner.getPitch(self, self.playerId)
def getEntities(self, distance=10, typeId=-1):
"""Return a list of entities near entity (distanceFromPlayerInBlocks:int, typeId:int) => [[entityId:int,entityTypeId:int,entityTypeName:str,posX:float,posY:float,posZ:float]]"""
"""If distanceFromPlayerInBlocks:int is not specified then default 10 blocks will be used"""
s = self.conn.sendReceive(b"player.getEntities", distance, typeId)
entities = [e for e in s.split("|") if e]
return [ [int(n.split(",")[0]), int(n.split(",")[1]), n.split(",")[2], float(n.split(",")[3]), float(n.split(",")[4]), float(n.split(",")[5])] for n in entities]
def removeEntities(self, distance=10, typeId=-1):
"""Remove entities all entities near entity (distanceFromPlayerInBlocks:int, typeId:int, ) => (removedEntitiesCount:int)"""
"""If distanceFromPlayerInBlocks:int is not specified then default 10 blocks will be used"""
return int(self.conn.sendReceive(b"player.removeEntities", distance, typeId))
def pollBlockHits(self):
"""Only triggered by sword => [BlockEvent]"""
s = self.conn.sendReceive(b"player.events.block.hits")
events = [e for e in s.split("|") if e]
return [BlockEvent.Hit(*list(map(int, e.split(",")))) for e in events]
def pollChatPosts(self):
"""Triggered by posts to chat => [ChatEvent]"""
s = self.conn.sendReceive(b"player.events.chat.posts")
events = [e for e in s.split("|") if e]
return [ChatEvent.Post(int(e[:e.find(",")]), e[e.find(",") + 1:]) for e in events]
def pollProjectileHits(self):
"""Only triggered by projectiles => [BlockEvent]"""
s = self.conn.sendReceive(b"player.events.projectile.hits")
events = [e for e in s.split("|") if e]
results = []
for e in events:
info = e.split(",")
results.append(ProjectileEvent.Hit(
int(info[0]),
int(info[1]),
int(info[2]),
int(info[3]),
info[4],
info[5]))
return results
def clearEvents(self):
"""Clear the players events"""
self.conn.send(b"player.events.clear")
class CmdPlayerEntity(CmdPlayer):
""" use entity to build a player """
def __init__(self, connection,playerId):
CmdPositioner.__init__(self, connection, b"entity")
self.conn = connection
self.playerId=playerId
def getPos(self):
return CmdPositioner.getPos(self, self.playerId)
class CmdCamera:
def __init__(self, connection):
self.conn = connection
def setNormal(self, *args):
"""Set camera mode to normal Minecraft view ([entityId])"""
self.conn.send(b"camera.mode.setNormal", args)
def setFixed(self):
"""Set camera mode to fixed view"""
self.conn.send(b"camera.mode.setFixed")
def setFollow(self, *args):
"""Set camera mode to follow an entity ([entityId])"""
self.conn.send(b"camera.mode.setFollow", args)
def setPos(self, *args):
"""Set camera entity position (x,y,z)"""
self.conn.send(b"camera.setPos", args)
class CmdEvents:
"""Events"""
def __init__(self, connection):
self.conn = connection
def clearAll(self):
"""Clear all old events"""
self.conn.send(b"events.clear")
def pollBlockHits(self):
"""Only triggered by sword => [BlockEvent]"""
s = self.conn.sendReceive(b"events.block.hits")
events = [e for e in s.split("|") if e]
return [BlockEvent.Hit(*list(map(int, e.split(",")))) for e in events]
def pollChatPosts(self):
"""Triggered by posts to chat => [ChatEvent]"""
s = self.conn.sendReceive(b"events.chat.posts")
events = [e for e in s.split("|") if e]
return [ChatEvent.Post(int(e[:e.find(",")]), e[e.find(",") + 1:]) for e in events]
def pollProjectileHits(self):
"""Only triggered by projectiles => [BlockEvent]"""
s = self.conn.sendReceive(b"events.projectile.hits")
events = [e for e in s.split("|") if e]
results = []
for e in events:
info = e.split(",")
results.append(ProjectileEvent.Hit(
int(info[0]),
int(info[1]),
int(info[2]),
int(info[3]),
info[4],
info[5]))
return results
class Minecraft:
"""The main class to interact with a running instance of Minecraft Pi."""
def __init__(self, connection,playerId):
self.conn = connection
self.camera = CmdCamera(connection)
self.entity = CmdEntity(connection)
self.cmdplayer = CmdPlayer(connection,playerId)
self.player=CmdPlayerEntity(connection,playerId)
self.events = CmdEvents(connection)
self.playerId= playerId
self.settings=settings
def getBlock(self, *args):
"""Get block (x,y,z) => id:int"""
return int(self.conn.sendReceive(b"world.getBlock", intFloor(args)))
def getBlockWithData(self, *args):
"""Get block with data (x,y,z) => Block"""
ans = self.conn.sendReceive(b"world.getBlockWithData", intFloor(args))
return Block(*list(map(int, ans.split(","))))
def getBlocks(self, *args):
"""Get a cuboid of blocks (x0,y0,z0,x1,y1,z1) => [id:int]"""
s = self.conn.sendReceive(b"world.getBlocks", intFloor(args))
return map(int, s.split(","))
def setBlock(self, *args):
"""Set block (x,y,z,id,[data])"""
self.conn.send(b"world.setBlock", intFloor(args))
def setBlocks(self, *args):
"""Set a cuboid of blocks (x0,y0,z0,x1,y1,z1,id,[data])"""
self.conn.send(b"world.setBlocks", intFloor(args))
def setSign(self, *args):
"""Set a sign (x,y,z,id,data,[line1,line2,line3,line4])
Wall signs (id=68) require data for facing direction 2=north, 3=south, 4=west, 5=east
Standing signs (id=63) require data for facing rotation (0-15) 0=south, 4=west, 8=north, 12=east
@author: Tim Cummings https://www.triptera.com.au/wordpress/"""
lines = []
flatargs = []
for arg in flatten(args):
flatargs.append(arg)
for flatarg in flatargs[5:]:
lines.append(flatarg.replace(",",";").replace(")","]").replace("(","["))
self.conn.send(b"world.setSign",intFloor(flatargs[0:5]) + lines)
def spawnEntity(self, *args):
"""Spawn entity (x,y,z,id)"""
return int(self.conn.sendReceive(b"world.spawnEntity", args))
def getHeight(self, *args):
"""Get the height of the world (x,z) => int"""
return int(self.conn.sendReceive(b"world.getHeight", intFloor(args)))
def getPlayerEntityIds(self):
"""Get the entity ids of the connected players => [id:int]"""
ids = self.conn.sendReceive(b"world.getPlayerIds")
return list(map(int, ids.split("|")))
def getPlayerEntityId(self, name):
"""Get the entity id of the named player => [id:int]"""
return int(self.conn.sendReceive(b"world.getPlayerId", name))
def saveCheckpoint(self):
"""Save a checkpoint that can be used for restoring the world"""
self.conn.send(b"world.checkpoint.save")
def restoreCheckpoint(self):
"""Restore the world state to the checkpoint"""
self.conn.send(b"world.checkpoint.restore")
def postToChat(self, msg):
"""Post a message to the game chat"""
self.conn.send(b"chat.post", msg)
def setting(self, setting, status):
"""Set a world setting (setting, status). keys: world_immutable, nametags_visible"""
self.conn.send(b"world.setting", setting, 1 if bool(status) else 0)
def getEntityTypes(self):
"""Return a list of Entity objects representing all the entity types in Minecraft"""
s = self.conn.sendReceive(b"world.getEntityTypes")
types = [t for t in s.split("|") if t]
return [Entity(int(e[:e.find(",")]), e[e.find(",") + 1:]) for e in types]
def getEntities(self, typeId=-1):
"""Return a list of all currently loaded entities (EntityType:int) => [[entityId:int,entityTypeId:int,entityTypeName:str,posX:float,posY:float,posZ:float]]"""
s = self.conn.sendReceive(b"world.getEntities", typeId)
entities = [e for e in s.split("|") if e]
return [[int(n.split(",")[0]), int(n.split(",")[1]), n.split(",")[2], float(n.split(",")[3]), float(n.split(",")[4]), float(n.split(",")[5])] for n in entities]
def removeEntity(self, id):
"""Remove entity by id (entityId:int) => (removedEntitiesCount:int)"""
return int(self.conn.sendReceive(b"world.removeEntity", int(id)))
def removeEntities(self, typeId=-1):
"""Remove entities all currently loaded Entities by type (typeId:int) => (removedEntitiesCount:int)"""
return int(self.conn.sendReceive(b"world.removeEntities", typeId))
@staticmethod
def create(address = "localhost", port = 4711,playerName=""):
log("Running Python version:"+sys.version)
conn=Connection(address, port)
playerId=[]
if playerName!="":
playerId= int(conn.sendReceive(b"world.getPlayerId", playerName))
log("get {} playerid={}".format(playerName, playerId))
return Minecraft(conn,playerId)
#settings
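def example_usage(mc):
    # Illustrative sketch only (not part of the original module): the method calls follow
    # the docstrings above, while the coordinates, block id and sign text are arbitrary
    # example values.
    pos = mc.player.getTilePos()
    mc.setBlock(pos.x + 1, pos.y, pos.z, 1)
    mc.setSign(pos.x, pos.y + 1, pos.z, 63, 0, "Hello")  # standing sign, data 0 = south
    mc.postToChat("example_usage placed blocks at " + str(pos))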
if __name__ == "__main__":
#initSettings()
mc = Minecraft.create()
mc.postToChat("Hello, Minecraft!")
| 40.788321
| 205
| 0.617872
|
f7bdded42b6ce80fcb118efc440779f27dcd7ec2
| 1,488
|
py
|
Python
|
tests/test_empty_repo.py
|
BB-Open/datenadler_rdf4j
|
b63df5292b99a0f49b455b6728246c59d7a8b0d7
|
[
"MIT"
] | 2
|
2021-12-29T17:59:46.000Z
|
2022-01-29T07:50:57.000Z
|
tests/test_empty_repo.py
|
BB-Open/datenadler_rdf4j
|
b63df5292b99a0f49b455b6728246c59d7a8b0d7
|
[
"MIT"
] | null | null | null |
tests/test_empty_repo.py
|
BB-Open/datenadler_rdf4j
|
b63df5292b99a0f49b455b6728246c59d7a8b0d7
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from unittest import TestCase
from pyrdf4j.api_graph import APIGraph
from pyrdf4j.errors import URINotReachable
from pyrdf4j.rdf4j import RDF4J
from tests.constants import AUTH, RDF4J_BASE_TEST
class TestEmpty(TestCase):
def setUp(self):
self.rdf4j = RDF4J(RDF4J_BASE_TEST)
self.rdf4j.create_repository('test_bulk_load', auth=AUTH['admin'], overwrite=True, repo_type='native')
self.response_code_ok = HTTPStatus.OK
def tearDown(self):
sparql_endpoint = self.rdf4j.drop_repository('test_bulk_load', auth=AUTH['admin'], accept_not_exist=True)
def test_empty(self):
response = self.rdf4j.bulk_load_from_uri(
'test_bulk_load',
'https://opendata.potsdam.de/api/v2/catalog/exports/ttl',
'application/x-turtle',
auth=AUTH['admin'],
)
response = self.rdf4j.empty_repository('test_bulk_load', auth=AUTH['admin'])
QUERY = "CONSTRUCT {?s ?o ?p} WHERE {?s ?o ?p}"
response = self.rdf4j.get_triple_data_from_query(
'test_bulk_load',
QUERY,
auth=AUTH['viewer'],
)
self.assertTrue('Potsdam' not in response.decode('utf-8'))
class TestEmptyGraph(TestEmpty):
def setUp(self):
self.rdf4j = RDF4J(RDF4J_BASE_TEST, api=APIGraph)
self.rdf4j.create_repository('test_bulk_load', auth=AUTH['admin'], overwrite=True)
self.response_code_ok = HTTPStatus.NO_CONTENT
| 33.818182
| 113
| 0.672715
|
21add1500c80472f2d12983dadb27d14be7b7fbd
| 21,915
|
py
|
Python
|
src/lib/model/networks/dla.py
|
jie311/TraDeS
|
896491a159abe65f61c6ad05662cda6e28d137a6
|
[
"MIT"
] | 475
|
2021-03-13T16:33:36.000Z
|
2022-03-30T06:00:39.000Z
|
src/lib/model/networks/dla.py
|
jie311/TraDeS
|
896491a159abe65f61c6ad05662cda6e28d137a6
|
[
"MIT"
] | 50
|
2021-03-17T04:48:20.000Z
|
2022-03-08T13:55:32.000Z
|
src/lib/model/networks/dla.py
|
jie311/TraDeS
|
896491a159abe65f61c6ad05662cda6e28d137a6
|
[
"MIT"
] | 98
|
2021-03-14T12:12:49.000Z
|
2022-03-19T16:19:13.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from .base_model import BaseModel
try:
from .DCNv2.dcn_v2 import DCN
except Exception:
print('import DCN failed')
DCN = None
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, linear_root=False,
opt=None):
super(DLA, self).__init__()
self.channels = channels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x, pre_img=None, pre_hm=None):
y = []
x = self.base_layer(x)
if pre_img is not None:
x = x + self.pre_img_layer(pre_img)
if pre_hm is not None:
x = x + self.pre_hm_layer(pre_hm)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
return y
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
# fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights, strict=False)
# self.fc = fc
def dla34(pretrained=True, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(
data='imagenet', name='dla34', hash='ba72cf86')
else:
print('Warning: No ImageNet pretrain!!')
return model
def dla102(pretrained=None, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(
data='imagenet', name='dla102', hash='d94d9790')
return model
def dla46_c(pretrained=None, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla46_c', hash='2bfd52c3')
return model
def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla46x_c', hash='d761bae7')
return model
def dla60x_c(pretrained=None, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=None, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla60', hash='24839fc4')
return model
def dla60x(pretrained=None, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla60x', hash='d15cacda')
return model
def dla102x(pretrained=None, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla102x', hash='ad62be81')
return model
def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla102x2', hash='262837b6')
return model
def dla169(pretrained=None, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla169', hash='0914e092')
return model
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def fill_up_weights(up):
    # Initialize the transposed-convolution weights as a fixed bilinear upsampling
    # kernel, replicated across all channels.
    w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class Conv(nn.Module):
def __init__(self, chi, cho):
super(Conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(chi, cho, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True))
def forward(self, x):
return self.conv(x)
class GlobalConv(nn.Module):
def __init__(self, chi, cho, k=7, d=1):
super(GlobalConv, self).__init__()
gcl = nn.Sequential(
nn.Conv2d(chi, cho, kernel_size=(k, 1), stride=1, bias=False,
dilation=d, padding=(d * (k // 2), 0)),
nn.Conv2d(cho, cho, kernel_size=(1, k), stride=1, bias=False,
dilation=d, padding=(0, d * (k // 2))))
gcr = nn.Sequential(
nn.Conv2d(chi, cho, kernel_size=(1, k), stride=1, bias=False,
dilation=d, padding=(0, d * (k // 2))),
nn.Conv2d(cho, cho, kernel_size=(k, 1), stride=1, bias=False,
dilation=d, padding=(d * (k // 2), 0)))
fill_fc_weights(gcl)
fill_fc_weights(gcr)
self.gcl = gcl
self.gcr = gcr
self.act = nn.Sequential(
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.gcl(x) + self.gcr(x)
x = self.act(x)
return x
class DeformConv(nn.Module):
def __init__(self, chi, cho):
super(DeformConv, self).__init__()
self.actf = nn.Sequential(
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)
def forward(self, x):
x = self.conv(x)
x = self.actf(x)
return x
class IDAUp(nn.Module):
def __init__(self, o, channels, up_f, node_type=(DeformConv, DeformConv)):
super(IDAUp, self).__init__()
for i in range(1, len(channels)):
c = channels[i]
f = int(up_f[i])
proj = node_type[0](c, o)
node = node_type[1](o, o)
up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
padding=f // 2, output_padding=0,
groups=o, bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
setattr(self, 'node_' + str(i), node)
def forward(self, layers, startp, endp):
for i in range(startp + 1, endp):
upsample = getattr(self, 'up_' + str(i - startp))
project = getattr(self, 'proj_' + str(i - startp))
layers[i] = upsample(project(layers[i]))
node = getattr(self, 'node_' + str(i - startp))
layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
def __init__(self, startp, channels, scales, in_channels=None,
node_type=DeformConv):
super(DLAUp, self).__init__()
self.startp = startp
if in_channels is None:
in_channels = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i),
IDAUp(channels[j], in_channels[j:],
scales[j:] // scales[j],
node_type=node_type))
scales[j + 1:] = scales[j]
in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, layers):
out = [layers[-1]] # start with 32
for i in range(len(layers) - self.startp - 1):
ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) - i - 2, len(layers))
out.insert(0, layers[-1])
return out
class Interpolate(nn.Module):
def __init__(self, scale, mode):
super(Interpolate, self).__init__()
self.scale = scale
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False)
return x
DLA_NODE = {
'dcn': (DeformConv, DeformConv),
'gcn': (Conv, GlobalConv),
'conv': (Conv, Conv),
}
class DLASeg(BaseModel):
def __init__(self, num_layers, heads, head_convs, opt):
super(DLASeg, self).__init__(
heads, head_convs, 1, 64 if num_layers == 34 else 128, opt=opt)
down_ratio=4
self.opt = opt
self.node_type = DLA_NODE[opt.dla_node]
print('Using node type:', self.node_type)
self.first_level = int(np.log2(down_ratio))
self.last_level = 5
self.base = globals()['dla{}'.format(num_layers)](
pretrained=(opt.load_model == ''), opt=opt)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(
self.first_level, channels[self.first_level:], scales,
node_type=self.node_type)
out_channel = channels[self.first_level]
self.ida_up = IDAUp(
out_channel, channels[self.first_level:self.last_level],
[2 ** i for i in range(self.last_level - self.first_level)],
node_type=self.node_type)
def img2feats(self, x):
x = self.base(x)
x = self.dla_up(x)
y = []
for i in range(self.last_level - self.first_level):
y.append(x[i].clone())
self.ida_up(y, 0, len(y))
return [y[-1]]
def imgpre2feats(self, x, pre_img=None, pre_hm=None):
x = self.base(x, pre_img, pre_hm)
x = self.dla_up(x)
y = []
for i in range(self.last_level - self.first_level):
y.append(x[i].clone())
self.ida_up(y, 0, len(y))
return [y[-1]]
def img2feats_prev(self, x):
x = self.base(x)
x = self.dla_up(x)
y = []
for i in range(self.last_level - self.first_level):
y.append(x[i].clone())
self.ida_up(y, 0, len(y))
return [y[-1].detach()]
| 34.620853 | 106 | 0.559115 |
6557955fd8e4636a144792b64b4cc0e636a5d7c0 | 1,462 | py | Python | WeatherStationSensorsReader/sensors/rainfall_sensor.py | weather-station-project/weather-station-sensors-reader | cda7902ee382248b41d14b9a2c0543817decbb4a | ["MIT"] | null | null | null | WeatherStationSensorsReader/sensors/rainfall_sensor.py | weather-station-project/weather-station-sensors-reader | cda7902ee382248b41d14b9a2c0543817decbb4a | ["MIT"] | null | null | null | WeatherStationSensorsReader/sensors/rainfall_sensor.py | weather-station-project/weather-station-sensors-reader | cda7902ee382248b41d14b9a2c0543817decbb4a | ["MIT"] | null | null | null |
import logging
from gpiozero import Button
from health_check.health_check_file_manager import register_success_for_class_into_health_check_file
from sensors.sensor import Sensor
class RainfallSensor(Sensor):
"""Represents the sensor which measures rainfall"""
BUCKET_SIZE_IN_MM = 0.2794
def __init__(self, rain_gauge_port_number):
self.button = Button(pin=rain_gauge_port_number)
self.button.when_pressed = self.get_reading
super().__init__()
logging.debug(msg=f'[{self.__class__.__name__}] Started on the port "{rain_gauge_port_number}".')
def add_value_to_readings(self):
        # This sensor does not need a periodic async read: the when_pressed callback records each bucket tip
pass
def get_reading(self):
logging.debug(msg=f'[{self.__class__.__name__}] Pressed.')
self.readings.append(1)
def get_readings_average(self):
try:
self.getting_readings = True
sensor_name = self.__class__.__name__
logging.debug(msg=f'[{sensor_name}] Getting amount of rain from the value "{len(self.readings)}"')
average = self.get_average()
register_success_for_class_into_health_check_file(class_name=sensor_name)
return average
finally:
del self.readings[:]
self.getting_readings = False
def get_average(self):
return [len(self.readings) * self.BUCKET_SIZE_IN_MM]
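# Worked example (illustrative only, not part of the original module): every bucket tip
# appends one reading, so a measurement window with 5 tips reports
# get_average() == [5 * 0.2794] == [1.397] millimetres of rain.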
| 31.782609 | 110 | 0.690834 |
22d1234329d8dc4f29fe9fde42889fb6ec7a06d3 | 2,823 | py | Python | GunsApp/app.py | rabest265/GunViolence | dbe51d40fb959f624d482619549f6e21a80409d3 | ["CNRI-Python", "OML"] | null | null | null | GunsApp/app.py | rabest265/GunViolence | dbe51d40fb959f624d482619549f6e21a80409d3 | ["CNRI-Python", "OML"] | null | null | null | GunsApp/app.py | rabest265/GunViolence | dbe51d40fb959f624d482619549f6e21a80409d3 | ["CNRI-Python", "OML"] | null | null | null |
from flask import Flask, render_template, redirect, jsonify
from flask_pymongo import PyMongo
from datetime import datetime
import json
import os
# from bson.json_util import loads
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
MONGO_URI = os.environ.get('MONGO_URI')
if not MONGO_URI:
MONGO_URI = "mongodb://localhost:27017/guns"
app.config['MONGO_URI'] = MONGO_URI
mongo = PyMongo(app)
# Define shooting list
ShootList = ["mass shooting", "no injuries", "injuries only", "some dead"]
# ShootList = ["mass shooting"]
@app.route("/")
def home():
return render_template("index.html", ShootList = ShootList)
@app.route("/maps")
def charts():
return render_template("maps.html", ShootList = ShootList)
@app.route("/benchmark")
def bench():
return render_template("benchmark.html", ShootList = ShootList)
@app.route("/interactive_chart")
def intercharts():
return render_template("interactive_chart.html", ShootList = ShootList)
@app.route("/wordcloud")
def wordcloud():
return render_template("wordcloud.html", ShootList = ShootList)
@app.route("/jsonifiedcities")
def jsonifiedcities():
citylist = []
cityinfo = mongo.db.cities.find()
for city in cityinfo:
del city["_id"]
citylist.append(city)
return jsonify(citylist)
@app.route("/jsonifiedguns")
def jsonifiedguns():
gunlist = []
guninfo = mongo.db.guns.find()
for gun in guninfo:
del gun["_id"]
if gun["shoot_type"] in ShootList:
gunlist.append(gun)
return jsonify(gunlist)
@app.route("/jsonifiedguns/<yr>")
def jsonifiedgunsy(yr):
gunlist = []
if(yr=="all"):
guninfo = mongo.db.guns.find()
else:
guninfo = mongo.db.guns.find({ "year": int(yr) })
for gun in guninfo:
del gun["_id"]
if gun["shoot_type"] in ShootList:
gunlist.append(gun)
# print(len(gunlist))
return jsonify(gunlist)
@app.route("/jsonifiedstates")
def jsonifiedstates():
statelist = []
stateinfo = mongo.db.states.find()
for state in stateinfo:
del state["_id"]
statelist.append(state)
return jsonify(statelist)
@app.route("/jsonifiedsummary")
def jsonifiedsummary():
summarylist = []
summaryinfo = mongo.db.guns_summary.find()
for shoot_type in summaryinfo:
del shoot_type["_id"]
summarylist.append(shoot_type)
return jsonify(summarylist)
@app.route("/jsonifiedstatesummary")
def jsonifiedstatesummary():
statesummarylist = []
statesummaryinfo = mongo.db.state_summary.find()
for shoot_type in statesummaryinfo:
del shoot_type["_id"]
statesummarylist.append(shoot_type)
return jsonify(statesummarylist)
if __name__ == "__main__":
app.run(debug=True)
| 26.138889 | 75 | 0.676585 |
e99f43bdc9b2e5bc8f245ccbff0d79feef5b358d | 6,076 | py | Python | 2021/python/day-05.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | ["CC0-1.0"] | 1 | 2021-12-04T18:09:44.000Z | 2021-12-04T18:09:44.000Z | 2021/python/day-05.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | ["CC0-1.0"] | null | null | null | 2021/python/day-05.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | ["CC0-1.0"] | null | null | null |
from collections import Counter
from functools import partial
import aoc
from tadhg_utils import (
splitstrip,
splitstriplines,
lmap,
lconcat,
lcompact,
)
INPUT, TEST = aoc.get_inputs(__file__)
TA1 = 5
TA2 = 12
A1 = 6311
A2 = 19929
def parse_line(line):
coords = splitstrip(line, "->")
raw_points = [lmap(int, splitstrip(coord, ",")) for coord in coords]
points = lmap(lambda x: aoc.Point(*x), raw_points)
return points
def make_line(instr):
a, b = instr
points = set()
if a.x == b.x:
if a.y < b.y:
fills = range(a.y, b.y + 1)
for n in fills:
points.add(aoc.Point(x=a.x, y=n))
elif a.y > b.y:
fills = range(b.y, a.y + 1)
for n in fills:
points.add(aoc.Point(x=a.x, y=n))
else:
points.add(aoc.Point(x=a.x, y=a.y))
elif a.y == b.y:
if a.x < b.x:
fills = range(a.x, b.x + 1)
for n in fills:
points.add(aoc.Point(x=n, y=a.y))
elif a.x > b.x:
fills = range(b.x, a.x + 1)
for n in fills:
points.add(aoc.Point(x=n, y=a.y))
else:
points.add(aoc.Point(x=a.x, y=a.y))
return points
def make_line_diag(instr):
a, b = instr
points = set()
if a.x == b.x:
if a.y < b.y:
fills = range(a.y, b.y + 1)
for n in fills:
points.add(aoc.Point(x=a.x, y=n))
elif a.y > b.y:
fills = range(b.y, a.y + 1)
for n in fills:
points.add(aoc.Point(x=a.x, y=n))
else:
points.add(aoc.Point(x=a.x, y=a.y))
elif a.y == b.y:
if a.x < b.x:
fills = range(a.x, b.x + 1)
for n in fills:
points.add(aoc.Point(x=n, y=a.y))
elif a.x > b.x:
fills = range(b.x, a.x + 1)
for n in fills:
points.add(aoc.Point(x=n, y=a.y))
else:
points.add(aoc.Point(x=a.x, y=a.y))
else:
curr = a
points.add(curr)
while curr != b:
if curr.x < b.x:
curr = curr + aoc.Point(1, 0)
elif curr.x > b.x:
curr = curr - aoc.Point(1, 0)
if curr.y < b.y:
curr = curr + aoc.Point(0, 1)
elif curr.y > b.y:
curr = curr - aoc.Point(0, 1)
points.add(curr)
return points
def detect_overlap(lines):
llines = lmap(list, lines)
counted = Counter(lconcat(llines))
overone = {k: v for k, v in counted.items() if v >= 2}
return len(overone)
def process_one(data):
lines = lcompact(lmap(make_line, data))
count = detect_overlap(lines)
return count
def process_two(data):
lines = lcompact(lmap(make_line_diag, data))
count = detect_overlap(lines)
return count
def cli_main() -> None:
input_funcs = [splitstriplines, partial(lmap, parse_line)]
data = aoc.load_and_process_input(INPUT, input_funcs)
aoc.run_tests(TEST, TA1, TA2, A1, input_funcs, process_one, process_two)
result_one = process_one(data)
result_two = process_two(data)
aoc.finish(result_one, A1, result_two, A2)
if __name__ == "__main__":
cli_main()
"""
--- Day 5: Hydrothermal Venture ---
You come across a field of hydrothermal vents on the ocean floor! These vents
constantly produce large, opaque clouds, so it would be best to avoid them if
possible.
They tend to form in lines; the submarine helpfully produces a list of nearby
lines of vents (your puzzle input) for you to review. For example:
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
Each line of vents is given as a line segment in the format x1,y1 -> x2,y2
where x1,y1 are the coordinates of one end the line segment and x2,y2 are the
coordinates of the other end. These line segments include the points at both
ends. In other words:
An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3.
An entry like 9,7 -> 7,7 covers points 9,7, 8,7, and 7,7.
For now, only consider horizontal and vertical lines: lines where either x1 =
x2 or y1 = y2.
So, the horizontal and vertical lines from the above list would produce the
following diagram:
.......1..
..1....1..
..1....1..
.......1..
.112111211
..........
..........
..........
..........
222111....
In this diagram, the top left corner is 0,0 and the bottom right corner is 9,9.
Each position is shown as the number of lines which cover that point or . if no
line covers that point. The top-left pair of 1s, for example, comes from 2,2 ->
2,1; the very bottom row is formed by the overlapping lines 0,9 -> 5,9 and 0,9
-> 2,9.
To avoid the most dangerous areas, you need to determine the number of points
where at least two lines overlap. In the above example, this is anywhere in the
diagram with a 2 or larger - a total of 5 points.
Consider only horizontal and vertical lines. At how many points do at least two
lines overlap?
Your puzzle answer was 6311.
--- Part Two ---
Unfortunately, considering only horizontal and vertical lines doesn't give you
the full picture; you need to also consider diagonal lines.
Because of the limits of the hydrothermal vent mapping system, the lines in
your list will only ever be horizontal, vertical, or a diagonal line at exactly
45 degrees. In other words:
An entry like 1,1 -> 3,3 covers points 1,1, 2,2, and 3,3. An entry like
9,7 -> 7,9 covers points 9,7, 8,8, and 7,9.
Considering all lines from the above example would now produce the following
diagram:
1.1....11.
.111...2..
..2.1.111.
...1.2.2..
.112313211
...1.2....
..1...1...
.1.....1..
1.......1.
222111....
You still need to determine the number of points where at least two lines
overlap. In the above example, this is still anywhere in the diagram with a 2
or larger - now a total of 12 points.
Consider all of the lines. At how many points do at least two lines overlap?
Your puzzle answer was 19929.
"""
| 27.493213 | 79 | 0.597597 |
150aefabe4df5271deca4e75545c64a03ae64d8c | 14,282 | py | Python | learning/pg_agent.py | arihant72600/DeepMimic | c8cda14662bfa23737c6c648507da66ac392314b | ["MIT"] | null | null | null | learning/pg_agent.py | arihant72600/DeepMimic | c8cda14662bfa23737c6c648507da66ac392314b | ["MIT"] | null | null | null | learning/pg_agent.py | arihant72600/DeepMimic | c8cda14662bfa23737c6c648507da66ac392314b | ["MIT"] | null | null | null |
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from learning.tf_agent import TFAgent
from learning.solvers.mpi_solver import MPISolver
import learning.tf_util as TFUtil
import learning.nets.net_builder as NetBuilder
from learning.tf_distribution_gaussian_diag import TFDistributionGaussianDiag
import learning.rl_util as RLUtil
from util.logger import Logger
import util.mpi_util as MPIUtil
import util.math_util as MathUtil
from env.action_space import ActionSpace
from env.env import Env
'''
Policy Gradient Agent
'''
class PGAgentTorch(nn.Module):
def __init__(self):
super(PGAgentTorch, self).__init__()
class PGAgent(TFAgent):
NAME = 'PG'
ACTOR_NET_KEY = 'ActorNet'
ACTOR_STEPSIZE_KEY = 'ActorStepsize'
ACTOR_MOMENTUM_KEY = 'ActorMomentum'
ACTOR_WEIGHT_DECAY_KEY = 'ActorWeightDecay'
ACTOR_INIT_OUTPUT_SCALE_KEY = 'ActorInitOutputScale'
CRITIC_NET_KEY = 'CriticNet'
CRITIC_STEPSIZE_KEY = 'CriticStepsize'
CRITIC_MOMENTUM_KEY = 'CriticMomentum'
CRITIC_WEIGHT_DECAY_KEY = 'CriticWeightDecay'
MAIN_SCOPE = "main"
EXP_ACTION_FLAG = 1 << 0
def __init__(self, world, id, json_data):
self._exp_action = False
super().__init__(world, id, json_data)
return
def reset(self):
super().reset()
self._exp_action = False
return
def _check_action_space(self):
action_space = self.get_action_space()
return action_space == ActionSpace.Continuous
def _load_params(self, json_data):
super()._load_params(json_data)
self.val_min, self.val_max = self._calc_val_bounds(self.discount)
self.val_fail, self.val_succ = self._calc_term_vals(self.discount)
return
def _build_nets(self, json_data):
assert self.ACTOR_NET_KEY in json_data
assert self.CRITIC_NET_KEY in json_data
actor_net_name = json_data[self.ACTOR_NET_KEY]
critic_net_name = json_data[self.CRITIC_NET_KEY]
actor_init_output_scale = 1 if (self.ACTOR_INIT_OUTPUT_SCALE_KEY not in json_data) else json_data[self.ACTOR_INIT_OUTPUT_SCALE_KEY]
s_size = self.get_state_size()
g_size = self.get_goal_size()
a_size = self.get_action_size()
# setup input tensors
self._s_ph = tf.placeholder(tf.float32, shape=[None, s_size], name="s") # observations
self._tar_val_ph = tf.placeholder(tf.float32, shape=[None], name="tar_val") # target value s
self._adv_ph = tf.placeholder(tf.float32, shape=[None], name="adv") # advantage
self._a_ph = tf.placeholder(tf.float32, shape=[None, a_size], name="a") # target actions
self._g_ph = tf.placeholder(tf.float32, shape=([None, g_size] if self.has_goal() else None), name="g") # goals
with tf.variable_scope(self.MAIN_SCOPE):
self._norm_a_pd_tf = self._build_net_actor(actor_net_name, self._get_actor_inputs(), actor_init_output_scale)
self._critic_tf = self._build_net_critic(critic_net_name, self._get_critic_inputs())
        if (self._norm_a_pd_tf != None):
            Logger.print('Built actor net: ' + actor_net_name)
        if (self._critic_tf != None):
            Logger.print('Built critic net: ' + critic_net_name)
sample_norm_a_tf = self._norm_a_pd_tf.sample()
self._sample_a_tf = self._a_norm.unnormalize_tf(sample_norm_a_tf)
self._sample_a_logp_tf = self._norm_a_pd_tf.logp(sample_norm_a_tf)
mode_norm_a_tf = self._norm_a_pd_tf.get_mode()
self._mode_a_tf = self._a_norm.unnormalize_tf(mode_norm_a_tf)
self._mode_a_logp_tf = self._norm_a_pd_tf.logp(mode_norm_a_tf)
        norm_tar_a_tf = self._a_norm.normalize_tf(self._a_ph)
self._a_logp_tf = self._norm_a_pd_tf.logp(norm_tar_a_tf)
return
def _build_losses(self, json_data):
actor_bound_loss_weight = 10.0
actor_weight_decay = 0 if (self.ACTOR_WEIGHT_DECAY_KEY not in json_data) else json_data[self.ACTOR_WEIGHT_DECAY_KEY]
critic_weight_decay = 0 if (self.CRITIC_WEIGHT_DECAY_KEY not in json_data) else json_data[self.CRITIC_WEIGHT_DECAY_KEY]
        val_diff = self._tar_val_ph - self._critic_tf
self._critic_loss_tf = 0.5 * tf.reduce_mean(tf.square(val_diff))
if (critic_weight_decay != 0):
self._critic_loss_tf += critic_weight_decay * self._weight_decay_loss(self.MAIN_SCOPE + '/critic')
self._actor_loss_tf = self._adv_ph * self._a_logp_tf
self._actor_loss_tf = -tf.reduce_mean(self._actor_loss_tf)
if (actor_bound_loss_weight != 0.0):
self._actor_loss_tf += actor_bound_loss_weight * self._build_action_bound_loss(self._norm_a_pd_tf)
if (actor_weight_decay != 0):
            self._actor_loss_tf += actor_weight_decay * self._weight_decay_loss(self.MAIN_SCOPE + '/actor')
return
def _build_solvers(self, json_data):
actor_stepsize = 0.001 if (self.ACTOR_STEPSIZE_KEY not in json_data) else json_data[self.ACTOR_STEPSIZE_KEY]
actor_momentum = 0.9 if (self.ACTOR_MOMENTUM_KEY not in json_data) else json_data[self.ACTOR_MOMENTUM_KEY]
critic_stepsize = 0.01 if (self.CRITIC_STEPSIZE_KEY not in json_data) else json_data[self.CRITIC_STEPSIZE_KEY]
critic_momentum = 0.9 if (self.CRITIC_MOMENTUM_KEY not in json_data) else json_data[self.CRITIC_MOMENTUM_KEY]
critic_vars = self._tf_vars(self.MAIN_SCOPE + '/critic')
critic_opt = tf.train.MomentumOptimizer(learning_rate=critic_stepsize, momentum=critic_momentum)
self._critic_grad_tf = tf.gradients(self._critic_loss_tf, critic_vars)
self._critic_solver = MPISolver(self.sess, critic_opt, critic_vars)
actor_vars = self._tf_vars(self.MAIN_SCOPE + '/actor')
actor_opt = tf.train.MomentumOptimizer(learning_rate=actor_stepsize, momentum=actor_momentum)
self._actor_grad_tf = tf.gradients(self._actor_loss_tf, actor_vars)
self._actor_solver = MPISolver(self.sess, actor_opt, actor_vars)
return
def _build_net_actor(self, net_name, input_tfs, init_output_scale, reuse=False):
with tf.variable_scope('actor', reuse=reuse):
h = NetBuilder.build_net(net_name, input_tfs, reuse)
std_type = TFDistributionGaussianDiag.StdType.Default
a_size = self.get_action_size()
mean_kernel_init = tf.random_uniform_initializer(minval=-init_output_scale, maxval=init_output_scale)
mean_bias_init = tf.zeros_initializer()
logstd_kernel_init = tf.random_uniform_initializer(minval=-init_output_scale, maxval=init_output_scale)
logstd_bias_init = np.log(self.exp_params_curr.noise) * np.ones(a_size)
logstd_bias_init = logstd_bias_init.astype(np.float32)
norm_a_pd_tf = TFDistributionGaussianDiag(input=h, dim=a_size, std_type=std_type,
mean_kernel_init=mean_kernel_init, mean_bias_init=mean_bias_init,
logstd_kernel_init=logstd_kernel_init, logstd_bias_init=logstd_bias_init,
reuse=reuse)
return norm_a_pd_tf
def _build_net_critic(self, net_name, input_tfs, reuse=False):
out_size = 1
with tf.variable_scope('critic', reuse=reuse):
h = NetBuilder.build_net(net_name, input_tfs, reuse)
val_tf = tf.layers.dense(inputs=h, units=out_size, activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse)
val_tf = tf.squeeze(val_tf, axis=-1)
return val_tf
def _get_actor_inputs(self):
norm_s_tf = self._s_norm.normalize_tf(self._s_ph)
input_tfs = [norm_s_tf]
if (self.has_goal()):
norm_g_tf = self._g_norm.normalize_tf(self._g_ph)
input_tfs += [norm_g_tf]
return input_tfs
def _get_critic_inputs(self):
norm_s_tf = self._s_norm.normalize_tf(self._s_ph)
input_tfs = [norm_s_tf]
if (self.has_goal()):
norm_g_tf = self._g_norm.normalize_tf(self._g_ph)
input_tfs += [norm_g_tf]
return input_tfs
def _build_action_bound_loss(self, norm_a_pd_tf):
norm_a_bound_min = self._a_norm.normalize(self._a_bound_min)
norm_a_bound_max = self._a_norm.normalize(self._a_bound_max)
if (isinstance(norm_a_pd_tf, TFDistributionGaussianDiag)):
logstd_min = -np.inf
logstd_max = np.inf
norm_a_logstd_min = logstd_min * np.ones_like(norm_a_bound_min)
norm_a_logstd_max = logstd_max * np.ones_like(norm_a_bound_max)
norm_a_bound_min = np.concatenate([norm_a_bound_min, norm_a_logstd_min], axis=-1)
norm_a_bound_max = np.concatenate([norm_a_bound_max, norm_a_logstd_max], axis=-1)
a_bound_loss = norm_a_pd_tf.param_bound_loss(norm_a_bound_min, norm_a_bound_max)
return a_bound_loss
def _initialize_vars(self):
super()._initialize_vars()
self._sync_solvers()
return
def _sync_solvers(self):
self._actor_solver.sync()
self._critic_solver.sync()
return
def _decide_action(self, s, g):
with self.sess.as_default(), self.graph.as_default():
self._exp_action = self._enable_stoch_policy() and MathUtil.flip_coin(self.exp_params_curr.rate)
a, logp = self._eval_actor(s, g, self._exp_action)
a = a[0]
logp = logp[0]
return a, logp
def _enable_stoch_policy(self):
return self.enable_training and (self._mode == self.Mode.TRAIN or self._mode == self.Mode.TRAIN_END)
def _eval_actor(self, s, g, exp_action):
s = np.reshape(s, [-1, self.get_state_size()])
g = np.reshape(g, [-1, self.get_goal_size()]) if self.has_goal() else None
feed = {
self._s_ph : s,
self._g_ph : g
}
if (exp_action):
run_tfs = [self._sample_a_tf, self._sample_a_logp_tf]
else:
run_tfs = [self._mode_a_tf, self._mode_a_logp_tf]
a, logp = self.sess.run(run_tfs, feed_dict=feed)
return a, logp
def _eval_critic(self, s, g):
s = np.reshape(s, [-1, self.get_state_size()])
g = np.reshape(g, [-1, self.get_goal_size()]) if self.has_goal() else None
feed = {
self._s_ph : s,
self._g_ph : g
}
val = self.sess.run(self._critic_tf, feed_dict=feed)
return val
def _record_flags(self):
flags = int(0)
if (self._exp_action):
flags = flags | self.EXP_ACTION_FLAG
return flags
def _train_step(self):
super()._train_step()
critic_loss = self._update_critic()
actor_loss = self._update_actor()
critic_loss = MPIUtil.reduce_avg(critic_loss)
actor_loss = MPIUtil.reduce_avg(actor_loss)
        critic_stepsize = self._critic_solver.get_stepsize()
        actor_stepsize = self._actor_solver.get_stepsize()
self.logger.log_tabular('Critic_Loss', critic_loss)
self.logger.log_tabular('Critic_Stepsize', critic_stepsize)
self.logger.log_tabular('Actor_Loss', actor_loss)
self.logger.log_tabular('Actor_Stepsize', actor_stepsize)
return
def _update_critic(self):
idx = self.replay_buffer.sample(self._local_mini_batch_size)
s = self.replay_buffer.get('states', idx)
g = self.replay_buffer.get('goals', idx) if self.has_goal() else None
tar_vals = self._calc_updated_vals(idx)
tar_vals = np.clip(tar_vals, self.val_min, self.val_max)
feed = {
self._s_ph: s,
self._g_ph: g,
self._tar_val_ph: tar_vals
}
        loss, grads = self.sess.run([self._critic_loss_tf, self._critic_grad_tf], feed)
        self._critic_solver.update(grads)
return loss
def _update_actor(self):
key = self.EXP_ACTION_FLAG
idx = self.replay_buffer.sample_filtered(self._local_mini_batch_size, key)
has_goal = self.has_goal()
s = self.replay_buffer.get('states', idx)
g = self.replay_buffer.get('goals', idx) if has_goal else None
a = self.replay_buffer.get('actions', idx)
V_new = self._calc_updated_vals(idx)
V_old = self._eval_critic(s, g)
adv = V_new - V_old
feed = {
self._s_ph: s,
self._g_ph: g,
self._a_ph: a,
self._adv_ph: adv
}
loss, grads = self.sess.run([self._actor_loss_tf, self._actor_grad_tf], feed)
self._actor_solver.update(grads)
return loss
def _calc_updated_vals(self, idx):
r = self.replay_buffer.get('rewards', idx)
if self.discount == 0:
new_V = r
else:
next_idx = self.replay_buffer.get_next_idx(idx)
s_next = self.replay_buffer.get('states', next_idx)
g_next = self.replay_buffer.get('goals', next_idx) if self.has_goal() else None
is_end = self.replay_buffer.is_path_end(idx)
is_fail = self.replay_buffer.check_terminal_flag(idx, Env.Terminate.Fail)
is_succ = self.replay_buffer.check_terminal_flag(idx, Env.Terminate.Succ)
is_fail = np.logical_and(is_end, is_fail)
is_succ = np.logical_and(is_end, is_succ)
V_next = self._eval_critic(s_next, g_next)
V_next[is_fail] = self.val_fail
V_next[is_succ] = self.val_succ
new_V = r + self.discount * V_next
return new_V
def _log_val(self, s, g):
val = self._eval_critic(s, g)
norm_val = (1.0 - self.discount) * val
self.world.env.log_val(self.id, norm_val[0])
return
def _build_replay_buffer(self, buffer_size):
super()._build_replay_buffer(buffer_size)
self.replay_buffer.add_filter_key(self.EXP_ACTION_FLAG)
return
| 39.128767 | 139 | 0.658171 |
873fdbd0f5f40d1993f95429144f65c1f494c5d2 | 1,128 | py | Python | perceiver/tokenizer.py | felixyu7/perceiver-io-1 | 895f09e75e5a4b5e90dfef5d3a86ea26c2f48f4e | ["Apache-2.0"] | null | null | null | perceiver/tokenizer.py | felixyu7/perceiver-io-1 | 895f09e75e5a4b5e90dfef5d3a86ea26c2f48f4e | ["Apache-2.0"] | null | null | null | perceiver/tokenizer.py | felixyu7/perceiver-io-1 | 895f09e75e5a4b5e90dfef5d3a86ea26c2f48f4e | ["Apache-2.0"] | null | null | null |
from typing import Iterable
from tokenizers import decoders, Tokenizer
from tokenizers.models import WordPiece
from tokenizers.normalizers import Normalizer, Sequence, Lowercase, StripAccents, NFD
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordPieceTrainer
PAD_TOKEN = '[PAD]'
PAD_TOKEN_ID = 0
UNK_TOKEN = '[UNK]'
UNK_TOKEN_ID = 1
MASK_TOKEN = '[MASK]'
MASK_TOKEN_ID = 2
SPECIAL_TOKENS = [PAD_TOKEN, UNK_TOKEN, MASK_TOKEN]
def load_tokenizer(path):
return Tokenizer.from_file(path)
def save_tokenizer(tokenizer: Tokenizer, path):
tokenizer.save(path)
def train_tokenizer(tokenizer: Tokenizer, data: Iterable[str], vocab_size):
trainer = WordPieceTrainer(vocab_size=vocab_size, special_tokens=SPECIAL_TOKENS)
tokenizer.train_from_iterator(data, trainer)
def create_tokenizer(*normalizer: Normalizer):
tokenizer = Tokenizer(WordPiece(unk_token=UNK_TOKEN))
tokenizer.normalizer = Sequence(list(normalizer) + [NFD(), Lowercase(), StripAccents()])
tokenizer.pre_tokenizer = Whitespace()
tokenizer.decoder = decoders.WordPiece()
return tokenizer
| 27.512195 | 92 | 0.781915 |
077030d214f19b1bdeff2b6afe9b9fcaed2aefe0 | 1,756 | py | Python | differential_game_exp.py | Faiz/mapr2 | 30fb37e1807d47f3678b5cab80ac60c74c4e37f7 | ["Apache-2.0"] | 1 | 2021-09-03T16:33:12.000Z | 2021-09-03T16:33:12.000Z | differential_game_exp.py | Faiz/mapr2 | 30fb37e1807d47f3678b5cab80ac60c74c4e37f7 | ["Apache-2.0"] | null | null | null | differential_game_exp.py | Faiz/mapr2 | 30fb37e1807d47f3678b5cab80ac60c74c4e37f7 | ["Apache-2.0"] | null | null | null |
from pgagent import PGAgent
from maci.environments.differential_game import DifferentialGame
import numpy as np
import tensorflow as tf
GAME_NAME = "ma_softq"
AGENT_NUM = 2
MOVING_WINDOW_LEN = 5 # 5 mini batches => 5 * T, 500 games.
def play_differential_game(alpha=0.001, beta=0.001, discount=0.9, num_agents=2, episodes=100, iteration=1000):
agents = []
env = DifferentialGame(game_name=GAME_NAME, agent_num=AGENT_NUM)
for i in range(num_agents):
agents.append(PGAgent(i))
device_config = tf.ConfigProto()
with tf.Session(config=device_config) as sess:
for _ in range(iteration):
_ = [agent.start_new_batch() for agent in agents]
for _ in range(episodes):
states = env.reset()
actions = np.array([
agent.act(state) for state, agent in zip(states, agents)
])
# print(actions)
state_primes, rewards, _, _ = env.step(actions)
print(rewards)
for agent_id, agent in enumerate(agents):
agent.save_history(
[
tf.reshape(states[agent_id], [-1, 1]),
actions[agent_id],
actions[1 - agent_id],
state_primes[agent_id],
rewards[agent_id],
]
)
# update P-tsi for each agent.
_ = [agent.update_P(MOVING_WINDOW_LEN) for agent in agents]
# update the parameters.
_ = [agent.update_params() for agent in agents]
if __name__ == "__main__":
# tf.enable_eager_execution()
play_differential_game()
| 39.909091 | 110 | 0.553531 |
a1551adfa9824ecb4452145e307ce8dfe0f2443b | 1,195 | py | Python | mini_lambda/__init__.py | semiversus/python-mini-lambda | 35ec4b6304b08ffd28939ffef7ead6b150dc1525 | ["BSD-3-Clause"] | null | null | null | mini_lambda/__init__.py | semiversus/python-mini-lambda | 35ec4b6304b08ffd28939ffef7ead6b150dc1525 | ["BSD-3-Clause"] | null | null | null | mini_lambda/__init__.py | semiversus/python-mini-lambda | 35ec4b6304b08ffd28939ffef7ead6b150dc1525 | ["BSD-3-Clause"] | null | null | null |
from mini_lambda.utils_init import __remove_all_external_symbols, __get_all_submodules_symbols
__PACKAGE_NAME = 'mini_lambda'
__SUBMODULES_TO_EXPORT = ['base', 'generated', 'generated2', 'goodies', 'goodies_generated', 'main']
# TODO we could rather rely on a regexp mechanism
# (1) allow users to do
# import <package> as p and then p.<symbol>
__all__ = __get_all_submodules_symbols(__PACKAGE_NAME, __SUBMODULES_TO_EXPORT)
# Note: this is one way to do it, but it would be simpler to check the names in globals() at the end of this file.
# (2) allow users to do
# from <package> import <symbol>
#
# The following works, but unfortunately IDE like pycharm do not understand
from mini_lambda.base import *
from mini_lambda.generated import *
from mini_lambda.goodies_generated import *
from mini_lambda.main import *
from mini_lambda.main import _
from mini_lambda.generated2 import *
from mini_lambda.goodies import *
# remove all symbols that were imported above but do not belong in this package
__remove_all_external_symbols(__PACKAGE_NAME, globals())
# Otherwise exhaustive list would be required, which is sad
# ...
# print(__all__)
# print(globals().keys())
# print('Done')
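# Illustration of the two usage styles the comments above refer to, based only on the
# symbols visible in this file (`_` comes from mini_lambda.main and is re-exported here):
#
#   import mini_lambda as p       # style (1): symbols resolved as attributes, e.g. p._
#   from mini_lambda import _     # style (2): symbols imported directly by name
#
# __all__ together with the star-imports above is what makes style (2) work for
# `from mini_lambda import *` and for IDE completion.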
| 35.147059 | 114 | 0.774895 |
ff480f58b6b4d3a83998b31cad918cebee147fdc | 2,287 | py | Python | ravens/plot.py | YunchuZhang/Learning-to-use-different-tools-for-objects-rearrangement | 3759664cd77b5810834937c478a9a44ad36ac90c | ["Apache-2.0"] | 1 | 2022-03-20T19:03:02.000Z | 2022-03-20T19:03:02.000Z | ravens/plot.py | YunchuZhang/Learning-to-use-different-tools-for-objects-rearrangement | 3759664cd77b5810834937c478a9a44ad36ac90c | ["Apache-2.0"] | null | null | null | ravens/plot.py | YunchuZhang/Learning-to-use-different-tools-for-objects-rearrangement | 3759664cd77b5810834937c478a9a44ad36ac90c | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2021 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to plot training results."""
import os
import pickle
from absl import app
from absl import flags
import numpy as np
from ravens.utils import utils
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('root_dir', '.', '')
flags.DEFINE_bool('disp', True, 'Whether to display the environment.')
flags.DEFINE_string('task', 'insertion', 'The task to run.')
flags.DEFINE_string('agent', 'transporter', 'The agent to run.')
flags.DEFINE_integer('n_demos', 100, 'Number of demos to run.')
def main(unused_argv):
name = f'{FLAGS.task}-{FLAGS.agent}-{FLAGS.n_demos}-'
print(name)
# Load and print results to console.
path = FLAGS.root_dir
print(path)
curve = []
for fname in sorted(tf.io.gfile.listdir(path)):
fname = os.path.join(path, fname)
if name in fname and '.pkl' in fname:
n_steps = int(fname[(fname.rfind('-') + 1):-4])
data = pickle.load(open(fname, 'rb'))
rewards = []
for reward, _ in data:
rewards.append(reward)
score = np.mean(rewards)
std = np.std(rewards)
print(f' {n_steps} steps:\t{score:.1f}%\t± {std:.1f}%')
curve.append((n_steps, score, std))
# Plot results over training steps.
title = f'{FLAGS.agent} on {FLAGS.task} w/ {FLAGS.n_demos} demos'
ylabel = 'Testing Task Success (%)'
xlabel = '# of Training Steps'
if FLAGS.disp:
logs = {}
curve = np.array(curve)
print(curve)
logs[name] = (curve[:, 0], curve[:, 1], curve[:, 2])
fname = os.path.join(path, f'{name}-plot.png')
utils.plot(fname, title, ylabel, xlabel, data=logs, ylim=[0, 1])
print(f'Done. Plot image saved to: {fname}')
if __name__ == '__main__':
app.run(main)
| 31.763889 | 74 | 0.675557 |
0c204e70cb10f2db4c018111a27532d82126da7d | 5,235 | py | Python | flask-sp/flask_sp/saml/metadata.py | ThePalaceProject/circulation-saml-test | 3c150e7d162b41c661ec99d70977b64ce90eb24b | ["MIT"] | null | null | null | flask-sp/flask_sp/saml/metadata.py | ThePalaceProject/circulation-saml-test | 3c150e7d162b41c661ec99d70977b64ce90eb24b | ["MIT"] | null | null | null | flask-sp/flask_sp/saml/metadata.py | ThePalaceProject/circulation-saml-test | 3c150e7d162b41c661ec99d70977b64ce90eb24b | ["MIT"] | null | null | null |
import logging
import os
import click
from defusedxml.lxml import fromstring
from flask.cli import with_appcontext
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from onelogin.saml2.xml_utils import OneLogin_Saml2_XML
from flask_sp.db import IdentityProviderMetadata, get_db
class MetadataManager:
IN_COMMON_METADATA_SERVICE_URL = 'http://md.incommon.org/InCommon/InCommon-metadata-idp-only.xml'
ENTITY_DESCRIPTOR_XPATH = '//md:EntityDescriptor'
IDP_DESCRIPTOR_XPATH = './md:IDPSSODescriptor'
ENTITY_ID_ATTRIBUTE = 'entityID'
DISPLAY_NAME_XPATH = './md:Extensions/mdui:UIInfo/mdui:DisplayName'
def __init__(self):
self._logger = logging.getLogger(__name__)
OneLogin_Saml2_Constants.NS_PREFIX_MDUI = 'mdui'
OneLogin_Saml2_Constants.NS_MDUI = 'urn:oasis:names:tc:SAML:metadata:ui'
OneLogin_Saml2_Constants.NSMAP[OneLogin_Saml2_Constants.NS_PREFIX_MDUI] = OneLogin_Saml2_Constants.NS_MDUI
def _fetch_metadata(self):
self._logger.info('Started fetching metadata from InCommon Metadata service')
metadata = OneLogin_Saml2_IdPMetadataParser.get_metadata(self.IN_COMMON_METADATA_SERVICE_URL)
self._logger.info('Finished fetching metadata from InCommon Metadata service')
return metadata
def _convert_string_to_xml_dom(self, metadata):
self._logger.info('Started converting string containing IdP metadata into XML DOM')
metadata_dom = fromstring(metadata, forbid_dtd=True)
self._logger.info('Finished converting string containing IdP metadata into XML DOM')
return metadata_dom
def _parse_metadata_dom(self, metadata_dom):
entity_descriptor_nodes = OneLogin_Saml2_XML.query(metadata_dom, self.ENTITY_DESCRIPTOR_XPATH)
idps = []
for entity_descriptor_node in entity_descriptor_nodes:
idp_descriptor_nodes = OneLogin_Saml2_XML.query(entity_descriptor_node, self.IDP_DESCRIPTOR_XPATH)
for idp_descriptor_node in idp_descriptor_nodes:
idp_entity_id = entity_descriptor_node.get(self.ENTITY_ID_ATTRIBUTE, None)
display_name_node = OneLogin_Saml2_XML.query(idp_descriptor_node, self.DISPLAY_NAME_XPATH)
if not display_name_node:
continue
display_name = display_name_node[0].text
idp = IdentityProviderMetadata(idp_entity_id, display_name, entity_descriptor_node)
idps.append(idp)
return idps
def _fetch_local_idps(self, local_metadata_path):
local_idp_metadata_file = local_metadata_path
with open(local_idp_metadata_file) as file:
metadata = file.read()
metadata_dom = self._convert_string_to_xml_dom(metadata)
yield from self._parse_metadata_dom(metadata_dom)
def fetch_idps(self, local_metadata_path):
test_idps = []
self._logger.info('Started fetching local IdPs')
try:
for idp in self._fetch_local_idps(local_metadata_path):
test_idps.append(idp)
        except Exception:
self._logger.exception('An unexpected error occurred during fetching local IdPs')
self._logger.info(f'Successfully fetched {len(test_idps)} local IdPs')
in_common_idps = []
# self._logger.info('Started fetching IdPs from InCommon Metadata Service')
# try:
# metadata = self._fetch_metadata()
# metadata_dom = self._convert_string_to_xml_dom(metadata)
#
# for idp in self._parse_metadata_dom(metadata_dom):
# in_common_idps.append(idp)
# except:
# self._logger.exception(
# 'An unexpected exception occurred during fetching IdP metadata from InCommon Metadata service')
# raise
#
# self._logger.info('Successfully fetched {0} IdPs from In Common Metadata Service'.format(len(in_common_idps)))
idps = test_idps + in_common_idps
return idps
def init_metadata(local_metadata_path):
click.echo('Deleting the existing metadata...')
db = get_db()
idps = IdentityProviderMetadata.query.all()
for idp in idps:
db.session.delete(idp)
db.session.commit()
click.echo('The existing metadata has been deleted')
metadata_manager = MetadataManager()
click.echo('Fetching metadata...')
idps = metadata_manager.fetch_idps(local_metadata_path)
click.echo(f'Fetched {len(idps)} IdPs')
db.session.add_all(idps)
db.session.commit()
click.echo(f'Saved {len(idps)} IdPs to the database')
@click.command('init-metadata')
@with_appcontext
@click.argument('local_metadata_path')
def init_metadata_command(local_metadata_path):
"""Adds metadata to the database
:param local_metadata_path: Absolute path to the XML file containing IdP metadata
:type local_metadata_path: string
"""
init_metadata(local_metadata_path)
def init_app(app):
# app.before_first_request(init_metadata)
app.cli.add_command(init_metadata_command)
| 33.557692 | 120 | 0.714613 |
605b688b3056264f3939673855c721718b4004e9 | 251 | py | Python | grpc_benchmark/multiple_clients.py | mpetyx/python-rpc-frameworks-comparison | 5b69a5aef1f8ead2fb88e744b6b1787b27165898 | ["MIT"] | null | null | null | grpc_benchmark/multiple_clients.py | mpetyx/python-rpc-frameworks-comparison | 5b69a5aef1f8ead2fb88e744b6b1787b27165898 | ["MIT"] | null | null | null | grpc_benchmark/multiple_clients.py | mpetyx/python-rpc-frameworks-comparison | 5b69a5aef1f8ead2fb88e744b6b1787b27165898 | ["MIT"] | null | null | null |
__author__ = 'mpetyx (Michael Petychakis)'
__version__ = "1.0.0"
__maintainer__ = "Michael Petychakis"
__email__ = "hello@apilama.com"
__status__ = "Production"
from client import Client
for number in range(1, 100000):
Client().squareRoot(number)
| 25.1 | 42 | 0.752988 |