| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
f5e56f9248eb4754689742e2d033991059fb377b | 4,475 | py | Python | alipay/aop/api/domain/AlipayMarketingCampaignDiscountBudgetCreateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | ["Apache-2.0"] | null | null | null | alipay/aop/api/domain/AlipayMarketingCampaignDiscountBudgetCreateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | ["Apache-2.0"] | null | null | null | alipay/aop/api/domain/AlipayMarketingCampaignDiscountBudgetCreateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCampaignDiscountBudgetCreateModel(object):
def __init__(self):
self._biz_from = None
self._fund_type = None
self._gmt_end = None
self._name = None
self._out_biz_no = None
self._out_budget_no = None
self._publisher_logon_id = None
self._total_amount = None
@property
def biz_from(self):
return self._biz_from
@biz_from.setter
def biz_from(self, value):
self._biz_from = value
@property
def fund_type(self):
return self._fund_type
@fund_type.setter
def fund_type(self, value):
self._fund_type = value
@property
def gmt_end(self):
return self._gmt_end
@gmt_end.setter
def gmt_end(self, value):
self._gmt_end = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def out_budget_no(self):
return self._out_budget_no
@out_budget_no.setter
def out_budget_no(self, value):
self._out_budget_no = value
@property
def publisher_logon_id(self):
return self._publisher_logon_id
@publisher_logon_id.setter
def publisher_logon_id(self, value):
self._publisher_logon_id = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
def to_alipay_dict(self):
params = dict()
if self.biz_from:
if hasattr(self.biz_from, 'to_alipay_dict'):
params['biz_from'] = self.biz_from.to_alipay_dict()
else:
params['biz_from'] = self.biz_from
if self.fund_type:
if hasattr(self.fund_type, 'to_alipay_dict'):
params['fund_type'] = self.fund_type.to_alipay_dict()
else:
params['fund_type'] = self.fund_type
if self.gmt_end:
if hasattr(self.gmt_end, 'to_alipay_dict'):
params['gmt_end'] = self.gmt_end.to_alipay_dict()
else:
params['gmt_end'] = self.gmt_end
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.out_budget_no:
if hasattr(self.out_budget_no, 'to_alipay_dict'):
params['out_budget_no'] = self.out_budget_no.to_alipay_dict()
else:
params['out_budget_no'] = self.out_budget_no
if self.publisher_logon_id:
if hasattr(self.publisher_logon_id, 'to_alipay_dict'):
params['publisher_logon_id'] = self.publisher_logon_id.to_alipay_dict()
else:
params['publisher_logon_id'] = self.publisher_logon_id
if self.total_amount:
if hasattr(self.total_amount, 'to_alipay_dict'):
params['total_amount'] = self.total_amount.to_alipay_dict()
else:
params['total_amount'] = self.total_amount
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingCampaignDiscountBudgetCreateModel()
if 'biz_from' in d:
o.biz_from = d['biz_from']
if 'fund_type' in d:
o.fund_type = d['fund_type']
if 'gmt_end' in d:
o.gmt_end = d['gmt_end']
if 'name' in d:
o.name = d['name']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'out_budget_no' in d:
o.out_budget_no = d['out_budget_no']
if 'publisher_logon_id' in d:
o.publisher_logon_id = d['publisher_logon_id']
if 'total_amount' in d:
o.total_amount = d['total_amount']
return o
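# Illustrative usage sketch (made-up field values, not from the SDK docs):
# round-trip the model through the dict helpers defined above.
if __name__ == '__main__':
    model = AlipayMarketingCampaignDiscountBudgetCreateModel()
    model.name = 'example budget'        # hypothetical value
    model.total_amount = '100.00'        # hypothetical value
    as_dict = model.to_alipay_dict()     # only the fields that were set are emitted
    restored = AlipayMarketingCampaignDiscountBudgetCreateModel.from_alipay_dict(as_dict)
    assert restored.name == model.name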
| 30.650685 | 87 | 0.593296 |
32d92dee3a88533855b14b7f272bea2d493952c5 | 312 | py | Python | cwr/manage.py | ghinda/chrome-webstore-ranking | 4a508d30d34ecd348ca506bd914f556dc89e6f80 | ["MIT"] | 1 | 2021-05-16T18:03:13.000Z | 2021-05-16T18:03:13.000Z | cwr/manage.py | ghinda/chrome-webstore-ranking | 4a508d30d34ecd348ca506bd914f556dc89e6f80 | ["MIT"] | null | null | null | cwr/manage.py | ghinda/chrome-webstore-ranking | 4a508d30d34ecd348ca506bd914f556dc89e6f80 | ["MIT"] | null | null | null |
import logging
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from cwr.app import app, db
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
logger = logging.getLogger(__file__)
if __name__ == "__main__":
manager.run()
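# Typical invocations of the 'db' command registered above via Flask-Migrate
# (usage sketch for illustration):
#   python manage.py db init      # create the migrations/ directory
#   python manage.py db migrate   # autogenerate a new revision
#   python manage.py db upgrade   # apply pending revisions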
| 20.8 | 53 | 0.766026 |
e22362aa67115de6cc5a043c786b6c9a8c81877d | 9,281 | py | Python | tfx/extensions/google_cloud_big_query/experimental/elwc_example_gen/component/executor_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | ["Apache-2.0"] | 1,813 | 2019-02-04T17:17:30.000Z | 2022-03-29T13:39:30.000Z | tfx/extensions/google_cloud_big_query/experimental/elwc_example_gen/component/executor_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | ["Apache-2.0"] | 2,710 | 2019-02-14T00:41:00.000Z | 2022-03-31T07:23:00.000Z | tfx/extensions/google_cloud_big_query/experimental/elwc_example_gen/component/executor_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | ["Apache-2.0"] | 731 | 2019-02-04T17:59:18.000Z | 2022-03-31T06:45:51.000Z |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.extensions.google_cloud_big_query.elwc_example_gen.component.executor."""
from unittest import mock
import apache_beam as beam
from apache_beam.testing import util
from google.cloud import bigquery
import tensorflow as tf
from tfx.extensions.google_cloud_big_query import utils
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.component import executor
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.proto import elwc_config_pb2
from tfx.proto import example_gen_pb2
from google.protobuf import json_format
from google.protobuf import text_format
from tensorflow_serving.apis import input_pb2
_ELWC_1 = text_format.Parse(
"""
examples {
features {
feature {
key: "feature_id_1"
value {
int64_list {
value: 1
}
}
}
feature {
key: "feature_id_2"
value {
float_list {
value: 1.0
}
}
}
feature {
key: "feature_id_3"
value {
bytes_list {
value: "one"
}
}
}
}
}
context {
features {
feature {
key: "context_feature_1"
value {
int64_list {
value: 1
}
}
}
feature {
key: "context_feature_2"
value {
int64_list {
value: 1
}
}
}
}
}
""", input_pb2.ExampleListWithContext())
_ELWC_2 = text_format.Parse(
"""
examples {
features {
feature {
key: "feature_id_1"
value {
int64_list {
value: 2
}
}
}
feature {
key: "feature_id_2"
value {
float_list {
value: 2.0
}
}
}
feature {
key: "feature_id_3"
value {
bytes_list {
value: "two"
}
}
}
}
}
context {
features {
feature {
key: "context_feature_1"
value {
int64_list {
value: 1
}
}
}
feature {
key: "context_feature_2"
value {
int64_list {
value: 2
}
}
}
}
}
""", input_pb2.ExampleListWithContext())
_ELWC_3 = text_format.Parse(
"""
examples {
features {
feature {
key: "feature_id_1"
value {
int64_list {
value: 3
}
}
}
feature {
key: "feature_id_2"
value {
float_list {
value: 3.0
}
}
}
feature {
key: "feature_id_3"
value {
bytes_list {
value: "three"
}
}
}
}
}
examples {
features {
feature {
key: "feature_id_1"
value {
int64_list {
value: 4
}
}
}
feature {
key: "feature_id_2"
value {
float_list {
value: 4.0
}
}
}
feature {
key: "feature_id_3"
value {
bytes_list {
value: "four"
}
}
}
}
}
context {
features {
feature {
key: "context_feature_1"
value {
int64_list {
value: 2
}
}
}
feature {
key: "context_feature_2"
value {
int64_list {
value: 1
}
}
}
}
}
""", input_pb2.ExampleListWithContext())
# 'context_feature_2' has missing value.
_ELWC_4 = text_format.Parse(
"""
examples {
features {
feature {
key: "feature_id_1"
value {
int64_list {
value: 5
}
}
}
feature {
key: "feature_id_2"
value {
float_list {
value: 5.0
}
}
}
feature {
key: "feature_id_3"
value {
bytes_list {
value: "five"
}
}
}
}
}
context {
features {
feature {
key: "context_feature_1"
value {
int64_list {
value: 3
}
}
}
feature {
key: "context_feature_2"
value {
}
}
}
}
""", input_pb2.ExampleListWithContext())
# 'feature_id_2' and 'context_feature_2' have missing value.
_ELWC_5 = text_format.Parse(
"""
examples {
features {
feature {
key: "feature_id_1"
value {
int64_list {
value: 5
}
}
}
feature {
key: "feature_id_2"
value {
}
}
feature {
key: "feature_id_3"
value {
bytes_list {
value: "five"
}
}
}
}
}
context {
features {
feature {
key: "context_feature_1"
value {
int64_list {
value: 4
}
}
}
feature {
key: "context_feature_2"
value {
}
}
}
}
""", input_pb2.ExampleListWithContext())
@beam.ptransform_fn
def _MockReadFromBigQuery(pipeline, query):
del query # Unused arg
mock_query_results = [{
'context_feature_1': 1,
'context_feature_2': 1,
'feature_id_1': 1,
'feature_id_2': 1.0,
'feature_id_3': 'one'
}, {
'context_feature_1': 1,
'feature_id_3': 'two',
'feature_id_1': 2,
'context_feature_2': 2,
'feature_id_2': 2.0
}, {
'context_feature_1': 2,
'context_feature_2': 1,
'feature_id_1': 3,
'feature_id_2': 3.0,
'feature_id_3': 'three'
}, {
'context_feature_1': 2,
'context_feature_2': 1,
'feature_id_1': 4,
'feature_id_2': 4.0,
'feature_id_3': ['four']
}, {
'context_feature_1': 3,
'context_feature_2': None,
'feature_id_1': 5,
'feature_id_2': [5.0],
'feature_id_3': 'five'
}, {
'context_feature_1': 4,
'context_feature_2': None,
'feature_id_1': [5],
'feature_id_2': None,
'feature_id_3': 'five'
}]
return pipeline | beam.Create(mock_query_results)
class ExecutorTest(tf.test.TestCase):
def setUp(self):
# Mock BigQuery result schema.
self._schema = [
bigquery.SchemaField('context_feature_1', 'INTEGER', mode='NULLABLE'),
bigquery.SchemaField('context_feature_2', 'INTEGER', mode='NULLABLE'),
bigquery.SchemaField('feature_id_1', 'INTEGER', mode='NULLABLE'),
bigquery.SchemaField('feature_id_2', 'FLOAT', mode='NULLABLE'),
bigquery.SchemaField('feature_id_3', 'STRING', mode='NULLABLE'),
]
super().setUp()
@mock.patch.multiple(
utils,
ReadFromBigQuery=_MockReadFromBigQuery,
)
@mock.patch.object(bigquery, 'Client')
def testBigQueryToElwc(self, mock_client):
# Mock query result schema for _BigQueryElwcConverter.
mock_client.return_value.query.return_value.result.return_value.schema = self._schema
elwc_config = elwc_config_pb2.ElwcConfig(
context_feature_fields=['context_feature_1', 'context_feature_2'])
packed_custom_config = example_gen_pb2.CustomConfig()
packed_custom_config.custom_config.Pack(elwc_config)
with beam.Pipeline() as pipeline:
elwc_examples = (
pipeline | 'ToElwc' >> executor._BigQueryToElwc(
exec_properties={
'_beam_pipeline_args': [],
'custom_config':
json_format.MessageToJson(
packed_custom_config,
preserving_proto_field_name=True)
},
split_pattern='SELECT context_feature_1, context_feature_2, '
'feature_id_1, feature_id_2, feature_id_3 FROM `fake`'))
expected_elwc_examples = [_ELWC_1, _ELWC_2, _ELWC_3, _ELWC_4, _ELWC_5]
util.assert_that(elwc_examples, util.equal_to(expected_elwc_examples))
if __name__ == '__main__':
tf.test.main()
| 23.2025 | 101 | 0.497145 |
0b73bec679360f6a3f5ca1cefd88f6c8fee9a870 | 238 | py | Python | GlobalDataset/scripts/runChi_sa.py | gehilley/GlobalSteepness | 62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd | ["MIT"] | 3 | 2019-09-19T00:04:27.000Z | 2020-02-17T16:17:55.000Z | GlobalDataset/scripts/runChi_sa.py | gehilley/GlobalSteepness | 62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd | ["MIT"] | null | null | null | GlobalDataset/scripts/runChi_sa.py | gehilley/GlobalSteepness | 62a1a5b66adb230d5bbbc004aa5d2c5b618a2fdd | ["MIT"] | 1 | 2020-12-17T07:35:23.000Z | 2020-12-17T07:35:23.000Z |
from denudationRateAnalysis import create_chi_grid_for_geographic_prefix as create_chi
prefix = 'sa'
thetas = [0.4, 0.5, 0.6]
Ao = 1000000
basin_lengths = [50000, 100000, 200000, 400000]
create_chi(prefix, thetas, Ao, basin_lengths)
| 21.636364 | 86 | 0.768908 |
7c2382df251ebf6d5ccfd633eff15b723e1cb001 | 5,610 | py | Python | scipy/spatial/tests/test_hausdorff.py | magnusja/scipy | c4a5a1f984e28840010f20a7e41caa21b8f41979 | ["FSFAP"] | 366 | 2019-04-07T20:34:48.000Z | 2022-03-29T07:35:38.000Z | scipy/spatial/tests/test_hausdorff.py | magnusja/scipy | c4a5a1f984e28840010f20a7e41caa21b8f41979 | ["FSFAP"] | 30 | 2020-04-15T19:37:40.000Z | 2020-04-22T21:19:35.000Z | scipy/spatial/tests/test_hausdorff.py | magnusja/scipy | c4a5a1f984e28840010f20a7e41caa21b8f41979 | ["FSFAP"] | 61 | 2019-04-08T00:58:14.000Z | 2022-03-20T23:04:28.000Z |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_almost_equal,
assert_array_equal,
assert_equal,
assert_)
import pytest
from scipy.spatial.distance import directed_hausdorff
from scipy.spatial import distance
from scipy._lib._util import check_random_state
class TestHausdorff(object):
# Test various properties of the directed Hausdorff code.
def setup_method(self):
np.random.seed(1234)
random_angles = np.random.random(100) * np.pi * 2
random_columns = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns[..., 0] = np.cos(random_columns[..., 0])
random_columns[..., 1] = np.sin(random_columns[..., 1])
random_columns_2 = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns_2[1:, 0] = np.cos(random_columns_2[1:, 0]) * 2.0
random_columns_2[1:, 1] = np.sin(random_columns_2[1:, 1]) * 2.0
# move one point farther out so we don't have two perfect circles
random_columns_2[0, 0] = np.cos(random_columns_2[0, 0]) * 3.3
random_columns_2[0, 1] = np.sin(random_columns_2[0, 1]) * 3.3
self.path_1 = random_columns
self.path_2 = random_columns_2
self.path_1_4d = np.insert(self.path_1, 3, 5, axis=1)
self.path_2_4d = np.insert(self.path_2, 3, 27, axis=1)
def test_symmetry(self):
# Ensure that the directed (asymmetric) Hausdorff distance is
# actually asymmetric
forward = directed_hausdorff(self.path_1, self.path_2)[0]
reverse = directed_hausdorff(self.path_2, self.path_1)[0]
assert_(forward != reverse)
def test_brute_force_comparison_forward(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# forward direction.
actual = directed_hausdorff(self.path_1, self.path_2)[0]
# brute force over rows:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=1))
assert_almost_equal(actual, expected, decimal=9)
def test_brute_force_comparison_reverse(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# reverse direction.
actual = directed_hausdorff(self.path_2, self.path_1)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=0))
assert_almost_equal(actual, expected, decimal=9)
def test_degenerate_case(self):
# The directed Hausdorff distance must be zero if both input
# data arrays match.
actual = directed_hausdorff(self.path_1, self.path_1)[0]
assert_almost_equal(actual, 0.0, decimal=9)
def test_2d_data_forward(self):
# Ensure that 2D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_1[..., :2],
self.path_2[..., :2])[0]
expected = max(np.amin(distance.cdist(self.path_1[..., :2],
self.path_2[..., :2]),
axis=1))
assert_almost_equal(actual, expected, decimal=9)
def test_4d_data_reverse(self):
# Ensure that 4D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_2_4d, self.path_1_4d)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1_4d, self.path_2_4d),
axis=0))
assert_almost_equal(actual, expected, decimal=9)
def test_indices(self):
# Ensure that correct point indices are returned -- they should
# correspond to the Hausdorff pair
path_simple_1 = np.array([[-1,-12],[0,0], [1,1], [3,7], [1,2]])
path_simple_2 = np.array([[0,0], [1,1], [4,100], [10,9]])
actual = directed_hausdorff(path_simple_2, path_simple_1)[1:]
expected = (2, 3)
assert_array_equal(actual, expected)
def test_random_state(self):
# ensure that the global random state is not modified because
# the directed Hausdorff algorithm uses randomization
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
def test_random_state_None_int(self):
# check that seed values of None or int do not alter global
# random state
for seed in [None, 27870671]:
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2, seed)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
def test_invalid_dimensions(self):
# Ensure that a ValueError is raised when the number of columns
# is not the same
np.random.seed(1234)
A = np.random.rand(3, 2)
B = np.random.rand(4, 5)
with pytest.raises(ValueError):
directed_hausdorff(A, B)
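# Minimal usage sketch of the API exercised by the tests above (hypothetical
# points, illustration only): directed_hausdorff returns the distance together
# with the indices of the point pair that realizes it.
if __name__ == "__main__":
    u = np.array([[0.0, 0.0], [1.0, 0.0]])
    v = np.array([[0.0, 1.0], [3.0, 0.0]])
    d, i_u, i_v = directed_hausdorff(u, v)
    print(d, i_u, i_v)  # distance and the contributing indices in u and v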
| 44.52381 | 79 | 0.630838 |
b15351a8a0d26a86cfde31a182294ae222b5a776 | 6,308 | py | Python | transformer_atten/transformer/utils/tokenizer_test.py | StuartCHAN/neural-qa | 42bbd997757bbea57f71398c4dd52d469a6916e9 | ["MIT"] | 5 | 2020-04-08T16:12:27.000Z | 2021-05-14T14:05:06.000Z | transformer_atten/transformer/utils/tokenizer_test.py | StuartCHAN/neural-qa | 42bbd997757bbea57f71398c4dd52d469a6916e9 | ["MIT"] | null | null | null | transformer_atten/transformer/utils/tokenizer_test.py | StuartCHAN/neural-qa | 42bbd997757bbea57f71398c4dd52d469a6916e9 | ["MIT"] | 1 | 2020-07-03T13:59:11.000Z | 2020-07-03T13:59:11.000Z |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Subtokenizer and string helper methods."""
import collections
import tempfile
import tensorflow as tf # pylint: disable=g-bad-import-order
from transformer_atten.transformer.utils import tokenizer
class SubtokenizerTest(tf.test.TestCase):
def _init_subtokenizer(self, vocab_list):
temp_file = tempfile.NamedTemporaryFile(delete=False)
with tf.io.gfile.GFile(temp_file.name, "w") as w:
for subtoken in vocab_list:
w.write("'%s'" % subtoken)
w.write("\n")
return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])
def test_encode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
s = "testing 123"
encoded_list = subtokenizer.encode(s)
self.assertEqual([1, 2, 0], encoded_list)
def test_decode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
decoded_str = subtokenizer.decode(encoded_list)
self.assertEqual("testing 123", decoded_str)
def test_subtoken_ids_to_tokens(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
self.assertEqual([u"testing", u"123"], token_list)
class StringHelperTest(tf.test.TestCase):
def test_split_string_to_tokens(self):
text = "test? testing 123."
tokens = tokenizer._split_string_to_tokens(text)
self.assertEqual(["test", "? ", "testing", "123", "."], tokens)
def test_join_tokens_to_string(self):
tokens = ["test", "? ", "testing", "123", "."]
s = tokenizer._join_tokens_to_string(tokens)
self.assertEqual("test? testing 123.", s)
def test_escape_token(self):
token = u"abc_\\4"
alphabet = set("abc_\\u;")
escaped_token = tokenizer._escape_token(token, alphabet)
self.assertEqual("abc\\u\\\\\\52;_", escaped_token)
def test_unescape_token(self):
escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
unescaped_token = tokenizer._unescape_token(escaped_token)
self.assertEqual(
"Underline: _, Backslash: \\, Unicode: 4", unescaped_token)
def test_list_to_index_dict(self):
lst = ["test", "strings"]
d = tokenizer._list_to_index_dict(lst)
self.assertDictEqual({"test": 0, "strings": 1}, d)
def test_split_token_to_subtokens(self):
token = "abc"
subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
max_subtoken_length = 2
subtokens = tokenizer._split_token_to_subtokens(
token, subtoken_dict, max_subtoken_length)
self.assertEqual(["ab", "c"], subtokens)
def test_generate_alphabet_dict(self):
s = ["testing", "123"]
reserved_tokens = ["???"]
alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
self.assertIn("?", alphabet)
self.assertIn("t", alphabet)
self.assertIn("e", alphabet)
self.assertIn("s", alphabet)
self.assertIn("i", alphabet)
self.assertIn("n", alphabet)
self.assertIn("g", alphabet)
self.assertIn("1", alphabet)
self.assertIn("2", alphabet)
self.assertIn("3", alphabet)
def test_count_and_gen_subtokens(self):
token_counts = {"abc": 5}
alphabet = set("abc_")
subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
max_subtoken_length = 2
subtoken_counts = tokenizer._count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
self.assertIsInstance(subtoken_counts, collections.defaultdict)
self.assertDictEqual(
{"a": 5, "b": 5, "c": 5, "_": 5, "ab": 5, "bc": 5, "c_": 5,
"abc": 5, "bc_": 5, "abc_": 5}, subtoken_counts)
def test_filter_and_bucket_subtokens(self):
subtoken_counts = collections.defaultdict(
int, {"a": 2, "b": 4, "c": 1, "ab": 6, "ac": 3, "abbc": 5})
min_count = 3
subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
subtoken_counts, min_count)
self.assertEqual(len(subtoken_buckets[0]), 0)
self.assertEqual(set("b"), subtoken_buckets[1])
self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2])
self.assertEqual(len(subtoken_buckets[3]), 0)
self.assertEqual(set(["abbc"]), subtoken_buckets[4])
def test_gen_new_subtoken_list(self):
subtoken_counts = collections.defaultdict(
int, {"translate": 10, "t": 40, "tr": 16, "tra": 12})
min_count = 5
alphabet = set("translate")
reserved_tokens = ["reserved", "tokens"]
subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
    # Check that "tra" isn't in the list (its count should be decremented to 2,
    # so it should not be added to the candidate list).
self.assertNotIn("tra", subtoken_list)
self.assertIn("tr", subtoken_list)
self.assertIn("t", subtoken_list)
self.assertEqual(len("translate"), max_token_length)
def test_generate_subtokens(self):
token_counts = {"ab": 1, "bc": 3, "abc": 5}
alphabet = set("abc_")
min_count = 100
num_iterations = 1
reserved_tokens = ["reserved", "tokens"]
vocab_list = tokenizer._generate_subtokens(
token_counts, alphabet, min_count, num_iterations, reserved_tokens)
# Check that reserved tokens are at the front of the list
self.assertEqual(vocab_list[:2], reserved_tokens)
# Check that each character in alphabet is in the vocab list
for c in alphabet:
self.assertIn(c, vocab_list)
if __name__ == "__main__":
tf.test.main()
| 34.469945 | 80 | 0.676443 |
d6d0763354355fb4413488e2803ed02fa763f7bf | 4,005 | py | Python | ross/stochastic/st_results_elements.py | hssaabbl/ross | 5e548d24c8522c8a9a294479c580c21b4eb3bb65 | ["MIT"] | 69 | 2018-12-26T19:21:26.000Z | 2022-02-10T08:48:03.000Z | ross/stochastic/st_results_elements.py | hssaabbl/ross | 5e548d24c8522c8a9a294479c580c21b4eb3bb65 | ["MIT"] | 639 | 2018-12-18T16:44:11.000Z | 2022-03-27T16:46:41.000Z | ross/stochastic/st_results_elements.py | hssaabbl/ross | 5e548d24c8522c8a9a294479c580c21b4eb3bb65 | ["MIT"] | 136 | 2019-01-08T12:37:32.000Z | 2022-03-30T07:14:35.000Z |
"""Plotting module for elements.
This modules provides functions to plot the elements statistic data.
"""
from copy import copy
import numpy as np
from plotly import graph_objects as go
from plotly import io as pio
from plotly.subplots import make_subplots
from scipy.stats import gaussian_kde
from ross.plotly_theme import tableau_colors
def plot_histogram(
attribute_dict, label={}, var_list=[], histogram_kwargs=None, plot_kwargs=None
):
"""Plot histogram and the PDF.
This function creates a histogram to display the random variable distribution.
Parameters
----------
attribute_dict : dict
Dictionary with element parameters.
label : dict
Dictionary with labels for each element parameter. Labels are displayed
on plotly figure.
var_list : list, optional
List of random variables, in string format, to plot.
    histogram_kwargs : dict, optional
        Additional keyword arguments can be passed to change
        the plotly go.Histogram (e.g. histnorm="probability density", nbinsx=20...).
        See the Plotly API for more information.
    plot_kwargs : dict, optional
        Additional keyword arguments can be passed to change the plotly go.Scatter
        (e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0...).
        See the Plotly API for more information.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
A figure with the histogram plots.
"""
histogram_kwargs = {} if histogram_kwargs is None else copy(histogram_kwargs)
plot_kwargs = {} if plot_kwargs is None else copy(plot_kwargs)
hist_default_values = dict(
histnorm="probability density",
cumulative_enabled=False,
nbinsx=20,
marker_color=tableau_colors["red"],
opacity=1.0,
)
for k, v in hist_default_values.items():
histogram_kwargs.setdefault(k, v)
plot_default_values = dict(
line=dict(width=4.0, color=tableau_colors["blue"]), opacity=1.0
)
for k, v in plot_default_values.items():
plot_kwargs.setdefault(k, v)
rows = 1 if len(var_list) < 2 else 2
cols = len(var_list) // 2 + len(var_list) % 2
fig = make_subplots(rows=rows, cols=cols)
for i, var in enumerate(var_list):
row = i % 2 + 1
col = i // 2 + 1
if histogram_kwargs["histnorm"] == "probability density":
if histogram_kwargs["cumulative_enabled"] is True:
y_label = "CDF"
else:
y_label = "PDF"
else:
y_label = "Frequency"
fig.add_trace(
go.Histogram(
x=attribute_dict[var],
name="Histogram",
legendgroup="Histogram",
showlegend=True if i == 0 else False,
**histogram_kwargs,
),
row=row,
col=col,
)
if y_label == "PDF":
x = np.linspace(
min(attribute_dict[var]),
max(attribute_dict[var]),
len(attribute_dict[var]),
)
kernel = gaussian_kde(attribute_dict[var])
fig.add_trace(
go.Scatter(
x=x,
y=kernel(x),
mode="lines",
name="PDF Estimation",
legendgroup="PDF Estimation",
showlegend=True if i == 0 else False,
**plot_kwargs,
),
row=row,
col=col,
)
fig.update_xaxes(
title=dict(text="<b>{}</b>".format(label[var])),
exponentformat="E",
row=row,
col=col,
)
fig.update_yaxes(
title=dict(text="<b>{}</b>".format(y_label), standoff=0),
exponentformat="E",
row=row,
col=col,
)
fig.update_layout(bargroupgap=0.1, plot_bgcolor="white")
return fig
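# Usage sketch (illustration only, with made-up parameter names and samples):
# plot the histogram/PDF of a random element parameter.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    samples = {"E": rng.normal(211e9, 10e9, size=1000)}  # hypothetical Young's modulus samples
    labels = {"E": "Young's modulus (Pa)"}
    figure = plot_histogram(samples, label=labels, var_list=["E"])
    # figure.show()  # uncomment to render the figure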
| 31.785714 | 84 | 0.571536 |
724747d91a2bdd1a359f0582c9539099a8cba832 | 140 | py | Python | metatron/__init__.py | harel/metatron | e8d82aa3e000c0682d215396112900ede54a96d6 | ["MIT"] | 4 | 2018-02-16T10:54:45.000Z | 2021-07-12T20:41:48.000Z | metatron/__init__.py | harel/metatron | e8d82aa3e000c0682d215396112900ede54a96d6 | ["MIT"] | 1 | 2020-10-03T15:23:37.000Z | 2020-10-03T15:23:37.000Z | metatron/__init__.py | harel/metatron | e8d82aa3e000c0682d215396112900ede54a96d6 | ["MIT"] | null | null | null |
from .metatron import Metatron, add_schema_spec
from .version import __version__
__all__ = ['Metatron', 'add_schema_spec', '__version__']
| 23.333333 | 56 | 0.785714 |
6c7a686d74b5fa6ab4c26825bf886d6141919255 | 2,131 | py | Python | mods/goog-mail.py | tomsec/discover | 8afec1f4b8f13e11c22f0386fc50eeb762b3492c | ["MIT"] | null | null | null | mods/goog-mail.py | tomsec/discover | 8afec1f4b8f13e11c22f0386fc50eeb762b3492c | ["MIT"] | null | null | null | mods/goog-mail.py | tomsec/discover | 8afec1f4b8f13e11c22f0386fc50eeb762b3492c | ["MIT"] | null | null | null |
#!/usr/bin/python3
import http.client
import re
import string
import sys
import urllib.request
import urllib.parse
def StripTags(text):
finished = 0
while not finished:
finished = 1
start = text.find(b"<")
if start >= 0:
stop = text[start:].find(b">")
if stop >= 0:
text = text[:start] + text[start+stop+1:]
finished = 0
return text
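# Quick illustration: StripTags removes anything between angle brackets from a
# bytes payload, e.g. StripTags(b"<b>admin@example.com</b>") -> b"admin@example.com".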
if len(sys.argv) != 2:
print("\nExtracts emails from Google results.")
print("\nUsage: ./goog-mail.py <domain>")
sys.exit(1)
domain_name = sys.argv[1]
d = {}
page_counter = 0
try:
while page_counter < 50:
results = 'https://groups.google.com/groups?q='+str(domain_name)+'&hl=en&lr=&ie=UTF-8&start=' + repr(page_counter) + '&sa=N'
request = urllib.request.Request(results)
request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)')
opener = urllib.request.build_opener()
text = opener.open(request).read()
        emails = re.findall(rb"[\w\.\-]+@" + re.escape(domain_name).encode(), StripTags(text))
for email in emails:
d[email] = 1
uniq_emails = list(d.keys())
page_counter = page_counter + 10
except IOError as e:
print(e)
page_counter_web = 0
try:
while page_counter_web < 50:
results_web = 'https://www.google.com/search?q=%40'+str(domain_name)+'&hl=en&lr=&ie=UTF-8&start=' + repr(page_counter_web) + '&sa=N'
request_web = urllib.request.Request(results_web)
request_web.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)')
opener_web = urllib.request.build_opener()
text = opener_web.open(request_web).read()
        emails_web = re.findall(rb"[\w\.\-]+@" + re.escape(domain_name).encode(), StripTags(text))
for email_web in emails_web:
d[email_web] = 1
uniq_emails_web = list(d.keys())
page_counter_web = page_counter_web + 10
except IOError as e:
print(e)
for uniq_email_web in list(d.keys()):
    print(uniq_email_web.decode())
| 31.338235 | 140 | 0.594087 |
0ca910060bea9f6ebfc1bc25413e18f66af8654a | 9,468 | py | Python | devices/DeviceCommon.py | hzg-wpi/p05nano | af13c97256e754e30512d4c9ef88c287f09dedaa | ["MIT"] | null | null | null | devices/DeviceCommon.py | hzg-wpi/p05nano | af13c97256e754e30512d4c9ef88c287f09dedaa | ["MIT"] | null | null | null | devices/DeviceCommon.py | hzg-wpi/p05nano | af13c97256e754e30512d4c9ef88c287f09dedaa | ["MIT"] | null | null | null |
import time
import PyTango
import p05.common.TangoFailsaveComm as tfs
# TODO (!check!) - not used. CODE: OK, may leave as it is
class DeviceCommon(object):
def __init__(self, devdict=None):
self.__devdict = devdict
def _tRead(self, tObject, attribute):
"""
DESCRIPTION:
Read tango attribute of a specified tango object.
PARAMETER:
tObject:
Tango Object
attribute:
attribute of the Tango object which is to be read
"""
return tfs.tSaveCommReadAttribute(tObject, attribute, silent=True)
def _tWrite(self, tObject, attribute, value, wait=True):
"""
DESCRIPTION:
Write tango attribute of a specified tango object.
PARAMETER:
tObject:
Tango Object
attribute:
attribute of the Tango object which is to be set
value:
new value for the Tango object's attribute
KEYWORDS:
wait:
waits for Tango device ON state before continuing
default: True
"""
tfs.tSaveCommWriteAttribute(tObject, attribute, value, silent=True,
wait=wait)
return None
def _tExec(self, tObject, command, param=None, wait=True):
"""
DESCRIPTION:
Execute tango command of a specified tango object.
PARAMETER:
tObject:
Tango Object
cmd:
command which is passed to the Tango object.
KEYWORDS:
param=<*>
command parameter, if existing
"""
return tfs.tSaveCommCommand(tObject, command, Argument=param,
silent=True, wait=wait)
# tcmd = getattr(tObject,cmd)
# if param==None:
# tcmd()
# else:
# tcmd(param)
# return None
def _tMotionWait(self, tObject, poll_time=0.1):
"""
DESCRIPTION:
Poll Tango device state until it is not in moving state.
PARAMETER:
tObject:
Tango Object
KEYWORDS:
poll_time=<FLOAT>
poll time interval. Default: 0.1
"""
while tObject.State() == PyTango.DevState.MOVING:
time.sleep(poll_time)
return None
def _MoveMotor(self, tObject, attribute, position=None, wait=True,
relative=False, backlash=None, verbose=True):
"""
DESCRIPTION:
Pass motion command to Tango server.
PARAMETER:
tObject:
Tango Object
attribute:
The Tango Object's attribute
KEYWORDS:
position=<FLOAT>/<STRING>/None:
set <value> to move to a position,
set 'home' to home this motor
set None to read the current position of the motor
wait=<BOOLEAN>:
wait for the motor motion to be complete before command prompt is released.
Default: True
relative=<BOOLEAN>:
move relative to current position. Default: False
backlash=<FLOAT>/None:
move with backlash. Default: None
verbose=<BOOLEAN>:
Print messages on screen.
Default: True
"""
CurrentPosition = self._tRead(tObject, attribute)
if position is None:
current_position = self._tRead(tObject, attribute)
if verbose is True:
print('%s %s: %f' % (tObject, attribute, current_position))
return current_position
else:
if relative is True:
position = CurrentPosition + position
if verbose is True:
print('Move %s from %s to %s.' % (tObject, CurrentPosition,
position))
try:
if not backlash:
self._tWrite(tObject, attribute, position, wait)
#if wait:
# self._tMotionWait(tObject)
else:
self._tWrite(tObject, attribute, position + backlash, wait)
#if wait is True:
# self._tMotionWait(tObject)
self._tWrite(tObject, attribute, position, wait)
#if wait is True:
# self._tMotionWait(tObject)
except (KeyboardInterrupt, SystemExit):
self.stop()
return None
def _HomeAxis(self, tObject, command, wait=True):
self._tExec(tObject, command)
if wait:
self._tMotionWait(tObject)
return None
def pos(self, device='all', verbose=True):
"""
DESCRIPTION:
            Returns the position of either all axes or a given axis.
KEYWORDS:
device=<STRING>:
returns the position of axis <STRING>. Use Show() to list device names.
if <STRING>=='all', the position of all devices of this class are shown.
Default: 'all'
verbose=<BOOLEAN>:
Print messages on screen.
Default: True
"""
RetValue = {}
if device == 'all':
for iDev in sorted(self.__devdict):
iDevValue = []
for attribute in self.__devdict[iDev][1]:
if type(attribute) is dict:
pass
else:
iDevValue.append(
self._tRead(self.__devdict[iDev][0], attribute))
if verbose is True:
print("%s %s: %s" % (iDev, attribute, self._tRead(self.__devdict[iDev][0], attribute)))
RetValue[iDev] = iDevValue
else:
iDev = self.__devdict[device]
for attribute in iDev[1]:
if type(attribute) is dict:
pass
else:
RetValue[device] = self._tRead(iDev[0], attribute)
if verbose is True:
print("%s %s: %s" % (device, attribute, self._tRead(iDev[0], attribute)))
return RetValue
def state(self, device='all', verbose=True):
"""
DESCRIPTION:
            Returns the State of either all axes or a given axis.
KEYWORDS:
device=<STRING>:
returns the state of axis <STRING>. Use Show() to list device names.
if <STRING>=='all', the position of all devices of this class are shown.
Default: 'all'
verbose=<BOOLEAN>:
Print messages on screen.
Default: True
"""
RetValue = []
if device == 'all':
for iDev in sorted(self.__devdict):
RetValue.append(self._tRead(self.__devdict[iDev][0], 'State'))
if verbose is True:
print("%s: %s" % (iDev, self._tRead(self.__devdict[iDev][0], 'State')))
else:
iDev = self.__devdict[device]
RetValue.append(self._tRead(self.__devdict[iDev][0], 'State'))
if verbose is True:
print('%s: %s ' % (device, self._tRead(iDev[0], 'State')))
return RetValue
def status(self, device='all', verbose=True):
"""
DESCRIPTION:
            Returns the Status of either all axes or a given axis.
KEYWORDS:
device=<STRING>:
returns the state of axis <STRING>. Use Show() to list device names.
if <STRING>=='all', the position of all devices of this class are shown.
Default: 'all'
verbose=<BOOLEAN>:
Print messages on screen.
Default: True
"""
RetValue = []
if device == 'all':
for iDev in sorted(self.__devdict):
RetValue.append(self._tRead(self.__devdict[iDev][0], 'Status'))
if verbose is True:
print("%s: %s" % (iDev, self._tRead(self.__devdict[iDev][0], 'Status')))
else:
iDev = self.__devdict[device]
RetValue.append(self._tRead(self.__devdict[iDev][0], 'Status'))
if verbose is True:
print('%s: %s ' % (device, self._tRead(iDev[0], 'Status')))
return RetValue
def stop(self):
"""
DESCRIPTION:
            Stops the motion of all axes of this class.
"""
for iDev in sorted(self.__devdict):
if self.__devdict[iDev][1][-1]['ismotor'] is False:
pass
else:
self._tExec(self.__devdict[iDev][0], 'StopMove')
return None
def show(self, verbose=True):
"""
DESCRIPTION:
Print a list of all devices and their attributes controlled by this class.
KEYWORDS:
verbose=<BOOLEAN>:
Print messages on screen.
Default: True
"""
RetValue = []
for device in sorted(self.__devdict):
RetValue.append([device, self.__devdict[device][1]])
if verbose is True:
print('Device: %s, %s' % (device, self.__devdict[device][1]))
return RetValue
| 36.697674 | 115 | 0.508872 |
9563f41a47dd2252467072990341667d2fc553a3 | 24 | py | Python | poefixer/postprocess/__init__.py | trh3/poefixer | dafc489ce41d176b0289ba1b48ec82bb6d9e9f07 | ["MIT"] | 18 | 2018-08-02T03:16:12.000Z | 2021-07-10T22:21:55.000Z | poefixer/postprocess/__init__.py | trh3/poefixer | dafc489ce41d176b0289ba1b48ec82bb6d9e9f07 | ["MIT"] | null | null | null | poefixer/postprocess/__init__.py | trh3/poefixer | dafc489ce41d176b0289ba1b48ec82bb6d9e9f07 | ["MIT"] | 11 | 2019-02-19T20:00:46.000Z | 2022-01-20T22:57:58.000Z |
from .currency import *
| 12 | 23 | 0.75 |
a6d78ef1d5e0c4dc8499c6a00fa4d0e8b5135aff | 2,695 | py | Python | raytracerchallenge_python/cone.py | toku345/RayTracerChallenge_Python | 40ced097f92cc61b116d24c6d6c4f27d6b13029d | ["MIT"] | 1 | 2020-05-13T20:54:01.000Z | 2020-05-13T20:54:01.000Z | raytracerchallenge_python/cone.py | toku345/RayTracerChallenge_Python | 40ced097f92cc61b116d24c6d6c4f27d6b13029d | ["MIT"] | null | null | null | raytracerchallenge_python/cone.py | toku345/RayTracerChallenge_Python | 40ced097f92cc61b116d24c6d6c4f27d6b13029d | ["MIT"] | null | null | null |
from raytracerchallenge_python.shape import Shape
from raytracerchallenge_python.intersection import Intersections, Intersection
from raytracerchallenge_python.tuple import Vector
from raytracerchallenge_python.helpers import EPSILON
from math import sqrt
class Cone(Shape):
def __init__(self):
super().__init__()
self.minimum = -float('inf')
self.maximum = float('inf')
self.closed = False
def local_intersect(self, ray):
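        # The infinite double cone satisfies x^2 + z^2 = y^2; substituting the
        # ray (origin + t * direction) into that equation yields the quadratic
        # in t whose coefficients a, b, c are computed below.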
a = ray.direction.x ** 2 - ray.direction.y ** 2 + ray.direction.z ** 2
b = 2 * ray.origin.x * ray.direction.x \
- 2 * ray.origin.y * ray.direction.y \
+ 2 * ray.origin.z * ray.direction.z
c = ray.origin.x ** 2 - ray.origin.y ** 2 + ray.origin.z ** 2
if abs(a) < EPSILON:
if abs(b) < EPSILON:
return Intersections()
else:
xs = [Intersection(-c / (2 * b), self)] + \
self._intersect_caps(ray)
return Intersections(*xs)
disc = b ** 2 - 4 * a * c
if disc < 0:
return Intersections()
t0 = (-b - sqrt(disc)) / (2 * a)
t1 = (-b + sqrt(disc)) / (2 * a)
xs = []
y0 = ray.origin.y + t0 * ray.direction.y
if self.minimum < y0 and y0 < self.maximum:
xs.append(Intersection(t0, self))
y1 = ray.origin.y + t1 * ray.direction.y
if self.minimum < y1 and y1 < self.maximum:
xs.append(Intersection(t1, self))
xs = xs + self._intersect_caps(ray)
return Intersections(*xs)
def _intersect_caps(self, ray):
def check_cap(ray, t):
x = ray.origin.x + t * ray.direction.x
y = ray.origin.y + t * ray.direction.y
z = ray.origin.z + t * ray.direction.z
return (x ** 2 + z ** 2) <= y ** 2
xs = []
if self.closed is False or abs(ray.direction.y) < EPSILON:
return xs
t = (self.minimum - ray.origin.y) / ray.direction.y
if check_cap(ray, t):
xs.append(Intersection(t, self))
t = (self.maximum - ray.origin.y) / ray.direction.y
if check_cap(ray, t):
xs.append(Intersection(t, self))
return xs
def local_normal_at(self, point):
dist = point.x ** 2 + point.z ** 2
if dist < abs(point.y) and point.y >= self.maximum - EPSILON:
return Vector(0, 1, 0)
elif dist < abs(point.y) and point.y <= self.minimum + EPSILON:
return Vector(0, -1, 0)
y = sqrt(point.x ** 2 + point.z ** 2)
if point.y > 0:
y = -y
return Vector(point.x, y, point.z)
| 31.337209 | 78 | 0.53692 |
144f58350322bcae42e152300778f491908a1576 | 8,860 | py | Python | utils/cider/pyciderevalcap/ciderD/ciderD_scorer.py | zhaoguangxiang/OFA | cc1719df2713f0a046f34acb0afd8782e08ea6be | ["Apache-2.0"] | 828 | 2020-05-14T21:00:30.000Z | 2022-03-29T07:21:39.000Z | utils/cider/pyciderevalcap/ciderD/ciderD_scorer.py | zhaoguangxiang/OFA | cc1719df2713f0a046f34acb0afd8782e08ea6be | ["Apache-2.0"] | 181 | 2020-05-18T13:14:59.000Z | 2022-03-30T17:45:59.000Z | utils/cider/pyciderevalcap/ciderD/ciderD_scorer.py | zhaoguangxiang/OFA | cc1719df2713f0a046f34acb0afd8782e08ea6be | ["Apache-2.0"] | 226 | 2020-05-14T20:55:37.000Z | 2022-03-23T09:45:36.000Z |
#!/usr/bin/env python
# Tsung-Yi Lin <tl483@cornell.edu>
# Ramakrishna Vedantam <vrama91@vt.edu>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from collections import defaultdict
import numpy as np
import pdb
import math
import six
from six.moves import cPickle
import os
def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
    :return: term frequency vector for occurring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts
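# Worked example (illustration only):
# precook("the cat the", n=2) counts every 1-gram and 2-gram of the sentence:
#   {('the',): 2, ('cat',): 1, ('the', 'cat'): 1, ('cat', 'the'): 1}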
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs]
def cook_test(test, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
'''
return precook(test, n, True)
class CiderScorer(object):
"""CIDEr scorer.
"""
def copy(self):
''' copy the refs.'''
new = CiderScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
return new
def copy_empty(self):
new = CiderScorer(df_mode="corpus", n=self.n, sigma=self.sigma)
new.df_mode = self.df_mode
new.ref_len = self.ref_len
new.document_frequency = self.document_frequency
return new
def __init__(self, df_mode="corpus", test=None, refs=None, n=4, sigma=6.0):
''' singular instance '''
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.df_mode = df_mode
self.ref_len = None
if self.df_mode != "corpus":
pkl_file = cPickle.load(open(df_mode,'rb'), **(dict(encoding='latin1') if six.PY3 else {}))
self.ref_len = np.log(float(pkl_file['ref_len']))
self.document_frequency = pkl_file['document_frequency']
else:
self.document_frequency = None
self.cook_append(test, refs)
def clear(self):
self.crefs = []
self.ctest = []
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
self.ctest.append(cook_test(test)) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new CiderScorer instances
self.cook_append(other[0], other[1])
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
def compute_doc_freq(self):
'''
Compute term frequency for reference data.
This will be used to compute idf (inverse document frequency later)
The term frequency is stored in the object
:return: None
'''
for refs in self.crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.items()]):
self.document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
def compute_cider(self):
def counts2vec(cnts):
"""
            Maps ngram counts to a vector of tf-idf weights.
            Returns vec, an array of dictionaries that map each n-gram to its tf-idf weight.
            The n-th entry of the array corresponds to n-grams of length n+1.
:param cnts:
:return: vec (array of dict), norm (array of float), length (int)
"""
vec = [defaultdict(float) for _ in range(self.n)]
length = 0
norm = [0.0 for _ in range(self.n)]
for (ngram,term_freq) in cnts.items():
# give word count 1 if it doesn't appear in reference corpus
df = np.log(max(1.0, self.document_frequency[ngram]))
# ngram index
n = len(ngram)-1
# tf (term_freq) * idf (precomputed idf) for n-grams
vec[n][ngram] = float(term_freq)*(self.ref_len - df)
# compute norm for the vector. the norm will be used for computing similarity
norm[n] += pow(vec[n][ngram], 2)
if n == 1:
length += term_freq
norm = [np.sqrt(n) for n in norm]
return vec, norm, length
def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
'''
Compute the cosine similarity of two vectors.
:param vec_hyp: array of dictionary for vector corresponding to hypothesis
:param vec_ref: array of dictionary for vector corresponding to reference
:param norm_hyp: array of float for vector corresponding to hypothesis
:param norm_ref: array of float for vector corresponding to reference
:param length_hyp: int containing length of hypothesis
:param length_ref: int containing length of reference
:return: array of score for each n-grams cosine similarity
'''
delta = float(length_hyp - length_ref)
            # measure cosine similarity
val = np.array([0.0 for _ in range(self.n)])
for n in range(self.n):
# ngram
for (ngram,count) in vec_hyp[n].items():
# vrama91 : added clipping
val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
val[n] /= (norm_hyp[n]*norm_ref[n])
assert(not math.isnan(val[n]))
# vrama91: added a length based gaussian penalty
val[n] *= np.e**(-(delta**2)/(2*self.sigma**2))
return val
# compute log reference length
if self.df_mode == "corpus":
self.ref_len = np.log(float(len(self.crefs)))
#elif self.df_mode == "coco-val-df":
# if coco option selected, use length of coco-val set
# self.ref_len = np.log(float(40504))
scores = []
for test, refs in zip(self.ctest, self.crefs):
# compute vector for test captions
vec, norm, length = counts2vec(test)
# compute vector for ref captions
score = np.array([0.0 for _ in range(self.n)])
for ref in refs:
vec_ref, norm_ref, length_ref = counts2vec(ref)
score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
# change by vrama91 - mean of ngram scores, instead of sum
score_avg = np.mean(score)
# divide by number of references
score_avg /= len(refs)
# multiply score by 10
score_avg *= 10.0
# append score of an image to the score list
scores.append(score_avg)
return scores
def compute_score(self, option=None, verbose=0):
# compute idf
if self.df_mode == "corpus":
self.document_frequency = defaultdict(float)
self.compute_doc_freq()
# assert to check document frequency
assert(len(self.ctest) >= max(self.document_frequency.values()))
# import json for now and write the corresponding files
# compute cider score
score = self.compute_cider()
# debug
# print score
return np.mean(np.array(score)), np.array(score)
| 39.730942 | 116 | 0.592664 |
8af2267b0d9b24453b559eba27cf2c8e2555a492 | 1,707 | py | Python | test/integration/019_analysis_tests/test_analyses.py | tomasfarias/dbt-core | ed5df342ca5d99c5e6971ee6d11c8cf3e6e263b3 | ["Apache-2.0"] | 3,156 | 2017-03-05T09:59:23.000Z | 2021-06-30T01:27:52.000Z | test/integration/019_analysis_tests/test_analyses.py | tomasfarias/dbt-core | ed5df342ca5d99c5e6971ee6d11c8cf3e6e263b3 | ["Apache-2.0"] | 2,608 | 2017-02-27T15:39:40.000Z | 2021-06-30T01:49:20.000Z | test/integration/019_analysis_tests/test_analyses.py | tomasfarias/dbt-core | ed5df342ca5d99c5e6971ee6d11c8cf3e6e263b3 | ["Apache-2.0"] | 693 | 2017-03-13T03:04:49.000Z | 2021-06-25T15:57:41.000Z |
from test.integration.base import DBTIntegrationTest, use_profile, get_manifest
import os
class TestAnalyses(DBTIntegrationTest):
@property
def schema(self):
return "test_analyses_019"
@property
def models(self):
return "models"
def analysis_path(self):
return "analyses"
@property
def project_config(self):
return {
"config-version": 2,
"analysis-paths": [self.analysis_path()]
}
def assert_contents_equal(self, path, expected):
with open(path) as fp:
self.assertEqual(fp.read().strip(), expected)
@use_profile('postgres')
def test_postgres_analyses(self):
compiled_analysis_path = os.path.normpath('target/compiled/test/analyses')
path_1 = os.path.join(compiled_analysis_path, 'my_analysis.sql')
path_2 = os.path.join(compiled_analysis_path, 'raw_stuff.sql')
self.run_dbt(['clean'])
self.assertFalse(os.path.exists(compiled_analysis_path))
results = self.run_dbt(["compile"])
self.assertEqual(len(results), 3)
manifest = get_manifest()
analysis_id = 'analysis.test.my_analysis'
self.assertIn(analysis_id, manifest.nodes)
node = manifest.nodes[analysis_id]
self.assertEqual(node.description, 'This is my analysis')
self.assertTrue(os.path.exists(path_1))
self.assertTrue(os.path.exists(path_2))
expected_sql = 'select * from "{}"."{}"."my_model"'.format(
self.default_database, self.unique_schema()
)
self.assert_contents_equal(path_1, expected_sql)
self.assert_contents_equal(path_2, '{% invalid jinja stuff %}')
| 31.611111 | 82 | 0.652021 |
721666ef70f0e94f243759afc2128c74442caac9 | 1,750 | py | Python | app.py | masaponto/dentaku2 | 843569efd7df272fd47af7cb51d92413a3250d0a | ["MIT"] | null | null | null | app.py | masaponto/dentaku2 | 843569efd7df272fd47af7cb51d92413a3250d0a | ["MIT"] | null | null | null | app.py | masaponto/dentaku2 | 843569efd7df272fd47af7cb51d92413a3250d0a | ["MIT"] | null | null | null |
#!/usr/bin/env python
import tensorflow as tf
import numpy as np
from mnist import model
from flask import Flask, render_template, request, jsonify
app = Flask(__name__, template_folder='build', static_folder='build')
x = tf.placeholder("float", [None, 784])
sess = tf.Session()
with tf.variable_scope("convolutional"):
keep_prob = tf.placeholder("float")
y_conv2, variables = model.convolutional(x, keep_prob)
saver = tf.train.Saver(variables)
saver.restore(sess, "mnist/model/cnn_dentaku.ckpt")
operators = {10: '+', 11: '-', 12: '*', 13: '/'}
def fix_output(output):
if output in operators:
return operators[output]
else:
return output
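# e.g. fix_output(12) -> '*', while digit classes (0-9) pass through unchanged:
# fix_output(7) -> 7.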
def cnn(input_x):
y = sess.run(y_conv2, feed_dict={
x: input_x, keep_prob: 1.0}).flatten().tolist()
return y.index(max(y))
@app.route('/')
def index():
return render_template('index.html')
@app.route('/estimate', methods=["POST"])
def estimate():
try:
input_x = (
(255 - np.array(request.json["input"])) / 255.0).reshape(1, 784)
output = cnn(input_x)
return jsonify({"estimated": fix_output(output)})
except Exception as e:
print(e)
return jsonify({"error": e})
@app.route('/csv', methods=["POST"])
def feature_to_csv():
try:
input_x = (255 - np.array(request.json["input"])).reshape(1, 784)
print(np.array(request.json["input"]))
print(input_x)
input_csv = [str(x) for x in input_x[0].tolist()]
input_csv = ','.join(input_csv)
return jsonify({"feature": input_csv})
except Exception as e:
print(e)
return jsonify({"error": e})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
| 23.648649 | 76 | 0.62 |
3b44d31358b9638edacc7a4612870829ae4abe67 | 1,939 | py | Python | aiida/cmdline/params/types/__init__.py | hongyi-zhao/aiida-core | d6e8c7d10537b754d1cb6334689ba2edf4347d43 | ["BSD-2-Clause", "MIT"] | null | null | null | aiida/cmdline/params/types/__init__.py | hongyi-zhao/aiida-core | d6e8c7d10537b754d1cb6334689ba2edf4347d43 | ["BSD-2-Clause", "MIT"] | null | null | null | aiida/cmdline/params/types/__init__.py | hongyi-zhao/aiida-core | d6e8c7d10537b754d1cb6334689ba2edf4347d43 | ["BSD-2-Clause", "MIT"] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Provides all parameter types."""
from .calculation import CalculationParamType
from .choice import LazyChoice
from .code import CodeParamType
from .computer import ComputerParamType, ShebangParamType, MpirunCommandParamType
from .config import ConfigOptionParamType
from .data import DataParamType
from .group import GroupParamType
from .identifier import IdentifierParamType
from .multiple import MultipleValueParamType
from .node import NodeParamType
from .process import ProcessParamType
from .strings import (NonEmptyStringParamType, EmailType, HostnameType, EntryPointType, LabelStringType)
from .path import AbsolutePathParamType, ImportPath
from .plugin import PluginParamType
from .profile import ProfileParamType
from .user import UserParamType
from .test_module import TestModuleParamType
from .workflow import WorkflowParamType
__all__ = (
'LazyChoice', 'IdentifierParamType', 'CalculationParamType', 'CodeParamType', 'ComputerParamType',
'ConfigOptionParamType', 'DataParamType', 'GroupParamType', 'NodeParamType', 'MpirunCommandParamType',
'MultipleValueParamType', 'NonEmptyStringParamType', 'PluginParamType', 'AbsolutePathParamType', 'ShebangParamType',
'UserParamType', 'TestModuleParamType', 'ProfileParamType', 'WorkflowParamType', 'ProcessParamType', 'ImportPath'
)
| 52.405405 | 120 | 0.6787 |
50fc22daf9f187e84aa7cc6c543652dd65b97292 | 338 | py | Python | squareroot.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | ["Apache-2.0"] | null | null | null | squareroot.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | ["Apache-2.0"] | null | null | null | squareroot.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 09:24:52 2020
@author: u7075106
"""
import math
import random
def square_root(a):
    x = random.randint(1, max(1, int(a / 2)))
while abs((x**2) - a) > 1e-6:
x = ((x + a/x)/2)
print("calculated sqrt", x)
print("Actual sqrt :", math.sqrt(a))
| 18.777778
| 40
| 0.544379
|
fbabc165b2fb2dd1d7e8cf012483c6c490b6c120
| 1,454
|
py
|
Python
|
enaml/workbench/ui/ui_workbench.py
|
timgates42/enaml
|
054efe6a4047d84f2fff718d656a64a2363884dc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
enaml/workbench/ui/ui_workbench.py
|
timgates42/enaml
|
054efe6a4047d84f2fff718d656a64a2363884dc
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-08-22T14:38:24.000Z
|
2019-08-22T14:38:24.000Z
|
enaml/workbench/ui/ui_workbench.py
|
timgates42/enaml
|
054efe6a4047d84f2fff718d656a64a2363884dc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from __future__ import unicode_literals
from enaml.workbench.workbench import Workbench
UI_PLUGIN = u'enaml.workbench.ui'
class UIWorkbench(Workbench):
""" A class for creating workbench UI applications.
The UIWorkbench class is a subclass of Workbench which loads the
builtin ui plugin and provides an entry point to start the main
application event loop.
"""
def run(self):
""" Run the UI workbench application.
This method will load the core and ui plugins and start the
main application event loop. This is a blocking call which
will return when the application event loop exits.
"""
import enaml
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from enaml.workbench.ui.ui_manifest import UIManifest
self.register(CoreManifest())
self.register(UIManifest())
ui = self.get_plugin(UI_PLUGIN)
ui.show_window()
ui.start_application()
# TODO stop all plugins on app exit?
self.unregister(UI_PLUGIN)
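# Minimal usage sketch (assumes a Qt toolkit supported by Enaml is installed):
#   if __name__ == '__main__':
#       UIWorkbench().run()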
| 30.93617
| 79
| 0.616231
|
0988813302362f9909cf8926d18f6de5666207a4
| 983
|
py
|
Python
|
Cas_1/Salinity/A_General_Salinity.py
|
Herpinemmanuel/Oceanography
|
a37befa33698c0e351b11a4b7561d07a430dc28c
|
[
"MIT"
] | 1
|
2020-01-16T07:20:33.000Z
|
2020-01-16T07:20:33.000Z
|
Cas_1/Salinity/A_General_Salinity.py
|
Herpinemmanuel/Oceanography
|
a37befa33698c0e351b11a4b7561d07a430dc28c
|
[
"MIT"
] | 61
|
2017-06-15T08:37:55.000Z
|
2017-07-20T15:46:29.000Z
|
Cas_1/Salinity/A_General_Salinity.py
|
Herpinemmanuel/Oceanography
|
a37befa33698c0e351b11a4b7561d07a430dc28c
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
plt.ion()
dir1 = '/homedata/bderembl/runmit/test_southatlgyre'
ds1 = open_mdsdataset(dir1,prefix=['S'])
nt = 0
nz = 0
# Cartography S : Salinity
plt.figure(1)
ax = plt.subplot(projection=ccrs.PlateCarree());
ds1['S'][nt,nz,:,:].plot.pcolormesh('XC', 'YC', ax=ax);
plt.title('Case 1 : Salinity')
ax.coastlines()
gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
plt.savefig('S_General_Salinity_cas1'+'.png')
plt.clf()
# Averages
Average_S = ds1.S.mean().values
print('Average of Salinity')
print(Average_S,'psu')
Average_S_mask = ds1.S.where(ds1.hFacC>0).mean().values
print('Average of Salinity without continents')
print(Average_S_mask,'psu')
| 25.205128
| 73
| 0.746694
|
4a84bb6cefb51485e4065b1af97e953faf82b49f
| 10,344
|
py
|
Python
|
biosppy/utils.py
|
PatriciaBota/BioSPPy
|
92d97e2b49373069a06dbe6d7711beefa40f2dee
|
[
"BSD-3-Clause"
] | 1
|
2019-08-01T12:30:46.000Z
|
2019-08-01T12:30:46.000Z
|
biosppy/utils.py
|
PatriciaBota/BioSPPy
|
92d97e2b49373069a06dbe6d7711beefa40f2dee
|
[
"BSD-3-Clause"
] | null | null | null |
biosppy/utils.py
|
PatriciaBota/BioSPPy
|
92d97e2b49373069a06dbe6d7711beefa40f2dee
|
[
"BSD-3-Clause"
] | 1
|
2019-08-01T13:25:52.000Z
|
2019-08-01T13:25:52.000Z
|
# -*- coding: utf-8 -*-
"""
biosppy.utils
-------------
This module provides several frequently used functions and hacks.
:copyright: (c) 2015-2018 by Instituto de Telecomunicacoes
:license: BSD 3-clause, see LICENSE for more details.
"""
# Imports
# compat
from __future__ import absolute_import, division, print_function
from six.moves import map, range, zip
import six
# built-in
import collections
import copy
import keyword
import os
import re
# 3rd party
import numpy as np
def normpath(path):
"""Normalize a path.
Parameters
----------
path : str
The path to normalize.
Returns
-------
npath : str
The normalized path.
"""
if '~' in path:
out = os.path.abspath(os.path.expanduser(path))
else:
out = os.path.abspath(path)
return out
def fileparts(path):
"""split a file path into its directory, name, and extension.
Parameters
----------
path : str
Input file path.
Returns
-------
dirname : str
File directory.
fname : str
File name.
ext : str
File extension.
Notes
-----
* Removes the dot ('.') from the extension.
"""
dirname, fname = os.path.split(path)
fname, ext = os.path.splitext(fname)
ext = ext.replace('.', '')
return dirname, fname, ext
def fullfile(*args):
"""Join one or more file path components, assuming the last is
the extension.
Parameters
----------
``*args`` : list, optional
Components to concatenate.
Returns
-------
fpath : str
The concatenated file path.
"""
nb = len(args)
if nb == 0:
return ''
elif nb == 1:
return args[0]
elif nb == 2:
return os.path.join(*args)
fpath = os.path.join(*args[:-1]) + '.' + args[-1]
return fpath
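# For example (following the branches above):
#   fullfile()                        -> ''
#   fullfile('report')                -> 'report'
#   fullfile('data', 'report.txt')    -> os.path.join('data', 'report.txt')
#   fullfile('data', 'report', 'txt') -> os.path.join('data', 'report') + '.txt'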
def walktree(top=None, spec=None):
"""Iterator to recursively descend a directory and return all files
matching the spec.
Parameters
----------
top : str, optional
        Starting directory; if None, defaults to the current working directory.
spec : str, optional
Regular expression to match the desired files;
if None, matches all files; typical patterns:
* `r'\.txt$'` - matches files with '.txt' extension;
* `r'^File_'` - matches files starting with 'File\_'
* `r'^File_.+\.txt$'` - matches files starting with 'File\_' and ending with the '.txt' extension.
Yields
------
fpath : str
Absolute file path.
Notes
-----
* Partial matches are also selected.
See Also
--------
* https://docs.python.org/3/library/re.html
* https://regex101.com/
"""
if top is None:
top = os.getcwd()
if spec is None:
spec = r'.*?'
prog = re.compile(spec)
for root, _, files in os.walk(top):
for name in files:
if prog.search(name):
fname = os.path.join(root, name)
yield fname
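# Example: list all '.txt' files below the current directory (pattern taken from the docstring):
#   for fpath in walktree(spec=r'\.txt$'):
#       print(fpath)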
def remainderAllocator(votes, k, reverse=True, check=False):
"""Allocate k seats proportionally using the Remainder Method.
Also known as Hare-Niemeyer Method. Uses the Hare quota.
Parameters
----------
votes : list
Number of votes for each class/party/cardinal.
k : int
        Total number of seats to allocate.
reverse : bool, optional
If True, allocates remaining seats largest quota first.
check : bool, optional
If True, limits the number of seats to the total number of votes.
Returns
-------
seats : list
Number of seats for each class/party/cardinal.
"""
# check total number of votes
tot = np.sum(votes)
if check and k > tot:
k = tot
# frequencies
length = len(votes)
freqs = np.array(votes, dtype='float') / tot
# assign items
aux = k * freqs
seats = aux.astype('int')
# leftovers
nb = k - seats.sum()
if nb > 0:
if reverse:
ind = np.argsort(aux - seats)[::-1]
else:
ind = np.argsort(aux - seats)
for i in range(nb):
seats[ind[i % length]] += 1
return seats.tolist()
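# Worked example of the Hare/remainder method above:
#   remainderAllocator([5, 3, 1], 10)
# the quotas are [5.56, 3.33, 1.11]; the integer parts [5, 3, 1] leave one seat,
# which goes to the largest remainder (0.56), giving [6, 3, 1].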
def highestAveragesAllocator(votes, k, divisor='dHondt', check=False):
"""Allocate k seats proportionally using the Highest Averages Method.
Parameters
----------
votes : list
Number of votes for each class/party/cardinal.
k : int
        Total number of seats to allocate.
divisor : str, optional
Divisor method; one of 'dHondt', 'Huntington-Hill', 'Sainte-Lague',
'Imperiali', or 'Danish'.
check : bool, optional
If True, limits the number of seats to the total number of votes.
Returns
-------
seats : list
Number of seats for each class/party/cardinal.
"""
# check total number of cardinals
tot = np.sum(votes)
if check and k > tot:
k = tot
# select divisor
if divisor == 'dHondt':
fcn = lambda i: float(i)
elif divisor == 'Huntington-Hill':
fcn = lambda i: np.sqrt(i * (i + 1.))
elif divisor == 'Sainte-Lague':
fcn = lambda i: i - 0.5
elif divisor == 'Imperiali':
fcn = lambda i: float(i + 1)
elif divisor == 'Danish':
fcn = lambda i: 3. * (i - 1.) + 1.
else:
raise ValueError("Unknown divisor method.")
# compute coefficients
tab = []
length = len(votes)
D = [fcn(i) for i in range(1, k + 1)]
for i in range(length):
for j in range(k):
tab.append((i, votes[i] / D[j]))
# sort
tab.sort(key=lambda item: item[1], reverse=True)
tab = tab[:k]
tab = np.array([item[0] for item in tab], dtype='int')
seats = np.zeros(length, dtype='int')
for i in range(length):
seats[i] = np.sum(tab == i)
return seats.tolist()
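# Worked example of the default d'Hondt divisor above:
#   highestAveragesAllocator([5, 3, 1], 4)
# the quotients (votes / 1, 2, 3, ...) are 5, 2.5, 1.67, ... for the first party and
# 3, 1.5, ... for the second; the four highest are 5, 3, 2.5 and 1.67, so the
# seats come out as [3, 1, 0].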
def random_fraction(indx, fraction, sort=True):
"""Select a random fraction of an input list of elements.
Parameters
----------
indx : list, array
Elements to partition.
fraction : int, float
Fraction to select.
sort : bool, optional
If True, output lists will be sorted.
Returns
-------
use : list, array
Selected elements.
unuse : list, array
Remaining elements.
"""
# number of elements to use
fraction = float(fraction)
nb = int(fraction * len(indx))
# copy because shuffle works in place
aux = copy.deepcopy(indx)
# shuffle
    np.random.shuffle(aux)
# select
use = aux[:nb]
unuse = aux[nb:]
# sort
if sort:
use.sort()
unuse.sort()
return use, unuse
class ReturnTuple(tuple):
"""A named tuple to use as a hybrid tuple-dict return object.
Parameters
----------
values : iterable
Return values.
names : iterable, optional
Names for return values.
Raises
------
ValueError
If the number of values differs from the number of names.
ValueError
If any of the items in names:
* contain non-alphanumeric characters;
* are Python keywords;
* start with a number;
* are duplicates.
"""
def __new__(cls, values, names=None):
return tuple.__new__(cls, tuple(values))
def __init__(self, values, names=None):
nargs = len(values)
if names is None:
# create names
names = ['_%d' % i for i in range(nargs)]
else:
# check length
if len(names) != nargs:
raise ValueError("Number of names and values mismatch.")
# convert to str
names = list(map(str, names))
# check for keywords, alphanumeric, digits, repeats
seen = set()
for name in names:
if not all(c.isalnum() or (c == '_') for c in name):
raise ValueError("Names can only contain alphanumeric \
characters and underscores: %r." % name)
if keyword.iskeyword(name):
raise ValueError("Names cannot be a keyword: %r." % name)
if name[0].isdigit():
raise ValueError("Names cannot start with a number: %r." %
name)
if name in seen:
raise ValueError("Encountered duplicate name: %r." % name)
seen.add(name)
self._names = names
self._values = values
def as_dict(self):
"""Convert to an ordered dictionary.
Returns
-------
out : OrderedDict
An OrderedDict representing the return values.
"""
return collections.OrderedDict(zip(self._names, self))
__dict__ = property(as_dict)
def __getitem__(self, key):
"""Get item as an index or keyword.
Returns
-------
out : object
The object corresponding to the key, if it exists.
Raises
------
KeyError
If the key is a string and it does not exist in the mapping.
IndexError
If the key is an int and it is out of range.
"""
if isinstance(key, six.string_types):
if key not in self._names:
raise KeyError("Unknown key: %r." % key)
key = self._names.index(key)
return super(ReturnTuple, self).__getitem__(key)
def __repr__(self):
"""Return representation string."""
tpl = '%s=%r'
rp = ', '.join(tpl % item for item in zip(self._names, self))
return 'ReturnTuple(%s)' % rp
def __getnewargs__(self):
"""Return self as a plain tuple; used for copy and pickle."""
return tuple(self)
def keys(self):
"""Return the value names.
Returns
-------
out : list
The keys in the mapping.
"""
return list(self._names)
def values(self):
"""Return the varibale values.
Returns
-------
out : list
The values in the mapping.
"""
return list(self._values)
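# Usage sketch for the hybrid tuple/dict return object defined above:
#   out = ReturnTuple((1.0, 'ok'), names=('score', 'status'))
#   out[0] == out['score']   # index and keyword access are equivalent
#   out.as_dict()            # OrderedDict([('score', 1.0), ('status', 'ok')])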
| 23.037862
| 110
| 0.549884
|
7db307a40d21bf184d0f12ca1a90f8b6c147feff
| 1,615
|
py
|
Python
|
tests/compiler/division/test_division.py
|
eternalSeeker/pcc
|
c2b64c2b9e3f9374063854fff953c19efcdad514
|
[
"MIT"
] | 1
|
2019-10-09T14:24:01.000Z
|
2019-10-09T14:24:01.000Z
|
tests/compiler/division/test_division.py
|
eternalSeeker/pcc
|
c2b64c2b9e3f9374063854fff953c19efcdad514
|
[
"MIT"
] | null | null | null |
tests/compiler/division/test_division.py
|
eternalSeeker/pcc
|
c2b64c2b9e3f9374063854fff953c19efcdad514
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from os.path import abspath, dirname
import pytest
import tests.generateOutputsDecorator
from tests.compiler.CompilerHelper import CompilerHelper, \
generate_compiler_outputs
generate_outputs = tests.generateOutputsDecorator.generate_outputs
# The parametrize function is generated, so it does not work to import
parametrize = pytest.mark.parametrize
files_to_test = [
('char.c', 'charHelper.c', 'char.out'),
('int.c', 'intHelper.c', 'int.out'),
('float.c', 'floatHelper.c', 'float.out'),
('double.c', 'doubleHelper.c', 'double.out'),
('charInit.c', 'charHelper.c', 'charInit.out'),
('intInit.c', 'intHelper.c', 'intInit.out'),
('floatInit.c', 'floatHelper.c', 'floatInit.out'),
('doubleInit.c', 'doubleHelper.c', 'doubleInit.out'),
('charVarAndConstant.c', 'charHelper.c', 'charVarAndConstant.out'),
('intVarAndConstant.c', 'intHelper.c', 'intVarAndConstant.out'),
('floatVarAndConstant.c', 'floatHelper.c', 'floatVarAndConstant.out'),
('doubleVarAndConstant.c', 'doubleHelper.c', 'doubleVarAndConstant.out'),
]
@generate_outputs
def generate_compiler_test_outputs():
path_of_this_file = abspath(dirname(__file__))
generate_compiler_outputs(files_to_test, path_of_this_file)
class TestDivision(CompilerHelper):
@parametrize('file_to_test,helper,output_file', files_to_test)
def test_division(self, file_to_test, helper, output_file, capsys):
path_of_this_file = abspath(dirname(__file__))
self.execute_test(file_to_test, helper, output_file, capsys,
path_of_this_file)
| 35.888889
| 77
| 0.713313
|
e9bb9ff9429ec639f3c7a659e0450bf1fe596420
| 970
|
py
|
Python
|
app/conftest.py
|
dxw/nhx-website
|
88344c7602823e0b371d3c6f933aa24aeb57db16
|
[
"MIT"
] | 50
|
2019-04-04T17:50:00.000Z
|
2021-08-05T15:08:37.000Z
|
app/conftest.py
|
dxw/nhx-website
|
88344c7602823e0b371d3c6f933aa24aeb57db16
|
[
"MIT"
] | 434
|
2019-04-04T18:25:32.000Z
|
2022-03-31T18:23:37.000Z
|
app/conftest.py
|
nhsx-mirror/nhsx-website
|
2133b4e275ca35ff77f7d6874e809f139ec4bf86
|
[
"MIT"
] | 23
|
2019-04-04T09:52:07.000Z
|
2021-04-11T07:41:47.000Z
|
# -*- coding: utf-8 -*-
"""
conftest
~~~~~~~~
Pytest config.
"""
import pytest
from consoler import console # NOQA
try:
import envkey # NOQA
except Exception:
pass
from tests.init import setup
from tests.fixtures import * # NOQA
from modules.core.tests.fixtures import * # NOQA
from modules.home.tests.fixtures import * # NOQA
from modules.blog_posts.tests.fixtures import * # NOQA
from modules.publications.tests.fixtures import * # NOQA
from modules.news.tests.fixtures import * # NOQA
from modules.users.tests.fixtures import * # NOQA
pytestmark = pytest.mark.django_db
@pytest.fixture(autouse=True)
def enable_db_access_for_all_tests(db):
pass
@pytest.fixture(scope="session")
def django_db_setup(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
setup()
@pytest.fixture(scope="session", autouse=True)
def faker_session_locale():
return "en_US"
return ["it_IT", "ja_JP", "en_US"]
| 21.555556
| 57
| 0.715464
|
452c5ec86ee25af24f44a7412bf98c148acf8d59
| 32,036
|
py
|
Python
|
vispy/app/backends/_qt.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 1
|
2021-08-04T06:31:02.000Z
|
2021-08-04T06:31:02.000Z
|
vispy/app/backends/_qt.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/app/backends/_qt.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Base code for the Qt backends. Note that this is *not* (anymore) a
backend by itself! One has to explicitly use either PySide, PyQt4 or
PySide2, PyQt5. Note that the automatic backend selection prefers
a GUI toolkit that is already imported.
The _pyside, _pyqt4, _pyside2, _pyqt5 and _pyside6 modules will
import * from this module, and also keep a ref to the module object.
Note that if two of the backends are used, this module is actually
reloaded. This is a sort of poor man's "subclassing" to get a working
version for both backends using the same code.
Note that it is strongly discouraged to use the
PySide/PyQt4/PySide2/PyQt5/PySide6 backends simultaneously. It is
known to cause unpredictable behavior and segfaults.
"""
from __future__ import division
from time import sleep, time
import os
import sys
import atexit
import ctypes
from distutils.version import LooseVersion
from ...util import logger
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...util import keys
from ... import config
from . import qt_lib
USE_EGL = config['gl_backend'].lower().startswith('es')
# Get platform
IS_LINUX = IS_OSX = IS_WIN = IS_RPI = False
if sys.platform.startswith('linux'):
if os.uname()[4].startswith('arm'):
IS_RPI = True
else:
IS_LINUX = True
elif sys.platform.startswith('darwin'):
IS_OSX = True
elif sys.platform.startswith('win'):
IS_WIN = True
# -------------------------------------------------------------------- init ---
def _check_imports(lib):
# Make sure no conflicting libraries have been imported.
libs = ['PyQt4', 'PyQt5', 'PySide', 'PySide2', 'PySide6']
libs.remove(lib)
for lib2 in libs:
lib2 += '.QtCore'
if lib2 in sys.modules:
raise RuntimeError("Refusing to import %s because %s is already "
"imported." % (lib, lib2))
# Get what qt lib to try. This tells us whether this module is imported
# via _pyside or _pyqt4 or _pyqt5
QGLWidget = object
QT5_NEW_API = False
QT6_NEW_API = False
if qt_lib == 'pyqt4':
_check_imports('PyQt4')
if not USE_EGL:
from PyQt4.QtOpenGL import QGLWidget, QGLFormat
from PyQt4 import QtGui, QtCore, QtTest
QWidget, QApplication = QtGui.QWidget, QtGui.QApplication # Compat
elif qt_lib == 'pyqt5':
_check_imports('PyQt5')
if not USE_EGL:
from PyQt5.QtCore import QT_VERSION_STR
if LooseVersion(QT_VERSION_STR) >= '5.4.0':
from PyQt5.QtWidgets import QOpenGLWidget as QGLWidget
from PyQt5.QtGui import QSurfaceFormat as QGLFormat
QT5_NEW_API = True
else:
from PyQt5.QtOpenGL import QGLWidget, QGLFormat
from PyQt5 import QtGui, QtCore, QtWidgets, QtTest
QWidget, QApplication = QtWidgets.QWidget, QtWidgets.QApplication # Compat
elif qt_lib == 'pyside6':
_check_imports('PySide6')
if not USE_EGL:
from PySide6.QtCore import __version__ as QT_VERSION_STR
if LooseVersion(QT_VERSION_STR) >= '6.0.0':
from PySide6.QtOpenGLWidgets import QOpenGLWidget as QGLWidget
from PySide6.QtGui import QSurfaceFormat as QGLFormat
QT6_NEW_API = True
else:
from PySide6.QtOpenGL import QGLWidget, QGLFormat
from PySide6 import QtGui, QtCore, QtWidgets, QtTest
QWidget, QApplication = QtWidgets.QWidget, QtWidgets.QApplication # Compat
elif qt_lib == 'pyside2':
_check_imports('PySide2')
if not USE_EGL:
from PySide2.QtCore import __version__ as QT_VERSION_STR
if LooseVersion(QT_VERSION_STR) >= '5.4.0':
from PySide2.QtWidgets import QOpenGLWidget as QGLWidget
from PySide2.QtGui import QSurfaceFormat as QGLFormat
QT5_NEW_API = True
else:
from PySide2.QtOpenGL import QGLWidget, QGLFormat
from PySide2 import QtGui, QtCore, QtWidgets, QtTest
QWidget, QApplication = QtWidgets.QWidget, QtWidgets.QApplication # Compat
elif qt_lib == 'pyside':
_check_imports('PySide')
if not USE_EGL:
from PySide.QtOpenGL import QGLWidget, QGLFormat
from PySide import QtGui, QtCore, QtTest
QWidget, QApplication = QtGui.QWidget, QtGui.QApplication # Compat
elif qt_lib:
raise RuntimeError("Invalid value for qt_lib %r." % qt_lib)
else:
raise RuntimeError("Module backends._qt should not be imported directly.")
# todo: add support for distinguishing left and right shift/ctrl/alt keys.
# Linux scan codes: (left, right)
# Shift 50, 62
# Ctrl 37, 105
# Alt 64, 108
KEYMAP = {
QtCore.Qt.Key_Shift: keys.SHIFT,
QtCore.Qt.Key_Control: keys.CONTROL,
QtCore.Qt.Key_Alt: keys.ALT,
QtCore.Qt.Key_AltGr: keys.ALT,
QtCore.Qt.Key_Meta: keys.META,
QtCore.Qt.Key_Left: keys.LEFT,
QtCore.Qt.Key_Up: keys.UP,
QtCore.Qt.Key_Right: keys.RIGHT,
QtCore.Qt.Key_Down: keys.DOWN,
QtCore.Qt.Key_PageUp: keys.PAGEUP,
QtCore.Qt.Key_PageDown: keys.PAGEDOWN,
QtCore.Qt.Key_Insert: keys.INSERT,
QtCore.Qt.Key_Delete: keys.DELETE,
QtCore.Qt.Key_Home: keys.HOME,
QtCore.Qt.Key_End: keys.END,
QtCore.Qt.Key_Escape: keys.ESCAPE,
QtCore.Qt.Key_Backspace: keys.BACKSPACE,
QtCore.Qt.Key_F1: keys.F1,
QtCore.Qt.Key_F2: keys.F2,
QtCore.Qt.Key_F3: keys.F3,
QtCore.Qt.Key_F4: keys.F4,
QtCore.Qt.Key_F5: keys.F5,
QtCore.Qt.Key_F6: keys.F6,
QtCore.Qt.Key_F7: keys.F7,
QtCore.Qt.Key_F8: keys.F8,
QtCore.Qt.Key_F9: keys.F9,
QtCore.Qt.Key_F10: keys.F10,
QtCore.Qt.Key_F11: keys.F11,
QtCore.Qt.Key_F12: keys.F12,
QtCore.Qt.Key_Space: keys.SPACE,
QtCore.Qt.Key_Enter: keys.ENTER,
QtCore.Qt.Key_Return: keys.ENTER,
QtCore.Qt.Key_Tab: keys.TAB,
}
BUTTONMAP = {0: 0, 1: 1, 2: 2, 4: 3, 8: 4, 16: 5}
# Properly log Qt messages
def message_handler(*args):
if qt_lib in ("pyqt4", "pyside"):
msg_type, msg = args
elif qt_lib in ("pyqt5", "pyside2", "pyside6"): # Is this correct for pyside2?
msg_type, context, msg = args
elif qt_lib:
raise RuntimeError("Invalid value for qt_lib %r." % qt_lib)
else:
raise RuntimeError("Module backends._qt ",
"should not be imported directly.")
BLACKLIST = [
# Ignore spam about tablet input
'QCocoaView handleTabletEvent: This tablet device is unknown',
# Not too sure why this warning is emitted when using
# Spyder + PyQt5 + Vispy
# https://github.com/vispy/vispy/issues/1787
# In either case, it is really annoying. We should filter it away
'QSocketNotifier: Multiple socket notifiers for same',
]
for item in BLACKLIST:
if msg.startswith(item):
return
msg = msg.decode() if not isinstance(msg, str) else msg
logger.warning(msg)
def use_shared_contexts():
"""Enable context sharing for PyQt5 5.4+ API applications.
This is disabled by default for PyQt5 5.4+ due to occasional segmentation
faults and other issues when contexts are shared.
"""
forced_env_var = os.getenv('VISPY_PYQT5_SHARE_CONTEXT', 'false').lower() == 'true'
return not (QT5_NEW_API or QT6_NEW_API) or forced_env_var
try:
QtCore.qInstallMsgHandler(message_handler)
except AttributeError:
QtCore.qInstallMessageHandler(message_handler) # PyQt5
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=use_shared_contexts(),
multi_window=True,
scroll=True,
parent=True,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set the OpenGL configuration"""
glformat = QGLFormat()
glformat.setRedBufferSize(c['red_size'])
glformat.setGreenBufferSize(c['green_size'])
glformat.setBlueBufferSize(c['blue_size'])
glformat.setAlphaBufferSize(c['alpha_size'])
if QT5_NEW_API or QT6_NEW_API:
# Qt5 >= 5.4.0 - below options automatically enabled if nonzero.
glformat.setSwapBehavior(glformat.DoubleBuffer if c['double_buffer']
else glformat.SingleBuffer)
else:
# Qt4 and Qt5 < 5.4.0 - buffers must be explicitly requested.
glformat.setAccum(False)
glformat.setRgba(True)
glformat.setDoubleBuffer(True if c['double_buffer'] else False)
glformat.setDepth(True if c['depth_size'] else False)
glformat.setStencil(True if c['stencil_size'] else False)
glformat.setSampleBuffers(True if c['samples'] else False)
glformat.setDepthBufferSize(c['depth_size'] if c['depth_size'] else 0)
glformat.setStencilBufferSize(c['stencil_size'] if c['stencil_size']
else 0)
glformat.setSamples(c['samples'] if c['samples'] else 0)
glformat.setStereo(c['stereo'])
return glformat
# ------------------------------------------------------------- application ---
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
# sharing is currently buggy and causes segmentation faults for tests with PyQt 5.6
if (QT5_NEW_API or QT6_NEW_API) and use_shared_contexts():
# For Qt5 >= 5.4.0 - Enable sharing of context between windows.
QApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts, True)
def _vispy_get_backend_name(self):
name = QtCore.__name__.split('.')[0]
return name
def _vispy_process_events(self):
app = self._vispy_get_native_app()
# sendPostedEvents replaces flush which has been removed from Qt6.0+
# This should be compatible with Qt4.x and Qt5.x
app.sendPostedEvents()
app.processEvents()
def _vispy_run(self):
app = self._vispy_get_native_app()
if hasattr(app, '_in_event_loop') and app._in_event_loop:
pass # Already in event loop
else:
return app.exec_()
def _vispy_quit(self):
return self._vispy_get_native_app().quit()
def _vispy_get_native_app(self):
        # Get native app in a safe way. Taken from guisupport.py
app = QApplication.instance()
if app is None:
app = QApplication([''])
# Store so it won't be deleted, but not on a vispy object,
# or an application may produce error when closed.
QtGui._qApp = app
# Return
return app
def _vispy_sleep(self, duration_sec):
QtTest.QTest.qWait(duration_sec * 1000) # in ms
# ------------------------------------------------------------------ canvas ---
def _get_qpoint_pos(pos):
"""Return the coordinates of a QPointF object."""
return pos.x(), pos.y()
class QtBaseCanvasBackend(BaseCanvasBackend):
"""Base functionality of Qt backend. No OpenGL Stuff."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
# Maybe to ensure that exactly all arguments are passed?
p = self._process_backend_kwargs(kwargs)
self._initialized = False
# Init in desktop GL or EGL way
self._init_specific(p, kwargs)
assert self._initialized
self.setMouseTracking(True)
self._vispy_set_title(p.title)
self._vispy_set_size(*p.size)
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for Qt '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
# must set physical size before setting visible or fullscreen
# operations may make the size invalid
if hasattr(self, 'devicePixelRatio'):
# handle high DPI displays in PyQt5
ratio = self.devicePixelRatio()
else:
ratio = 1
self._physical_size = (p.size[0] * ratio, p.size[1] * ratio)
if not p.resizable:
self.setFixedSize(self.size())
if p.position is not None:
self._vispy_set_position(*p.position)
if p.show:
self._vispy_set_visible(True)
# Qt supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
try:
# see screen_changed docstring for more details
self.window().windowHandle().screenChanged.connect(self.screen_changed)
except AttributeError:
# either not PyQt5 backend or no parent window available
pass
# Activate touch and gesture.
# NOTE: we only activate touch on OS X because there seems to be
# problems on Ubuntu computers with touchscreen.
# See https://github.com/vispy/vispy/pull/1143
if sys.platform == 'darwin':
self.setAttribute(QtCore.Qt.WA_AcceptTouchEvents)
self.grabGesture(QtCore.Qt.PinchGesture)
def screen_changed(self, new_screen):
"""Window moved from one display to another, resize canvas.
If display resolutions are the same this is essentially a no-op except for the redraw.
If the display resolutions differ (HiDPI versus regular displays) the canvas needs to
be redrawn to reset the physical size based on the current `devicePixelRatio()` and
redrawn with that new size.
"""
self.resizeGL(*self._vispy_get_size())
def _vispy_warmup(self):
etime = time() + 0.25
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._vispy_canvas is None:
return
self.setWindowTitle(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
self.resize(w, h)
def _vispy_set_physical_size(self, w, h):
self._physical_size = (w, h)
def _vispy_get_physical_size(self):
if self._vispy_canvas is None:
return
return self._physical_size
def _vispy_set_position(self, x, y):
# Set location of the widget or window. May have no effect for widgets
self.move(x, y)
def _vispy_set_visible(self, visible):
# Show or hide the window or widget
if visible:
if self._fullscreen:
self.showFullScreen()
else:
self.showNormal()
else:
self.hide()
def _vispy_set_fullscreen(self, fullscreen):
self._fullscreen = bool(fullscreen)
self._vispy_set_visible(True)
def _vispy_get_fullscreen(self):
return self._fullscreen
def _vispy_update(self):
if self._vispy_canvas is None:
return
# Invoke a redraw
self.update()
def _vispy_get_position(self):
g = self.geometry()
return g.x(), g.y()
def _vispy_get_size(self):
g = self.geometry()
return g.width(), g.height()
def sizeHint(self):
return self.size()
def mousePressEvent(self, ev):
if self._vispy_canvas is None:
return
self._vispy_mouse_press(
native=ev,
pos=(ev.pos().x(), ev.pos().y()),
button=BUTTONMAP.get(ev.button(), 0),
modifiers=self._modifiers(ev),
)
def mouseReleaseEvent(self, ev):
if self._vispy_canvas is None:
return
self._vispy_mouse_release(
native=ev,
pos=(ev.pos().x(), ev.pos().y()),
button=BUTTONMAP[ev.button()],
modifiers=self._modifiers(ev),
)
def mouseDoubleClickEvent(self, ev):
if self._vispy_canvas is None:
return
self._vispy_mouse_double_click(
native=ev,
pos=(ev.pos().x(), ev.pos().y()),
button=BUTTONMAP.get(ev.button(), 0),
modifiers=self._modifiers(ev),
)
def mouseMoveEvent(self, ev):
if self._vispy_canvas is None:
return
self._vispy_mouse_move(
native=ev,
pos=(ev.pos().x(), ev.pos().y()),
modifiers=self._modifiers(ev),
)
def wheelEvent(self, ev):
if self._vispy_canvas is None:
return
# Get scrolling
deltax, deltay = 0.0, 0.0
if hasattr(ev, 'orientation'):
if ev.orientation == QtCore.Qt.Horizontal:
deltax = ev.delta() / 120.0
else:
deltay = ev.delta() / 120.0
else:
# PyQt5
delta = ev.angleDelta()
deltax, deltay = delta.x() / 120.0, delta.y() / 120.0
if hasattr(ev, 'pos'):
posx, posy = ev.pos().x(), ev.pos().y()
else:
# Compatibility for PySide6
posx, posy = ev.position().x(), ev.position().y()
# Emit event
self._vispy_canvas.events.mouse_wheel(
native=ev,
delta=(deltax, deltay),
pos=(posx, posy),
modifiers=self._modifiers(ev),
)
def keyPressEvent(self, ev):
self._keyEvent(self._vispy_canvas.events.key_press, ev)
def keyReleaseEvent(self, ev):
self._keyEvent(self._vispy_canvas.events.key_release, ev)
def event(self, ev):
out = super(QtBaseCanvasBackend, self).event(ev)
t = ev.type()
# Two-finger pinch.
if (t == QtCore.QEvent.TouchBegin):
self._vispy_canvas.events.touch(type='begin')
if (t == QtCore.QEvent.TouchEnd):
self._vispy_canvas.events.touch(type='end')
if (t == QtCore.QEvent.Gesture):
gesture = ev.gesture(QtCore.Qt.PinchGesture)
if gesture:
(x, y) = _get_qpoint_pos(gesture.centerPoint())
scale = gesture.scaleFactor()
last_scale = gesture.lastScaleFactor()
rotation = gesture.rotationAngle()
self._vispy_canvas.events.touch(
type="pinch",
pos=(x, y),
last_pos=None,
scale=scale,
last_scale=last_scale,
rotation=rotation,
total_rotation_angle=gesture.totalRotationAngle(),
total_scale_factor=gesture.totalScaleFactor(),
)
# General touch event.
elif (t == QtCore.QEvent.TouchUpdate):
points = ev.touchPoints()
# These variables are lists of (x, y) coordinates.
pos = [_get_qpoint_pos(p.pos()) for p in points]
lpos = [_get_qpoint_pos(p.lastPos()) for p in points]
self._vispy_canvas.events.touch(type='touch',
pos=pos,
last_pos=lpos,
)
return out
def _keyEvent(self, func, ev):
# evaluates the keycode of qt, and transform to vispy key.
key = int(ev.key())
if key in KEYMAP:
key = KEYMAP[key]
elif key >= 32 and key <= 127:
key = keys.Key(chr(key))
else:
key = None
mod = self._modifiers(ev)
func(native=ev, key=key, text=str(ev.text()), modifiers=mod)
def _modifiers(self, event):
# Convert the QT modifier state into a tuple of active modifier keys.
mod = ()
qtmod = event.modifiers()
for q, v in ([QtCore.Qt.ShiftModifier, keys.SHIFT],
[QtCore.Qt.ControlModifier, keys.CONTROL],
[QtCore.Qt.AltModifier, keys.ALT],
[QtCore.Qt.MetaModifier, keys.META]):
if q & qtmod:
mod += (v,)
return mod
_EGL_DISPLAY = None
egl = None
# todo: Make work on Windows
# todo: Make work without readpixels on Linux?
# todo: Make work on OSX?
# todo: Make work on Raspberry Pi!
class CanvasBackendEgl(QtBaseCanvasBackend, QWidget):
def _init_specific(self, p, kwargs):
# Initialize egl. Note that we only import egl if needed.
global _EGL_DISPLAY
global egl
if egl is None:
from ...ext import egl as _egl
egl = _egl
# Use MESA driver on Linux
if IS_LINUX and not IS_RPI:
os.environ['EGL_SOFTWARE'] = 'true'
# Create and init display
_EGL_DISPLAY = egl.eglGetDisplay()
CanvasBackendEgl._EGL_VERSION = egl.eglInitialize(_EGL_DISPLAY)
atexit.register(egl.eglTerminate, _EGL_DISPLAY)
# Deal with context
p.context.shared.add_ref('qt-egl', self)
if p.context.shared.ref is self:
self._native_config = c = egl.eglChooseConfig(_EGL_DISPLAY)[0]
self._native_context = egl.eglCreateContext(_EGL_DISPLAY, c, None)
else:
self._native_config = p.context.shared.ref._native_config
self._native_context = p.context.shared.ref._native_context
# Init widget
if p.always_on_top or not p.decorate:
hint = 0
hint |= 0 if p.decorate else QtCore.Qt.FramelessWindowHint
hint |= QtCore.Qt.WindowStaysOnTopHint if p.always_on_top else 0
else:
hint = QtCore.Qt.Widget # can also be a window type
QWidget.__init__(self, p.parent, hint)
if 0: # IS_LINUX or IS_RPI:
self.setAutoFillBackground(False)
self.setAttribute(QtCore.Qt.WA_NoSystemBackground, True)
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
elif IS_WIN:
self.setAttribute(QtCore.Qt.WA_PaintOnScreen, True)
self.setAutoFillBackground(False)
# Init surface
w = self.get_window_id()
self._surface = egl.eglCreateWindowSurface(_EGL_DISPLAY, c, w)
self.initializeGL()
self._initialized = True
def get_window_id(self):
"""Get the window id of a PySide Widget. Might also work for PyQt4."""
# Get Qt win id
winid = self.winId()
# On Linux this is it
if IS_RPI:
nw = (ctypes.c_int * 3)(winid, self.width(), self.height())
return ctypes.pointer(nw)
elif IS_LINUX:
return int(winid) # Is int on PySide, but sip.voidptr on PyQt
# Get window id from stupid capsule thingy
# http://translate.google.com/translate?hl=en&sl=zh-CN&u=http://www.cnb
# logs.com/Shiren-Y/archive/2011/04/06/2007288.html&prev=/search%3Fq%3Dp
# yside%2Bdirectx%26client%3Dfirefox-a%26hs%3DIsJ%26rls%3Dorg.mozilla:n
# l:official%26channel%3Dfflb%26biw%3D1366%26bih%3D614
# Prepare
ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p
ctypes.pythonapi.PyCapsule_GetName.argtypes = [ctypes.py_object]
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object,
ctypes.c_char_p]
# Extract handle from capsule thingy
name = ctypes.pythonapi.PyCapsule_GetName(winid)
handle = ctypes.pythonapi.PyCapsule_GetPointer(winid, name)
return handle
def _vispy_close(self):
# Destroy EGL surface
if self._surface is not None:
egl.eglDestroySurface(_EGL_DISPLAY, self._surface)
self._surface = None
# Force the window or widget to shut down
self.close()
def _vispy_set_current(self):
egl.eglMakeCurrent(_EGL_DISPLAY, self._surface,
self._surface, self._native_context)
def _vispy_swap_buffers(self):
egl.eglSwapBuffers(_EGL_DISPLAY, self._surface)
def initializeGL(self):
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
def resizeEvent(self, event):
w, h = event.size().width(), event.size().height()
self._vispy_canvas.events.resize(size=(w, h))
def paintEvent(self, event):
self._vispy_canvas.events.draw(region=None)
if IS_LINUX or IS_RPI:
# Arg, cannot get GL to draw to the widget, so we take a
# screenshot and draw that for now ...
# Further, QImage keeps a ref to the data that we pass, so
# we need to use a static buffer to prevent memory leakage
from ... import gloo
import numpy as np
if not hasattr(self, '_gl_buffer'):
self._gl_buffer = np.ones((3000 * 3000 * 4), np.uint8) * 255
# Take screenshot and turn into RGB QImage
im = gloo.read_pixels()
sze = im.shape[0] * im.shape[1]
self._gl_buffer[0:0+sze*4:4] = im[:, :, 2].ravel()
self._gl_buffer[1:0+sze*4:4] = im[:, :, 1].ravel()
self._gl_buffer[2:2+sze*4:4] = im[:, :, 0].ravel()
img = QtGui.QImage(self._gl_buffer, im.shape[1], im.shape[0],
QtGui.QImage.Format_RGB32)
# Paint the image
painter = QtGui.QPainter()
painter.begin(self)
rect = QtCore.QRect(0, 0, self.width(), self.height())
painter.drawImage(rect, img)
painter.end()
def paintEngine(self):
if IS_LINUX and not IS_RPI:
# For now we are drawing a screenshot
return QWidget.paintEngine(self)
else:
return None # Disable Qt's native drawing system
class CanvasBackendDesktop(QtBaseCanvasBackend, QGLWidget):
def _init_specific(self, p, kwargs):
# Deal with config
glformat = _set_config(p.context.config)
glformat.setSwapInterval(1 if p.vsync else 0)
# Deal with context
widget = kwargs.pop('shareWidget', None) or self
p.context.shared.add_ref('qt', widget)
if p.context.shared.ref is widget:
if widget is self:
widget = None # QGLWidget does not accept self ;)
else:
widget = p.context.shared.ref
if 'shareWidget' in kwargs:
raise RuntimeError('Cannot use vispy to share context and '
'use built-in shareWidget.')
if p.always_on_top or not p.decorate:
hint = 0
hint |= 0 if p.decorate else QtCore.Qt.FramelessWindowHint
hint |= QtCore.Qt.WindowStaysOnTopHint if p.always_on_top else 0
else:
hint = QtCore.Qt.Widget # can also be a window type
if QT5_NEW_API or QT6_NEW_API:
# Qt5 >= 5.4.0 - sharing is automatic
QGLWidget.__init__(self, p.parent, hint)
# Need to create an offscreen surface so we can get GL parameters
# without opening/showing the Widget. PyQt5 >= 5.4 will create the
# valid context later when the widget is shown.
self._secondary_context = QtGui.QOpenGLContext()
self._secondary_context.setShareContext(self.context())
self._secondary_context.setFormat(glformat)
self._secondary_context.create()
self._surface = QtGui.QOffscreenSurface()
self._surface.setFormat(glformat)
self._surface.create()
self._secondary_context.makeCurrent(self._surface)
else:
# Qt4 and Qt5 < 5.4.0 - sharing is explicitly requested
QGLWidget.__init__(self, p.parent, widget, hint)
# unused with this API
self._secondary_context = None
self._surface = None
self.setFormat(glformat)
self._initialized = True
if not QT5_NEW_API and not QT6_NEW_API and not self.isValid():
# On Qt5 >= 5.4.0, isValid is only true once the widget is shown
raise RuntimeError('context could not be created')
if not QT5_NEW_API and not QT6_NEW_API:
# to make consistent with other backends
self.setAutoBufferSwap(False)
self.setFocusPolicy(QtCore.Qt.WheelFocus)
def _vispy_close(self):
# Force the window or widget to shut down
self.close()
self.doneCurrent()
if not QT5_NEW_API and not QT6_NEW_API:
self.context().reset()
if self._vispy_canvas is not None:
self._vispy_canvas.app.process_events()
self._vispy_canvas.app.process_events()
def _vispy_set_current(self):
if self._vispy_canvas is None:
return # todo: can we get rid of this now?
if self.isValid():
self.makeCurrent()
def _vispy_swap_buffers(self):
# Swap front and back buffer
if self._vispy_canvas is None:
return
if QT5_NEW_API or QT6_NEW_API:
ctx = self.context()
ctx.swapBuffers(ctx.surface())
else:
self.swapBuffers()
def _vispy_get_fb_bind_location(self):
if QT5_NEW_API or QT6_NEW_API:
return self.defaultFramebufferObject()
else:
return QtBaseCanvasBackend._vispy_get_fb_bind_location(self)
def initializeGL(self):
if self._vispy_canvas is None:
return
self._vispy_canvas.events.initialize()
def resizeGL(self, w, h):
if self._vispy_canvas is None:
return
if hasattr(self, 'devicePixelRatio'):
# We take into account devicePixelRatio, which is non-unity on
# e.g HiDPI displays.
# self.devicePixelRatio() is a float and should have been in Qt5 according to the documentation
ratio = int(self.devicePixelRatio())
w = w * ratio
h = h * ratio
self._vispy_set_physical_size(w, h)
self._vispy_canvas.events.resize(size=(self.width(), self.height()),
physical_size=(w, h))
def paintGL(self):
if self._vispy_canvas is None:
return
# (0, 0, self.width(), self.height()))
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
# Clear the alpha channel with QOpenGLWidget (Qt >= 5.4), otherwise the
# window is translucent behind non-opaque objects.
# Reference: MRtrix3/mrtrix3#266
if QT5_NEW_API or QT6_NEW_API:
context = self._vispy_canvas.context
context.set_color_mask(False, False, False, True)
context.clear(color=True, depth=False, stencil=False)
context.set_color_mask(True, True, True, True)
context.flush()
# Select CanvasBackend
if USE_EGL:
CanvasBackend = CanvasBackendEgl
else:
CanvasBackend = CanvasBackendDesktop
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend, QtCore.QTimer):
def __init__(self, vispy_timer):
# Make sure there is an app
app = ApplicationBackend()
app._vispy_get_native_app()
# Init
BaseTimerBackend.__init__(self, vispy_timer)
QtCore.QTimer.__init__(self)
self.timeout.connect(self._vispy_timeout)
def _vispy_start(self, interval):
self.start(interval * 1000.)
def _vispy_stop(self):
self.stop()
def _vispy_timeout(self):
self._vispy_timer._timeout()
| 36.117249
| 107
| 0.611624
|
fe8386ae52c111a7fa07f53884d4e9631099ff26
| 662
|
py
|
Python
|
checkrp.py
|
BartMassey/meme-voting
|
b04fd2ab0927d4760172d2e9333be807b6d84d27
|
[
"MIT"
] | 4
|
2020-02-01T04:56:22.000Z
|
2020-05-11T06:31:38.000Z
|
checkrp.py
|
BartMassey/meme-voting
|
b04fd2ab0927d4760172d2e9333be807b6d84d27
|
[
"MIT"
] | null | null | null |
checkrp.py
|
BartMassey/meme-voting
|
b04fd2ab0927d4760172d2e9333be807b6d84d27
|
[
"MIT"
] | null | null | null |
# Check the relative ranking of two candidates.
# Bart Massey
import csv
import sys
ballots = "meme-election-ballots.csv"
if len(sys.argv) > 3:
ballots = sys.argv[3]
ballots = open(ballots, 'r')
ballots = csv.reader(ballots)
header = next(ballots)
ballots = [[int(rank) - 1 for rank in ballot] for ballot in ballots]
ncandidates = len(header)
header_index = { name : candidate for candidate, name in enumerate(header) }
c1 = header_index[sys.argv[1]]
c2 = header_index[sys.argv[2]]
v1 = 0
v2 = 0
for ballot in ballots:
if ballot[c1] < ballot[c2]:
v1 += 1
else:
v2 += 1
print(header[c1], v1)
print(header[c2], v2)
print(v1 - v2)
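# Example invocation (candidate names must match the CSV header; the ballots file
# defaults to meme-election-ballots.csv unless a third argument is given):
#   python checkrp.py "Candidate A" "Candidate B" [ballots.csv]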
| 20.6875
| 76
| 0.669184
|
4e93b88f04539023c23627328087e52207beaa18
| 731
|
py
|
Python
|
ccs_amplicon/label_haplotype_seqs.py
|
lumc-pgx/CCS-Amplicon
|
0f4f35c468e6e5f43f4ed037da5238e3ab0bc22f
|
[
"MIT"
] | null | null | null |
ccs_amplicon/label_haplotype_seqs.py
|
lumc-pgx/CCS-Amplicon
|
0f4f35c468e6e5f43f4ed037da5238e3ab0bc22f
|
[
"MIT"
] | null | null | null |
ccs_amplicon/label_haplotype_seqs.py
|
lumc-pgx/CCS-Amplicon
|
0f4f35c468e6e5f43f4ed037da5238e3ab0bc22f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
from __future__ import print_function
import click
from Bio import SeqIO
import sys
@click.command(context_settings=dict(
ignore_unknown_options=True,
),
short_help="rename the sequences created by LAA"
)
@click.option("--prefix", "-p", type=str, default="haplotype",
help="prefix to use for the sequence id/name")
@click.argument("sequence_fastq", type=click.Path(exists=True))
def cli_handler(prefix, sequence_fastq):
for record in SeqIO.parse(sequence_fastq, "fastq"):
name = prefix
record.id = name
record.name = name
record.description = name
SeqIO.write(record, sys.stdout, "fastq")
if __name__ == '__main__':
cli_handler()
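# Example invocation (hypothetical file names; relabeled records are written to stdout):
#   python label_haplotype_seqs.py --prefix sample1_hap0 haplotype.fastq > labeled.fastq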
| 27.074074
| 63
| 0.685363
|
0c8f8d1507c82e91cc7bb8c53f27c188d85c60c3
| 5,419
|
py
|
Python
|
examples/training/avg_word_embeddings/training_stsbenchmark_tf-idf_word_embeddings.py
|
faezakamran/sentence-transformers
|
2158fff3aa96651b10fe367c41fdd5008a33c5c6
|
[
"Apache-2.0"
] | 7,566
|
2019-07-25T07:45:17.000Z
|
2022-03-31T22:15:35.000Z
|
examples/training/avg_word_embeddings/training_stsbenchmark_tf-idf_word_embeddings.py
|
faezakamran/sentence-transformers
|
2158fff3aa96651b10fe367c41fdd5008a33c5c6
|
[
"Apache-2.0"
] | 1,444
|
2019-07-25T11:53:48.000Z
|
2022-03-31T15:13:32.000Z
|
examples/training/avg_word_embeddings/training_stsbenchmark_tf-idf_word_embeddings.py
|
faezakamran/sentence-transformers
|
2158fff3aa96651b10fe367c41fdd5008a33c5c6
|
[
"Apache-2.0"
] | 1,567
|
2019-07-26T15:19:28.000Z
|
2022-03-31T19:57:35.000Z
|
"""
This example weights word embeddings (like GloVe) with IDF weights. The IDF weights can for example be computed on Wikipedia.
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/ for available word embeddings files
You can get term-document frequencies from here:
https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/wikipedia_doc_frequencies.txt
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import *
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = 'output/training_tf-idf_word_embeddings-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Wikipedia document frequency for words
wiki_doc_freq = 'wikipedia_doc_frequencies.txt'
if not os.path.exists(wiki_doc_freq):
util.http_get('https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/wikipedia_doc_frequencies.txt', wiki_doc_freq)
# Check if the dataset exists. If not, download and extract it
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
##### Construction of the SentenceTransformer Model #####
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file('glove.6B.300d.txt.gz')
# Weight word embeddings using Inverse-Document-Frequency (IDF) values.
# For each word in the vocab of the tokenizer, we must specify a weight value.
# The word embedding is then multiplied by this value
vocab = word_embedding_model.tokenizer.get_vocab()
word_weights = {}
lines = open(wiki_doc_freq, encoding='utf8').readlines()
num_docs = int(lines[0])
for line in lines[1:]:
word, freq = line.strip().split("\t")
word_weights[word] = math.log(num_docs/int(freq))
# Words in the vocab that are not in the doc_frequencies file get a frequency of 1
unknown_word_weight = math.log(num_docs/1)
# Initialize the WordWeights model. This model must be between the WordEmbeddings and the Pooling model
word_weights = models.WordWeights(vocab=vocab, word_weights=word_weights, unknown_word_weight=unknown_word_weight)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, word_weights, pooling_model, dan1, dan2])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
model.evaluate(test_evaluator)
| 41.366412
| 130
| 0.730947
|
11deea198d9829cb63efd6010dd70a37a61873fd
| 16,716
|
py
|
Python
|
pygama/analysis/histograms.py
|
iguinn/pygama
|
ae643524a49c476c0c5b35407a6ad82ba5d0401c
|
[
"Apache-2.0"
] | 13
|
2019-05-01T01:37:30.000Z
|
2022-03-18T08:52:19.000Z
|
pygama/analysis/histograms.py
|
iguinn/pygama
|
ae643524a49c476c0c5b35407a6ad82ba5d0401c
|
[
"Apache-2.0"
] | 111
|
2019-03-25T00:50:48.000Z
|
2022-03-30T17:13:43.000Z
|
pygama/analysis/histograms.py
|
iguinn/pygama
|
ae643524a49c476c0c5b35407a6ad82ba5d0401c
|
[
"Apache-2.0"
] | 52
|
2019-01-24T21:05:04.000Z
|
2022-03-07T23:37:55.000Z
|
"""
pygama convenience functions for 1D histograms.
1D hists in pygama require 3 things available from all implementations
of 1D histograms of numerical data in python: hist, bins, and var:
- hist: an array of histogram values
- bins: an array of bin edges
- var: an array of variances in each bin
If var is not provided, pygama assumes that the hist contains "counts" with
variance = counts (Poisson stats).
These are just convenience functions. Hopefully they will help you if you need
to do something trickier than is provided here (e.g. 2D hists).
"""
import numpy as np
import matplotlib.pyplot as plt
import pygama.utils as pgu
from pylab import rcParams
def get_hist(data, bins=None, range=None, dx=None, wts=None):
""" return hist, bins, var after binning data
This is just a wrapper for numpy.histogram, with optional weights for each
element and proper computing of variances.
Note: there are no overflow / underflow bins.
Available binning methods:
- Default (no binning arguments) : 100 bins over an auto-detected range
- bins=N, range=(x_lo, x_hi) : N bins over the specified range (or leave
range=None for auto-detected range)
- bins=[str] : use one of np.histogram's automatic binning algorithms
- bins=bin_edges_array : array lower bin edges, supports non-uniform binning
- dx=dx, range=(x_lo, x_hi): bins of width dx over the specified range.
Note: dx overrides the bins argument!
Parameters
----------
data : array like
The array of data to be histogrammed
bins: int, array, or str (optional)
int: the number of bins to be used in the histogram
array: an array of bin edges to use
str: the name of the np.histogram automatic binning algorithm to use
If not provided, np.histogram's default auto-binning routine is used
range : tuple (float, float) (optional)
(x_lo, x_high) is the tuple of low and high x values to uses for the
very ends of the bin range. If not provided, np.histogram chooses the
ends based on the data in data
dx : float (optional)
Specifies the bin width. Overrides "bins" if both arguments are present
wts : float or array like (optional)
Array of weights for each bin. For example, if you want to divide all
bins by a time T to get the bin contents in count rate, set wts = 1/T.
Variances will be computed for each bin that appropriately account for
each data point's weighting.
Returns
-------
hist, bins, var : tuple (array, array, array)
hist : the values in each bin of the histogram
bins : an array of bin edges: bins[i] is the lower edge of the ith bin.
Note: it includes the upper edge of the last bin and does not
include underflow or overflow bins. So len(bins) = len(hist) + 1
var : array of variances in each bin of the histogram
"""
if bins is None:
bins = 100 # override np.histogram default of just 10
if dx is not None:
bins = int((range[1] - range[0]) / dx)
# bins includes left edge of first bin and right edge of all other bins
hist, bins = np.histogram(data, bins=bins, range=range, weights=wts)
if wts is None:
# no weights: var = hist, but return a copy so that mods to var don't
# modify hist.
# Note: If you don't want a var copy, just call np.histogram()
return hist, bins, hist.copy()
else:
# get the variances by binning with double the weight
var, bins = np.histogram(data, bins=bins, weights=wts*wts)
return hist, bins, var
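# Usage sketches for the binning options documented above (data is any 1D array):
#   hist, bins, var = get_hist(data)                          # 100 bins over an auto-detected range
#   hist, bins, var = get_hist(data, bins=50, range=(0, 10))  # 50 bins over [0, 10)
#   hist, bins, var = get_hist(data, dx=0.5, range=(0, 10))   # fixed 0.5-wide bins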
def better_int_binning(x_lo=0, x_hi=None, dx=None, n_bins=None):
""" Get a good binning for integer data.
Guarantees an integer bin width.
At least two of x_hi, dx, or n_bins must be provided.
Parameters
----------
x_lo : float
Desired low x value for the binning
x_hi : float
Desired high x value for the binning
dx : float
Desired bin width
n_bins : float
Desired number of bins
Returns
-------
x_lo: int
int values for best x_lo
x_hi: int
int values for best x_hi, returned if x_hi is not None
dx : int
best int bin width, returned if arg dx is not None
n_bins : int
best int n_bins, returned if arg n_bins is not None
"""
# process inputs
n_Nones = int(x_hi is None) + int(dx is None) + int(n_bins is None)
if n_Nones > 1:
print('better_int_binning: must provide two of x_hi, dx or n_bins')
return
if n_Nones == 0:
print('better_int_binning: overconstrained. Ignoring x_hi.')
x_hi = None
# get valid dx or n_bins
if dx is not None:
if dx <= 0:
print(f'better_int_binning: invalid dx={dx}')
return
dx = np.round(dx)
if dx == 0: dx = 1
if n_bins is not None:
if n_bins <= 0:
print(f'better_int_binning: invalid n_bins={n_bins}')
return
n_bins = np.round(n_bins)
# can already return if no x_hi
if x_hi is None: # must have both dx and n_bins
return int(x_lo), int(dx), int(n_bins)
# x_hi is valid. Get a valid dx if we don't have one
if dx is None: # must have n_bins
dx = np.round((x_hi-x_lo)/n_bins)
if dx == 0: dx = 1
# Finally, build a good binning from dx
n_bins = np.ceil((x_hi-x_lo)/dx)
x_lo = np.floor(x_lo)
x_hi = x_lo + n_bins*dx
if n_bins is None: return int(x_lo), int(x_hi), int(dx)
else: return int(x_lo), int(x_hi), int(n_bins)
def get_bin_centers(bins):
"""
Returns an array of bin centers from an input array of bin edges.
    Works for non-uniform binning. Note: a new array is allocated.
"""
return (bins[:-1] + bins[1:]) / 2.
def get_bin_widths(bins):
"""
Returns an array of bin widths from an input array of bin edges.
Works for non-uniform binning.
"""
return (bins[1:] - bins[:-1])
def find_bin(x, bins):
"""
Returns the index of the bin containing x
Returns -1 for underflow, and len(bins) for overflow
For uniform bins, jumps directly to the appropriate index.
For non-uniform bins, binary search is used.
"""
# first handle overflow / underflow
if len(bins) == 0: return 0 # i.e. overflow
if x < bins[0]: return -1
if x > bins[-1]: return len(bins)
# we are definitely in range, and there are at least 2 bin edges, one below
# and one above x. try assuming uniform bins
    dx = bins[1]-bins[0]
    index = int(np.floor((x-bins[0])/dx))
    if index < len(bins)-1 and bins[index] <= x and bins[index+1] > x: return index
    # bins are non-uniform: find by binary search
    # subtract 1 so the convention matches the uniform case: bins[i] <= x < bins[i+1]
    return np.searchsorted(bins, x, side='right') - 1
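# Illustrative usage sketch; not part of the original module: find_bin on
# uniform and non-uniform edges, including the underflow / overflow conventions.
def _example_find_bin():
    uniform = np.linspace(0, 10, 11)   # unit-width bins
    print(find_bin(3.5, uniform))      # 3
    nonuniform = np.array([0., 1., 2., 4., 8.])
    print(find_bin(5.0, nonuniform))   # 3, i.e. the bin [4, 8)
    print(find_bin(-1.0, nonuniform))  # -1 (underflow)
    print(find_bin(9.0, nonuniform))   # len(bins) (overflow)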
def range_slice(x_min, x_max, hist, bins, var=None):
    """ Slice a histogram down to the bins between x_min and x_max.
    Returns the sliced hist, bins (including the right edge), and var (if provided).
    """
i_min = find_bin(x_min, bins)
i_max = find_bin(x_max, bins)
if var is not None: var = var[i_min:i_max]
return hist[i_min:i_max], bins[i_min:i_max+1], var
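# Illustrative usage sketch; not part of the original module: restrict a
# histogram to a window around a feature before further analysis.
def _example_range_slice():
    data = np.random.normal(0, 1, size=10000)
    hist, bins, var = get_hist(data, bins=100, range=(-5, 5))
    h, b, v = range_slice(-1, 1, hist, bins, var)
    assert len(b) == len(h) + 1
    return h, b, v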
def get_fwhm(hist, bins, var=None, mx=None, dmx=0, bl=0, dbl=0, method='bins_over_f', n_slope=3):
"""
Estimate the FWHM of data in a histogram
See get_fwfm for parameters and return values
"""
if len(bins) == len(hist):
print("note: this function has been updated to require bins rather",
"than bin_centers. Don't trust this result")
return get_fwfm(0.5, hist, bins, var, mx, dmx, bl, dbl, method, n_slope)
def get_fwfm(fraction, hist, bins, var=None, mx=None, dmx=0, bl=0, dbl=0, method='bins_over_f', n_slope=3):
"""
Estimate the full width at some fraction of the max of data in a histogram
Typically used by sending slices around a peak. Searches about argmax(hist)
for the peak to fall by [fraction] from mx to bl
    Parameters
----------
fraction : float
The fractional amplitude at which to evaluate the full width
hist : array-like
The histogram data array containing the peak
    bins : array-like
        An array of bin edges for the histogram
var : array-like (optional)
An array of histogram variances. Used with the 'fit_slopes' method
mx : float or tuple(float, float) (optional)
The value to use for the max of the peak. If None, np.amax(hist) is
used.
dmx : float (optional)
The uncertainty in mx
bl : float or tuple (float, float) (optional)
Used to specify an offset from which to estimate the FWFM.
dbl : float (optional)
The uncertainty in the bl
method : string
        'bins_over_f' : the simplest method: just take the difference in the bin
        centers that are over [fraction] of max. Only works for high stats and
        FWFM/bin_width >> 1
        'interpolate' : interpolate between the bins that cross the [fraction]
        line. Works well for high stats and a reasonable number of bins.
        Uncertainty incorporates var, if provided.
        'fit_slopes' : fit a line over n_slope bins in the vicinity of the FWFM
        and interpolate to get the fractional crossing point. Works okay even
        when stats are moderate, but requires enough bins that the peak shape
        is approximately linear over the dx traversed by n_slope bins.
        Incorporates bin variances in the fit and final uncertainties if
        provided.
Returns
-------
fwfm, dfwfm : float, float
fwfm: the full width at [fraction] of the maximum above bl
dfwfm: the uncertainty in fwfm
Examples
--------
>>> import pygama.analysis.histograms as pgh
>>> from numpy.random import normal
>>> hist, bins, var = pgh.get_hist(normal(size=10000), bins=100, range=(-5,5))
>>> pgh.get_fwfm(0.5, hist, bins, var, method='bins_over_f')
(2.2, 0.15919638684132664) # may vary
>>> pgh.get_fwfm(0.5, hist, bins, var, method='interpolate')
(2.2041666666666666, 0.09790931254396479) # may vary
>>> pgh.get_fwfm(0.5, hist, bins, var, method='fit_slopes')
(2.3083363869003466, 0.10939486522749278) # may vary
"""
# find bins over [fraction]
if mx is None:
mx = np.amax(hist)
if var is not None and dmx == 0:
dmx = np.sqrt(var[np.argmax(hist)])
idxs_over_f = hist > (bl + fraction * (mx-bl))
    # argmax will return the index of the first occurrence of a maximum
# so we can use it to find the first and last time idxs_over_f is "True"
bin_lo = np.argmax(idxs_over_f)
bin_hi = len(idxs_over_f) - np.argmax(idxs_over_f[::-1])
bin_centers = get_bin_centers(bins)
# precalc dheight: uncertainty in height used as the threshold
dheight2 = (fraction*dmx)**2 + ((1-fraction)*dbl)**2
if method == 'bins_over_f':
        # the simplest method: just take the difference in the bin centers
fwfm = bin_centers[bin_hi] - bin_centers[bin_lo]
# compute rough uncertainty as [bin width] (+) [dheight / slope]
dx = bin_centers[bin_lo] - bin_centers[bin_lo-1]
dy = hist[bin_lo] - hist[bin_lo-1]
if dy == 0: dy = (hist[bin_lo+1] - hist[bin_lo-2])/3
dfwfm2 = dx**2 + dheight2 * (dx/dy)**2
dx = bin_centers[bin_hi+1] - bin_centers[bin_hi]
dy = hist[bin_hi] - hist[bin_hi+1]
if dy == 0: dy = (hist[bin_hi-1] - hist[bin_hi+2])/3
dfwfm2 += dx**2 + dheight2 * (dx/dy)**2
return fwfm, np.sqrt(dfwfm2)
elif method == 'interpolate':
# interpolate between the two bins that cross the [fraction] line
# works well for high stats
if bin_lo < 1 or bin_hi >= len(hist)-1:
print(f"get_fwhm: can't interpolate ({bin_lo}, {bin_hi})")
return 0, 0
val_f = bl + fraction*(mx-bl)
# x_lo
dx = bin_centers[bin_lo] - bin_centers[bin_lo-1]
dhf = val_f - hist[bin_lo-1]
dh = hist[bin_lo] - hist[bin_lo-1]
x_lo = bin_centers[bin_lo-1] + dx * dhf/dh
# uncertainty
dx2_lo = 0
if var is not None:
dx2_lo = (dhf/dh)**2 * var[bin_lo] + ((dh-dhf)/dh)**2 * var[bin_lo-1]
dx2_lo *= (dx/dh)**2
dDdh = -dx/dh
# x_hi
dx = bin_centers[bin_hi+1] - bin_centers[bin_hi]
dhf = hist[bin_hi] - val_f
dh = hist[bin_hi] - hist[bin_hi+1]
if dh == 0:
raise ValueError(f"get_fwhm: interpolation failed, dh == 0")
x_hi = bin_centers[bin_hi] + dx * dhf/dh
if x_hi < x_lo:
raise ValueError(f"get_fwfm: interpolation produced negative fwfm")
# uncertainty
dx2_hi = 0
if var is not None:
dx2_hi = (dhf/dh)**2 * var[bin_hi+1] + ((dh-dhf)/dh)**2 * var[bin_hi]
dx2_hi *= (dx/dh)**2
dDdh += dx/dh
return x_hi - x_lo, np.sqrt(dx2_lo + dx2_hi + dDdh**2 * dheight2)
elif method == 'fit_slopes':
# evaluate the [fraction] point on a line fit to n_slope bins near the crossing.
# works okay even when stats are moderate
val_f = bl + fraction*(mx-bl)
# x_lo
i_0 = bin_lo - int(np.floor(n_slope/2))
if i_0 < 0:
print(f"get_fwfm: fit slopes failed")
return 0, 0
i_n = i_0 + n_slope
        wts = None if var is None else 1/np.sqrt(var[i_0:i_n]) # inf if any var = 0
        if wts is not None: wts = [w if w != np.inf else 0 for w in wts]
try:
(m, b), cov = np.polyfit(bin_centers[i_0:i_n], hist[i_0:i_n], 1, w=wts, cov='unscaled')
except np.linalg.LinAlgError:
print(f"get_fwfm: LinAlgError")
return 0, 0
x_lo = (val_f-b)/m
#uncertainty
dxl2 = cov[0,0]/m**2 + (cov[1,1] + dheight2)/(val_f-b)**2 + 2*cov[0,1]/(val_f-b)/m
dxl2 *= x_lo**2
# x_hi
i_0 = bin_hi - int(np.floor(n_slope/2)) + 1
if i_0 == len(hist):
print(f"get_fwfm: fit slopes failed")
return 0, 0
i_n = i_0 + n_slope
        wts = None if var is None else 1/np.sqrt(var[i_0:i_n]) # inf if any var = 0
        if wts is not None: wts = [w if w != np.inf else 0 for w in wts]
try:
(m, b), cov = np.polyfit(bin_centers[i_0:i_n], hist[i_0:i_n], 1, w=wts, cov='unscaled')
except np.linalg.LinAlgError:
print(f"get_fwfm: LinAlgError")
return 0, 0
x_hi = (val_f-b)/m
if x_hi < x_lo:
print(f"get_fwfm: fit slopes produced negative fwfm")
return 0, 0
#uncertainty
dxh2 = cov[0,0]/m**2 + (cov[1,1] + dheight2)/(val_f-b)**2 + 2*cov[0,1]/(val_f-b)/m
dxh2 *= x_hi**2
return x_hi - x_lo, np.sqrt(dxl2 + dxh2)
else:
print(f"get_fwhm: unrecognized method {method}")
return 0, 0
def plot_hist(hist, bins, var=None, show_stats=False, stats_hloc=0.75, stats_vloc=0.85, **kwargs):
"""
plot a step histogram, with optional error bars
"""
if var is None:
# the concat calls get the steps to draw correctly at the range boundaries
# where="post" tells plt to draw the step y[i] between x[i] and x[i+1]
plt.step(np.concatenate(([bins[0]], bins)), np.concatenate(([0], hist, [0])), where="post", **kwargs)
else:
plt.errorbar(get_bin_centers(bins), hist,
xerr=get_bin_widths(bins) / 2, yerr=np.sqrt(var),
fmt='none', **kwargs)
if show_stats is True:
bin_centers = get_bin_centers(bins)
N = np.sum(hist)
if N <= 1:
print("can't compute sigma for N =", N)
return
mean = np.sum(hist*bin_centers)/N
x2ave = np.sum(hist*bin_centers*bin_centers)/N
stddev = np.sqrt(N/(N-1) * (x2ave - mean*mean))
dmean = stddev/np.sqrt(N)
mean, dmean = pgu.get_formatted_stats(mean, dmean, 2)
        stats = '$\\mu=%s \\pm %s$\n$\\sigma=%#.3g$' % (mean, dmean, stddev)
stats_fontsize = rcParams['legend.fontsize']
plt.text(stats_hloc, stats_vloc, stats, transform=plt.gca().transAxes, fontsize = stats_fontsize)
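# Illustrative usage sketch; not part of the original module. It assumes
# matplotlib.pyplot is imported as plt (as used above).
def _example_plot_hist():
    data = np.random.normal(0, 1, size=10000)
    hist, bins, var = get_hist(data, bins=100, range=(-5, 5))
    plot_hist(hist, bins, var=var, show_stats=True, label='data')
    plt.legend()
    plt.show()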
def get_gaussian_guess(hist, bins):
"""
    given a hist, gives guesses for mu, sigma, and the (Gaussian) area
"""
if len(bins) == len(hist):
print("note: this function has been updated to require bins rather",
"than bin_centers. Don't trust this result")
max_idx = np.argmax(hist)
    guess_e = (bins[max_idx] + bins[max_idx+1])/2 # bin center
guess_amp = hist[max_idx]
# find 50% amp bounds on both sides for a FWHM guess
    guess_sigma = get_fwhm(hist, bins)[0] / 2.355 # FWHM to sigma
guess_area = guess_amp * guess_sigma * np.sqrt(2 * np.pi)
return (guess_e, guess_sigma, guess_area)
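# Illustrative usage sketch; not part of the original module: feed the guesses
# into a fit. scipy is an assumed extra dependency here, used only for
# illustration, and the Gaussian below is defined locally to match the guesses.
def _example_get_gaussian_guess():
    from scipy.optimize import curve_fit
    def gauss(x, mu, sigma, area):
        return area / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2 * sigma**2))
    data = np.random.normal(0, 1, size=10000)
    hist, bins, var = get_hist(data, bins=100, range=(-5, 5))
    p0 = get_gaussian_guess(hist, bins)
    pars, cov = curve_fit(gauss, get_bin_centers(bins), hist, p0=p0)
    return pars, cov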
| 37.229399
| 109
| 0.617253
|
a34c163c29a316bb88e3ed68c52e410785e76362
| 54
|
py
|
Python
|
pam/__init__.py
|
jbkalmbach/pam
|
91318dd2efaa1663eec4edb5a860dd7469ba4c2a
|
[
"MIT"
] | 2
|
2020-01-17T19:57:19.000Z
|
2020-01-24T18:12:57.000Z
|
pam/__init__.py
|
jbkalmbach/pam
|
91318dd2efaa1663eec4edb5a860dd7469ba4c2a
|
[
"MIT"
] | 8
|
2018-11-30T00:46:39.000Z
|
2020-01-15T19:20:10.000Z
|
pam/__init__.py
|
jbkalmbach/pam
|
91318dd2efaa1663eec4edb5a860dd7469ba4c2a
|
[
"MIT"
] | null | null | null |
from .gan import *
from .defaultArchitectures import *
| 27
| 35
| 0.796296
|
8ac211ed55e7ab79cbb5164647eca05045eef929
| 921
|
py
|
Python
|
src/MyAIGuide/data/fitbitDataGatheredFromWebExport.py
|
LeviBorodenko/MyAIGuide
|
3077173e63537522a49d50da5872147460c6469f
|
[
"MIT"
] | null | null | null |
src/MyAIGuide/data/fitbitDataGatheredFromWebExport.py
|
LeviBorodenko/MyAIGuide
|
3077173e63537522a49d50da5872147460c6469f
|
[
"MIT"
] | null | null | null |
src/MyAIGuide/data/fitbitDataGatheredFromWebExport.py
|
LeviBorodenko/MyAIGuide
|
3077173e63537522a49d50da5872147460c6469f
|
[
"MIT"
] | null | null | null |
import csv
import datetime
import os
import os.path
import pickle
import re
import numpy as np
import pandas as pd
def fitbitDataGatheredFromWebExport(fname, data):
directory = os.fsencode(fname)
for file in os.listdir(directory):
name = os.fsdecode(file)
if name.endswith(".csv"):
filename = (fname + name)
with open(filename, newline="") as csvfile:
spamreader = csv.reader(csvfile)
count = 0
for row in spamreader:
count = count + 1
if count > 2 and len(row):
day = row[0][0:2]
month = row[0][3:5]
year = row[0][6:10]
date = year + "-" + month + "-" + day
data.loc[date, "steps"] = int(row[2].replace(",", ""))
data.loc[date, "denivelation"] = int(row[4])
return data
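# Illustrative usage sketch; not part of the original module. The folder path
# below is hypothetical: it should contain the CSV files exported from the
# Fitbit web interface, and `data` can be any DataFrame indexed by date strings.
if __name__ == "__main__":
    data = pd.DataFrame(columns=["steps", "denivelation"])
    data = fitbitDataGatheredFromWebExport("data/fitbitExport/", data)
    print(data.head())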
| 30.7
| 76
| 0.507058
|
6247e0adabd1ad839b87deca75ef1b737289015f
| 3,435
|
py
|
Python
|
Supervised_Categorization_Research/settings.py
|
AdityaKapoor74/Supervised_Categorization_Study_Pt2
|
abedfa64d708360694e5cc00cfae866c5cfaebe8
|
[
"MIT"
] | null | null | null |
Supervised_Categorization_Research/settings.py
|
AdityaKapoor74/Supervised_Categorization_Study_Pt2
|
abedfa64d708360694e5cc00cfae866c5cfaebe8
|
[
"MIT"
] | null | null | null |
Supervised_Categorization_Research/settings.py
|
AdityaKapoor74/Supervised_Categorization_Study_Pt2
|
abedfa64d708360694e5cc00cfae866c5cfaebe8
|
[
"MIT"
] | null | null | null |
"""
Django settings for Supervised_Categorization_Research project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gz$2j*msxfl4eb2qm-z^_4%4z(up7_&97i1qjjig8=aw8pef+u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost','127.0.0.1','159.89.160.154']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Questionnaire_type1',
'Questionnaire_type2',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Supervised_Categorization_Research.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Supervised_Categorization_Research.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
# SQL
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| 27.047244
| 91
| 0.704803
|
8eafe43919eafa787f2a86eb18ed99005d201642
| 393
|
py
|
Python
|
pythainlp/tag/__init__.py
|
preenet/pythainlp
|
52df5221174f32c04155973b452c24e569b7380a
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/tag/__init__.py
|
preenet/pythainlp
|
52df5221174f32c04155973b452c24e569b7380a
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/tag/__init__.py
|
preenet/pythainlp
|
52df5221174f32c04155973b452c24e569b7380a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Linguistic tagger.
Tagging each token in a sentence with supplementary information,
such as its Part-of-Speech (POS) tag, and Named Entity Recognition (NER) tag.
"""
__all__ = ["pos_tag", "pos_tag_sents", "tag_provinces"]
from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.pos_tag import pos_tag
from pythainlp.tag.pos_tag import pos_tag_sents
| 28.071429
| 77
| 0.765903
|
f297546eb5c3eb51cda5cc8facc2c04ff1ffd43f
| 892
|
py
|
Python
|
permitify/src/von_connector/eventloop.py
|
bioharz/indy-ssivc-tutorial
|
13508142cea67b839f9edfa5e0ad6013bf56e398
|
[
"Apache-2.0"
] | 82
|
2018-05-08T23:27:07.000Z
|
2022-03-10T21:33:24.000Z
|
permitify/src/von_connector/eventloop.py
|
bioharz/indy-ssivc-tutorial
|
13508142cea67b839f9edfa5e0ad6013bf56e398
|
[
"Apache-2.0"
] | 35
|
2018-04-17T14:17:59.000Z
|
2019-03-14T10:37:54.000Z
|
permitify/src/von_connector/eventloop.py
|
bioharz/indy-ssivc-tutorial
|
13508142cea67b839f9edfa5e0ad6013bf56e398
|
[
"Apache-2.0"
] | 69
|
2018-05-08T16:26:06.000Z
|
2022-01-25T08:51:25.000Z
|
"""
Copyright 2017 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
def do(coro):
event_loop = None
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop.run_until_complete(coro)
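# Illustrative usage sketch; not part of the original module: run a coroutine
# to completion with do(). The coroutine below is purely hypothetical.
if __name__ == "__main__":
    async def _demo():
        await asyncio.sleep(0.1)
        return "done"
    print(do(_demo()))  # -> "done"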
| 35.68
| 95
| 0.765695
|
b9a3d0dd3e21eed5df5e3e4e7f49db438105d970
| 5,138
|
py
|
Python
|
python-package/test/test_data_conversion.py
|
c-bata/xlearn
|
499a1836532e6efd9643bd1d17d76f5d874f8903
|
[
"Apache-2.0"
] | 3,144
|
2017-11-06T08:48:13.000Z
|
2022-03-20T00:35:50.000Z
|
python-package/test/test_data_conversion.py
|
c-bata/xlearn
|
499a1836532e6efd9643bd1d17d76f5d874f8903
|
[
"Apache-2.0"
] | 325
|
2017-11-18T12:59:39.000Z
|
2022-02-24T07:19:03.000Z
|
python-package/test/test_data_conversion.py
|
c-bata/xlearn
|
499a1836532e6efd9643bd1d17d76f5d874f8903
|
[
"Apache-2.0"
] | 612
|
2017-11-09T14:19:17.000Z
|
2022-01-24T10:17:49.000Z
|
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# This file test the data conversion for sklearn API
import unittest
import tempfile
import os
import numpy as np
from xlearn import write_data_to_xlearn_format
from scipy.sparse import csr_matrix
from sklearn.datasets import load_svmlight_file
class TestDataConversion(unittest.TestCase):
"""
Test data conversion to libsvm and libffm inside LRModel, FMModel and FFMModel
"""
def setUp(self):
# data generation
self.num_rows = 10
self.num_features = 4
self.X = np.random.randn(self.num_rows, self.num_features)
self.X[self.X < 0] = 0 # introduce sparsity
self.y = np.random.binomial(1, 0.5, size=(self.num_rows, 1))
self.fields = np.array([1, 2, 1, 0])
def _read_libffm_file(self, filename):
"""
An internal function for reading libffm back to numpy array.
"""
X_true = np.zeros((self.num_rows, self.num_features))
y_true = np.zeros((self.num_rows, 1))
field_true = np.zeros((self.num_features, 1))
with open(filename, 'r') as f:
i = 0
for line in f:
tmp_row = line.replace('\n', '').split(' ')
# extract label
y_true[i] = int(tmp_row[0])
# extract data and fields
for k in range(1, len(tmp_row)):
if len(tmp_row[k]) > 0:
tmp_str = tmp_row[k].split(':')
j = int(tmp_str[1])
field_true[j] = int(tmp_str[0])
tmp_data = float(tmp_str[2])
X_true[i, j] = tmp_data
i = i + 1
return X_true, y_true, field_true
def test_convert_numpy_to_libsvm(self):
"""
Test if the conversion between libsvm and numpy array is correct
"""
file = tempfile.NamedTemporaryFile(delete=False)
# write to temporary files
write_data_to_xlearn_format(self.X, self.y, file.name)
# load data back and compare if they are the same as original data
X_true, y_true = load_svmlight_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(self.X, X_true.todense()))
assert np.all(self.y.ravel() == y_true.ravel())
def test_convert_csr_to_libsvm(self):
"""
Test if the conversion between libsvm and csr matrix is correct
"""
X_spase = csr_matrix(self.X)
file = tempfile.NamedTemporaryFile(delete=False)
# write to temporary files
write_data_to_xlearn_format(X_spase, self.y, file.name)
# load data back and compare if they are the same as original data
X_true, y_true = load_svmlight_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(X_spase.todense(), X_true.todense()))
assert np.all(self.y.ravel() == y_true.ravel())
def test_convert_numpy_to_libffm(self):
"""
Test if the conversion between libffm and numpy array is correct
"""
file = tempfile.NamedTemporaryFile(delete=False)
# write data to libffm format
write_data_to_xlearn_format(self.X, self.y, file.name, fields=self.fields)
# read back data from file
X_true, y_true, field_true = self._read_libffm_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(self.X, X_true))
assert np.all(self.y.ravel() == y_true.ravel())
assert np.all(self.fields.ravel() == field_true.ravel())
def test_convert_csr_to_libffm(self):
"""
Test if the conversion between libffm and csr matrix is correct
"""
X_sparse = csr_matrix(self.X)
file = tempfile.NamedTemporaryFile(delete=False)
# write data to libffm format
write_data_to_xlearn_format(X_sparse, self.y, file.name, fields=self.fields)
# read back data from file
X_true, y_true, field_true = self._read_libffm_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(X_sparse.todense(), X_true))
assert np.all(self.y.ravel() == y_true.ravel())
assert np.all(self.fields.ravel() == field_true.ravel())
if __name__ == '__main__':
unittest.main()
| 35.191781
| 84
| 0.622421
|
e4abe8fd44e71c9547f71ac14673e1454bf0c656
| 3,026
|
py
|
Python
|
inspect_live_objects/traceback_example.py
|
rolandovillca/python_introduction_basic
|
d1333a832a9e2b103e128a9dfc0c0da0952c267f
|
[
"MIT"
] | null | null | null |
inspect_live_objects/traceback_example.py
|
rolandovillca/python_introduction_basic
|
d1333a832a9e2b103e128a9dfc0c0da0952c267f
|
[
"MIT"
] | null | null | null |
inspect_live_objects/traceback_example.py
|
rolandovillca/python_introduction_basic
|
d1333a832a9e2b103e128a9dfc0c0da0952c267f
|
[
"MIT"
] | null | null | null |
'''
This module provides a standard interface to extract,
format and print stack traces of Python programs.
It exactly mimics the behavior of the Python interpreter when it prints a stack trace.
This is useful when you want to print stack traces under program control,
such as in a “wrapper” around the interpreter.
The module uses traceback objects — this is the object type that is stored in
the variables sys.exc_traceback (deprecated)
and sys.last_traceback and returned as the third item from sys.exc_info().
'''
# EXAMPLE 1:
# ==============================================================================
# This simple example implements a basic read-eval-print loop,
# similar to (but less useful than) the standard Python interactive interpreter loop.
# For a more complete implementation of the interpreter loop, refer to the code module.
import sys, traceback
def run_user_code(envdir):
source = raw_input(">>> ")
try:
exec source in envdir
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
envdir = {}
while 1:
run_user_code(envdir)
# EXAMPLE 2: Demonstrates the different ways to print and format the exception and traceback:
# ==============================================================================
import sys, traceback
def lumberjack():
bright_side_of_death()
def bright_side_of_death():
return tuple()[0]
try:
lumberjack()
except IndexError:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_tb:"
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
print "*** print_exc:"
traceback.print_exc()
print "*** format_exc, first and last line:"
formatted_lines = traceback.format_exc().splitlines()
print formatted_lines[0]
print formatted_lines[-1]
print "*** format_exception:"
print repr(traceback.format_exception(exc_type, exc_value,
exc_traceback))
print "*** extract_tb:"
print repr(traceback.extract_tb(exc_traceback))
print "*** format_tb:"
print repr(traceback.format_tb(exc_traceback))
print "*** tb_lineno:", exc_traceback.tb_lineno
# EXAMPLE 3: This example shows the different ways to print and format the stack:
# ==============================================================================
import traceback
def another_function():
lumberstack()
def lumberstack():
traceback.print_stack()
print repr(traceback.extract_stack())
print repr(traceback.format_stack())
# EXAMPLE 4: This last example demonstrates the final few formatting functions:
# ==============================================================================
import traceback
traceback.format_list([('spam.py', 3, '<module>', 'spam.eggs()'), ('eggs.py', 42, 'eggs', 'return "bacon"')])
| 35.186047
| 109
| 0.630205
|
87691cf5a980b8d00b1b4e77eed80429009b46ee
| 7,942
|
py
|
Python
|
tools/wptrunner/wptrunner/formatters/tests/test_chromium.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 8
|
2019-04-09T21:13:05.000Z
|
2021-11-23T17:25:18.000Z
|
tools/wptrunner/wptrunner/formatters/tests/test_chromium.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 7
|
2021-01-07T23:42:14.000Z
|
2021-12-13T20:43:42.000Z
|
tools/wptrunner/wptrunner/formatters/tests/test_chromium.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 11
|
2019-04-12T01:20:16.000Z
|
2021-11-23T17:25:02.000Z
|
import json
import sys
from os.path import dirname, join
from six.moves import cStringIO as StringIO
from mozlog import handlers, structuredlog
sys.path.insert(0, join(dirname(__file__), "..", ".."))
from formatters.chromium import ChromiumFormatter
def test_chromium_required_fields(capfd):
# Test that the test results contain a handful of required fields.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# output a bunch of stuff
logger.suite_start(["test-id-1"], run_info={}, time=123)
logger.test_start("test-id-1")
logger.test_end("test-id-1", status="PASS", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_obj = json.load(output)
# Check for existence of required fields
assert "interrupted" in output_obj
assert "path_delimiter" in output_obj
assert "version" in output_obj
assert "num_failures_by_type" in output_obj
assert "tests" in output_obj
test_obj = output_obj["tests"]["test-id-1"]
assert "actual" in test_obj
assert "expected" in test_obj
def test_chromium_test_name_trie(capfd):
# Ensure test names are broken into directories and stored in a trie with
# test results at the leaves.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# output a bunch of stuff
logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"], run_info={},
time=123)
logger.test_start("/foo/bar/test-id-1")
logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
logger.test_start("/foo/test-id-2")
logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_obj = json.load(output)
# Ensure that the test names are broken up by directory name and that the
# results are stored at the leaves.
test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
assert test_obj["actual"] == "TIMEOUT"
assert test_obj["expected"] == "FAIL"
test_obj = output_obj["tests"]["foo"]["test-id-2"]
# The ERROR status is mapped to FAIL for Chromium
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "TIMEOUT"
def test_num_failures_by_type(capfd):
# Test that the number of failures by status type is correctly calculated.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run some tests with different statuses: 3 passes, 1 timeout
logger.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="PASS", expected="PASS")
logger.test_start("t2")
logger.test_end("t2", status="PASS", expected="PASS")
logger.test_start("t3")
logger.test_end("t3", status="PASS", expected="FAIL")
logger.test_start("t4")
logger.test_end("t4", status="TIMEOUT", expected="CRASH")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
num_failures_by_type = json.load(output)["num_failures_by_type"]
# We expect 3 passes and 1 timeout, nothing else.
assert sorted(num_failures_by_type.keys()) == ["PASS", "TIMEOUT"]
assert num_failures_by_type["PASS"] == 3
assert num_failures_by_type["TIMEOUT"] == 1
def test_subtest_messages(capfd):
# Tests accumulation of test output
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run two tests with subtest messages. The subtest name should be included
# in the output. We should also tolerate missing messages.
logger.suite_start(["t1", "t2"], run_info={}, time=123)
logger.test_start("t1")
logger.test_status("t1", status="FAIL", subtest="t1_a",
message="t1_a_message")
logger.test_status("t1", status="PASS", subtest="t1_b",
message="t1_b_message")
logger.test_end("t1", status="PASS", expected="PASS")
logger.test_start("t2")
# Currently, subtests with empty messages will be ignored
logger.test_status("t2", status="PASS", subtest="t2_a")
# A test-level message will also be appended
logger.test_end("t2", status="TIMEOUT", expected="PASS",
message="t2_message")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
"[PASS] t1_b: t1_b_message\n"
t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
assert t2_log == "[TIMEOUT] t2_message\n"
def test_subtest_failure(capfd):
# Tests that a test fails if a subtest fails
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
formatter = ChromiumFormatter()
logger.add_handler(handlers.StreamHandler(output, formatter))
# Run a test with some subtest failures.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_status("t1", status="FAIL", subtest="t1_a",
message="t1_a_message")
logger.test_status("t1", status="PASS", subtest="t1_b",
message="t1_b_message")
logger.test_status("t1", status="TIMEOUT", subtest="t1_c",
message="t1_c_message")
# Make sure the test name was added to the set of tests with subtest fails
assert "t1" in formatter.tests_with_subtest_fails
# The test status is reported as a pass here because the harness was able to
# run the test to completion.
logger.test_end("t1", status="PASS", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
t1_log = test_obj["artifacts"]["log"]
assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
"[PASS] t1_b: t1_b_message\n" \
"[TIMEOUT] t1_c: t1_c_message\n"
# The status of the test in the output is a failure because subtests failed,
# despite the harness reporting that the test passed.
assert test_obj["actual"] == "FAIL"
# Also ensure that the formatter cleaned up its internal state
assert "t1" not in formatter.tests_with_subtest_fails
| 37.11215
| 80
| 0.67376
|
b4ee3780bc05c7549582e862d26ebf84c7897113
| 716
|
py
|
Python
|
dlint/linters/format_string.py
|
timgates42/dlint
|
501acbc53f710fed00d16b443076581d03f33163
|
[
"BSD-3-Clause"
] | null | null | null |
dlint/linters/format_string.py
|
timgates42/dlint
|
501acbc53f710fed00d16b443076581d03f33163
|
[
"BSD-3-Clause"
] | null | null | null |
dlint/linters/format_string.py
|
timgates42/dlint
|
501acbc53f710fed00d16b443076581d03f33163
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from . import base
class FormatStringLinter(base.BaseLinter):
"""This linter looks for use of Python's format string operator, e.g. '%'.
Use of the format string operator in Python can lead to information
disclosure, DoS, etc, if not done properly[1].
[1] http://www.drdobbs.com/security/programming-language-format-string-vulne/197002914?pgno=3
"""
# We're still working on this one...
off_by_default = True
_code = 'DUO104'
_error_tmpl = 'DUO104 avoid format strings, please use "format" function'
def visit_BinOp(self, node):
pass
| 24.689655
| 97
| 0.691341
|
64cf43bace1ab590c1ed4ba111c5c8f668a526bc
| 6,271
|
py
|
Python
|
tool_validation/validate_Estimate_Bankfull.py
|
FluvialGeomorph/FluvialGeomorph-toolbox
|
d7b0712d4841af5cc341443d695bf0481eeccb23
|
[
"CC0-1.0"
] | null | null | null |
tool_validation/validate_Estimate_Bankfull.py
|
FluvialGeomorph/FluvialGeomorph-toolbox
|
d7b0712d4841af5cc341443d695bf0481eeccb23
|
[
"CC0-1.0"
] | 1
|
2021-03-25T12:38:23.000Z
|
2021-03-25T12:38:23.000Z
|
tool_validation/validate_Estimate_Bankfull.py
|
FluvialGeomorph/FluvialGeomorph-toolbox
|
d7b0712d4841af5cc341443d695bf0481eeccb23
|
[
"CC0-1.0"
] | null | null | null |
"""____________________________________________________________________________
Script Name: _validate_Estimate_Bankfull.py
Description: Used to validate the script tool _Estimate_Bankfull.py.
Date: 09/12/2020
Usage:
This code gets copied into the Validation tab of the script tool.
Features:
* Populates the `regions` parameter's value list. Use the `update_regions.R`
function to get the latest list of regions.
____________________________________________________________________________"""
import arcpy
class ToolValidator(object):
"""Class for validating a tool's parameter values and controlling
the behavior of the tool's dialog."""
def __init__(self):
"""Setup arcpy and the list of tool parameters."""
self.params = arcpy.GetParameterInfo()
def initializeParameters(self):
"""Refine the properties of a tool's parameters. This method is
called when the tool is opened."""
# Set the list of regions
self.params[18].filter.list = ["Altlantic Plain",
"Appalachain Highlands",
"Central and Southern AZ",
"Eastern - Appalachian Plateau, New England, Valley and Ridge",
"Eastern - Coastal Plain",
"Eastern - Piedmont",
"Eastern AZ/NM",
"Eastern Highlands",
"Eastern United States",
"IL River LTE 120",
"IL River LTE 300",
"IL River Panther Creek",
"Illinois River",
"IN Central Till Plain",
"IN Northern Moraine and Lake",
"IN Southern Hills and Lowlands",
"Interior Highlands",
"Interior Plains",
"Intermontane Plateau",
"Laurentian Upland",
"Lower Southern Driftless",
"MA",
"MD Allegheny Plateau/Valley and Ridge",
"MD Eastern Coastal Plain",
"MD Piedmont",
"MD Western Coastal Plain",
"ME Coastal and Central",
"MI Southern Lower Ecoregion",
"Mid-Atlantic",
"Minnesota Eastern",
"Minnesota Western",
"NC Coastal Plain",
"NC Mountains",
"NC Piedmont Rural",
"NC Piedmont Urban",
"New England",
"NH",
"Northeast - Appalachian Plateau, Coastal Plain, New England, Piedmont, Valley and Ridge",
"Northeast - Appalachian Plateau, New England, Piedmont, Valley and Ridge",
"Northern Appalachians",
"NY Hydrologic Region 1/2",
"NY Hydrologic Region 3",
"NY Hydrologic Region 4/4a",
"NY Hydrologic Region 5",
"NY Hydrologic Region 6",
"NY Hydrologic Region 7",
"OH Region A",
"OH Region B",
"ON Southern",
"PA Carbonate Areas",
"PA Non-Carbonate Areas",
"PA Piedmont 1",
"PA Piedmont 2",
"Pacific Maritime Mountain",
"Pacific Mountain System",
"Pacific Northwest",
"Rocky Mountain System",
"San Francisco Bay",
"Southern Appalachians",
"Southern Driftless",
"Upper Green River",
"Upper Salmon River",
"USA",
"VA Piedmont",
"VA, MD Coastal Plain",
"VA, MD, WV Valley and Ridge",
"VT",
"West Interior Basin and Range",
"Western Cordillera",
"WV Appalachian Plateau",
"WV Eastern Valley and Ridge",
"WV Western Appalachian Plateau"]
# Set the default region
self.params[18].value = "USA"
return
def updateParameters(self):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
| 54.530435
| 131
| 0.383352
|
4124542e46f610bc98426264e06fe9be29c1ad32
| 9,738
|
py
|
Python
|
colour/difference/cam02_ucs.py
|
OmarWagih1/colour
|
bdc880a2783ff523dafb19f1233212dd03a639bd
|
[
"BSD-3-Clause"
] | 2
|
2020-06-20T03:44:41.000Z
|
2020-06-20T14:08:41.000Z
|
colour/difference/cam02_ucs.py
|
OmarWagih1/colour
|
bdc880a2783ff523dafb19f1233212dd03a639bd
|
[
"BSD-3-Clause"
] | null | null | null |
colour/difference/cam02_ucs.py
|
OmarWagih1/colour
|
bdc880a2783ff523dafb19f1233212dd03a639bd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
:math:`\\Delta E'` - Delta E Colour Difference - Luo, Cui and Li (2006)
======================================================================
Defines :math:`\\Delta E'` colour difference computation objects based on *Luo
et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, and *CAM02-UCS* colourspaces:
- :func:`colour.difference.delta_E_CAM02LCD`
- :func:`colour.difference.delta_E_CAM02SCD`
- :func:`colour.difference.delta_E_CAM02UCS`
References
----------
- :cite:`Luo2006b` : Luo, M. Ronnier, Cui, G., & Li, C. (2006). Uniform
colour spaces based on CIECAM02 colour appearance model. Color Research &
Application, 31(4), 320-330. doi:10.1002/col.20227
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.utilities import tsplit
from colour.models.cam02_ucs import COEFFICIENTS_UCS_LUO2006
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'delta_E_Luo2006', 'delta_E_CAM02LCD', 'delta_E_CAM02SCD',
'delta_E_CAM02UCS'
]
def delta_E_Luo2006(Jpapbp_1, Jpapbp_2, coefficients):
"""
Returns the difference :math:`\\Delta E'` between two given
*Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, or *CAM02-UCS* colourspaces
:math:`J'a'b'` arrays.
Parameters
----------
Jpapbp_1 : array_like
Standard / reference *Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, or
*CAM02-UCS* colourspaces :math:`J'a'b'` array.
Jpapbp_2 : array_like
Sample / test *Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, or
*CAM02-UCS* colourspaces :math:`J'a'b'` array.
coefficients : array_like
Coefficients of one of the *Luo et al. (2006)* *CAM02-LCD*,
*CAM02-SCD*, or *CAM02-UCS* colourspaces.
Returns
-------
numeric or ndarray
Colour difference :math:`\\Delta E'`.
Notes
-----
+--------------+------------------------+--------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+==============+========================+====================+
| ``Jpapbp_1`` | ``Jp_1`` : [0, 100] | ``Jp_1`` : [0, 1] |
| | | |
| | ``ap_1`` : [-100, 100] | ``ap_1`` : [-1, 1] |
| | | |
| | ``bp_1`` : [-100, 100] | ``bp_1`` : [-1, 1] |
+--------------+------------------------+--------------------+
| ``Jpapbp_2`` | ``Jp_2`` : [0, 100] | ``Jp_2`` : [0, 1] |
| | | |
| | ``ap_2`` : [-100, 100] | ``ap_2`` : [-1, 1] |
| | | |
| | ``bp_2`` : [-100, 100] | ``bp_2`` : [-1, 1] |
+--------------+------------------------+--------------------+
Examples
--------
>>> Jpapbp_1 = np.array([54.90433134, -0.08450395, -0.06854831])
>>> Jpapbp_2 = np.array([54.80352754, -3.96940084, -13.57591013])
>>> delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
... COEFFICIENTS_UCS_LUO2006['CAM02-LCD'])
... # doctest: +ELLIPSIS
    14.0555464...
"""
J_p_1, a_p_1, b_p_1 = tsplit(Jpapbp_1)
J_p_2, a_p_2, b_p_2 = tsplit(Jpapbp_2)
K_L, _c_1, _c_2 = tsplit(coefficients)
d_E = np.sqrt(((J_p_1 - J_p_2) / K_L) ** 2 + (a_p_1 - a_p_2) ** 2 +
(b_p_1 - b_p_2) ** 2)
return d_E
def delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2):
"""
Returns the difference :math:`\\Delta E'` between two given
*Luo et al. (2006)* *CAM02-LCD* colourspaces :math:`J'a'b'` arrays.
Parameters
----------
Jpapbp_1 : array_like
Standard / reference *Luo et al. (2006)* *CAM02-LCD* colourspaces
:math:`J'a'b'` array.
Jpapbp_2 : array_like
Sample / test *Luo et al. (2006)* *CAM02-LCD* colourspaces
:math:`J'a'b'` array.
Returns
-------
numeric or ndarray
Colour difference :math:`\\Delta E'`.
Notes
-----
+--------------+------------------------+--------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+==============+========================+====================+
| ``Jpapbp_1`` | ``Jp_1`` : [0, 100] | ``Jp_1`` : [0, 1] |
| | | |
| | ``ap_1`` : [-100, 100] | ``ap_1`` : [-1, 1] |
| | | |
| | ``bp_1`` : [-100, 100] | ``bp_1`` : [-1, 1] |
+--------------+------------------------+--------------------+
| ``Jpapbp_2`` | ``Jp_2`` : [0, 100] | ``Jp_2`` : [0, 1] |
| | | |
| | ``ap_2`` : [-100, 100] | ``ap_2`` : [-1, 1] |
| | | |
| | ``bp_2`` : [-100, 100] | ``bp_2`` : [-1, 1] |
+--------------+------------------------+--------------------+
References
----------
:cite:`Luo2006b`
Examples
--------
>>> Jpapbp_1 = np.array([54.90433134, -0.08450395, -0.06854831])
>>> Jpapbp_2 = np.array([54.80352754, -3.96940084, -13.57591013])
>>> delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2) # doctest: +ELLIPSIS
14.0555464...
"""
return delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-LCD'])
def delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2):
"""
Returns the difference :math:`\\Delta E'` between two given
*Luo et al. (2006)* *CAM02-SCD* colourspaces :math:`J'a'b'` arrays.
Parameters
----------
Jpapbp_1 : array_like
Standard / reference *Luo et al. (2006)* *CAM02-SCD* colourspaces
:math:`J'a'b'` array.
Jpapbp_2 : array_like
Sample / test *Luo et al. (2006)* *CAM02-SCD* colourspaces
:math:`J'a'b'` array.
Returns
-------
numeric or ndarray
Colour difference :math:`\\Delta E'`.
Notes
-----
+--------------+------------------------+--------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+==============+========================+====================+
| ``Jpapbp_1`` | ``Jp_1`` : [0, 100] | ``Jp_1`` : [0, 1] |
| | | |
| | ``ap_1`` : [-100, 100] | ``ap_1`` : [-1, 1] |
| | | |
| | ``bp_1`` : [-100, 100] | ``bp_1`` : [-1, 1] |
+--------------+------------------------+--------------------+
| ``Jpapbp_2`` | ``Jp_2`` : [0, 100] | ``Jp_2`` : [0, 1] |
| | | |
| | ``ap_2`` : [-100, 100] | ``ap_2`` : [-1, 1] |
| | | |
| | ``bp_2`` : [-100, 100] | ``bp_2`` : [-1, 1] |
+--------------+------------------------+--------------------+
References
----------
:cite:`Luo2006b`
Examples
--------
>>> Jpapbp_1 = np.array([54.90433134, -0.08450395, -0.06854831])
>>> Jpapbp_2 = np.array([54.80352754, -3.96940084, -13.57591013])
>>> delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2) # doctest: +ELLIPSIS
14.0551718...
"""
return delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-SCD'])
def delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2):
"""
Returns the difference :math:`\\Delta E'` between two given
*Luo et al. (2006)* *CAM02-UCS* colourspaces :math:`J'a'b'` arrays.
Parameters
----------
Jpapbp_1 : array_like
Standard / reference *Luo et al. (2006)* *CAM02-UCS* colourspaces
:math:`J'a'b'` array.
Jpapbp_2 : array_like
Sample / test *Luo et al. (2006)* *CAM02-UCS* colourspaces
:math:`J'a'b'` array.
Returns
-------
numeric or ndarray
Colour difference :math:`\\Delta E'`.
Notes
-----
+--------------+------------------------+--------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+==============+========================+====================+
| ``Jpapbp_1`` | ``Jp_1`` : [0, 100] | ``Jp_1`` : [0, 1] |
| | | |
| | ``ap_1`` : [-100, 100] | ``ap_1`` : [-1, 1] |
| | | |
| | ``bp_1`` : [-100, 100] | ``bp_1`` : [-1, 1] |
+--------------+------------------------+--------------------+
| ``Jpapbp_2`` | ``Jp_2`` : [0, 100] | ``Jp_2`` : [0, 1] |
| | | |
| | ``ap_2`` : [-100, 100] | ``ap_2`` : [-1, 1] |
| | | |
| | ``bp_2`` : [-100, 100] | ``bp_2`` : [-1, 1] |
+--------------+------------------------+--------------------+
References
----------
:cite:`Luo2006b`
Examples
--------
>>> Jpapbp_1 = np.array([54.90433134, -0.08450395, -0.06854831])
>>> Jpapbp_2 = np.array([54.80352754, -3.96940084, -13.57591013])
>>> delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2) # doctest: +ELLIPSIS
14.0552982...
"""
return delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-UCS'])
| 37.744186
| 78
| 0.40573
|
6d14c63c81cc87bc44147f27b5d009c1d3f55ed2
| 112
|
py
|
Python
|
Web3/FullStack_DeFi_App/tests/conftest.py
|
C-Mierez/Web3-Solidity
|
6a24f9cf87dc303c8e9e74e4e895bf306940afb3
|
[
"MIT"
] | 1
|
2022-02-02T21:46:45.000Z
|
2022-02-02T21:46:45.000Z
|
Web3/FullStack_DeFi_App/tests/conftest.py
|
C-Mierez/Web3-Solidity
|
6a24f9cf87dc303c8e9e74e4e895bf306940afb3
|
[
"MIT"
] | null | null | null |
Web3/FullStack_DeFi_App/tests/conftest.py
|
C-Mierez/Web3-Solidity
|
6a24f9cf87dc303c8e9e74e4e895bf306940afb3
|
[
"MIT"
] | null | null | null |
import pytest
from web3 import Web3
@pytest.fixture
def to_stake_amount():
return Web3.toWei(1, "ether")
| 12.444444
| 33
| 0.732143
|
9ccb7d6739b66a08d46580cc104651a304b45960
| 1,740
|
py
|
Python
|
tools/perf/page_sets/pathological_mobile_sites.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
tools/perf/page_sets/pathological_mobile_sites.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
tools/perf/page_sets/pathological_mobile_sites.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class PathologicalMobileSitesPage(page_module.Page):
def __init__(self, url, page_set):
super(PathologicalMobileSitesPage, self).__init__(
url=url, page_set=page_set, credentials_path='data/credentials.json',
shared_page_state_class=shared_page_state.SharedMobilePageState)
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class PathologicalMobileSitesPageSet(story.StorySet):
"""Pathologically bad and janky sites on mobile."""
def __init__(self):
super(PathologicalMobileSitesPageSet, self).__init__(
archive_data_file='data/pathological_mobile_sites.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
sites = ['http://edition.cnn.com',
'http://m.espn.go.com/nhl/rankings',
'http://recode.net',
'http://sports.yahoo.com/',
'http://www.latimes.com',
('http://www.pbs.org/newshour/bb/'
'much-really-cost-live-city-like-seattle/#the-rundown'),
('http://www.theguardian.com/politics/2015/mar/09/'
'ed-balls-tory-spending-plans-nhs-charging'),
'http://www.zdnet.com',
'http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
'https://www.linkedin.com/in/linustorvalds']
for site in sites:
self.AddStory(PathologicalMobileSitesPage(site, self))
| 38.666667
| 77
| 0.696552
|
991eb596d956ceded74908388f05da8cafc36e78
| 4,793
|
py
|
Python
|
setup.py
|
mneundorfer/asn1crypto
|
3c9f12daee77738bd7eba9b0bf756e2514b30cc1
|
[
"MIT"
] | null | null | null |
setup.py
|
mneundorfer/asn1crypto
|
3c9f12daee77738bd7eba9b0bf756e2514b30cc1
|
[
"MIT"
] | null | null | null |
setup.py
|
mneundorfer/asn1crypto
|
3c9f12daee77738bd7eba9b0bf756e2514b30cc1
|
[
"MIT"
] | 1
|
2021-01-20T11:42:17.000Z
|
2021-01-20T11:42:17.000Z
|
import codecs
import os
import shutil
import sys
import warnings
import setuptools
from setuptools import setup, Command
from setuptools.command.egg_info import egg_info
PACKAGE_NAME = 'asn1crypto'
PACKAGE_VERSION = '1.4.0'
PACKAGE_ROOT = os.path.dirname(os.path.abspath(__file__))
# setuptools 38.6.0 and newer know about long_description_content_type, but
# distutils still complains about it, so silence the warning
sv = setuptools.__version__
svi = tuple(int(o) if o.isdigit() else o for o in sv.split('.'))
if svi >= (38, 6):
warnings.filterwarnings(
'ignore',
"Unknown distribution option: 'long_description_content_type'",
module='distutils.dist'
)
# Try to load the tests first from the source repository layout. If that
# doesn't work, we assume this file is in the release package, and the tests
# are part of the package {PACKAGE_NAME}_tests.
if os.path.exists(os.path.join(PACKAGE_ROOT, 'tests')):
tests_require = []
test_suite = 'tests.make_suite'
else:
tests_require = ['%s_tests' % PACKAGE_NAME]
test_suite = '%s_tests.make_suite' % PACKAGE_NAME
# This allows us to send the LICENSE and docs when creating a sdist. Wheels
# automatically include the LICENSE, and don't need the docs. For these
# to be included, the command must be "python setup.py sdist".
package_data = {}
if sys.argv[1:] == ['sdist'] or sorted(sys.argv[1:]) == ['-q', 'sdist']:
package_data[PACKAGE_NAME] = [
'../LICENSE',
'../*.md',
'../docs/*.md',
]
# Ensures a copy of the LICENSE is included with the egg-info for
# install and bdist_egg commands
class EggInfoCommand(egg_info):
def run(self):
egg_info_path = os.path.join(
PACKAGE_ROOT,
'%s.egg-info' % PACKAGE_NAME
)
if not os.path.exists(egg_info_path):
os.mkdir(egg_info_path)
shutil.copy2(
os.path.join(PACKAGE_ROOT, 'LICENSE'),
os.path.join(egg_info_path, 'LICENSE')
)
egg_info.run(self)
class CleanCommand(Command):
user_options = [
('all', 'a', '(Compatibility with original clean command)'),
]
def initialize_options(self):
self.all = False
def finalize_options(self):
pass
def run(self):
sub_folders = ['build', 'temp', '%s.egg-info' % PACKAGE_NAME]
if self.all:
sub_folders.append('dist')
for sub_folder in sub_folders:
full_path = os.path.join(PACKAGE_ROOT, sub_folder)
if os.path.exists(full_path):
shutil.rmtree(full_path)
for root, dirs, files in os.walk(os.path.join(PACKAGE_ROOT, PACKAGE_NAME)):
for filename in files:
if filename[-4:] == '.pyc':
os.unlink(os.path.join(root, filename))
for dirname in list(dirs):
if dirname == '__pycache__':
shutil.rmtree(os.path.join(root, dirname))
readme = ''
with codecs.open(os.path.join(PACKAGE_ROOT, 'readme.md'), 'r', 'utf-8') as f:
readme = f.read()
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=(
'Fast ASN.1 parser and serializer with definitions for private keys, '
'public keys, certificates, CRL, OCSP, CMS, PKCS#3, PKCS#7, PKCS#8, '
'PKCS#12, PKCS#5, X.509 and TSP'
),
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/wbond/asn1crypto',
author='wbond',
author_email='will@wbond.net',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security :: Cryptography',
],
keywords='asn1 crypto pki x509 certificate rsa dsa ec dh',
packages=[PACKAGE_NAME],
package_data=package_data,
tests_require=tests_require,
test_suite=test_suite,
cmdclass={
'clean': CleanCommand,
'egg_info': EggInfoCommand,
}
)
| 30.144654
| 83
| 0.626747
|
ff18e632e59c3428086a4c096c36672835da720e
| 4,406
|
py
|
Python
|
Code/bases.py
|
Evansdava/CS-1.3-Core-Data-Structures
|
e101c89ecb0b58b67196bfc703491deaa053f447
|
[
"MIT"
] | null | null | null |
Code/bases.py
|
Evansdava/CS-1.3-Core-Data-Structures
|
e101c89ecb0b58b67196bfc703491deaa053f447
|
[
"MIT"
] | 6
|
2020-02-14T05:51:52.000Z
|
2020-03-10T17:05:44.000Z
|
Code/bases.py
|
Evansdava/CS-1.3-Core-Data-Structures
|
e101c89ecb0b58b67196bfc703491deaa053f447
|
[
"MIT"
] | null | null | null |
#!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
"""Decode given digits in given base to number in base 10.
digits: str -- string representation of number (in given base)
base: int -- base of given number
return: int -- integer representation of number (in base 10)
"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
printable = string.digits + string.ascii_lowercase
digits = digits.lower()
"""
Check the length of the string
Loop from right to left
For each digit, multiply it by the power of the base
Add the results together
Return total
"""
# total = 0
# i = 1
# while i <= len(digits):
# dig = digits[-i]
# num = printable.find(dig)
# total += num * (base ** (i - 1))
# i += 1
# return total
decimal, i = 0, 0
for digit in reversed(digits):
decimal += printable.find(digit) * pow(base, i)
i += 1
return decimal
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)
"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
assert number >= 0, 'number is negative: {}'.format(number)
printable = string.digits + string.ascii_lowercase
"""
While the number can be divided by the base:
Divide the number by the base
Put the remainders in a list
If that number can be divided by the base, repeat this loop
The number of iterations of the loop is the number of digits - 1
For each digit, enter the remainder of the digit before it
"""
# remainders = get_remainders(number, base)
# result = ""
# for rem in reversed(remainders):
# result += printable[rem]
# remainders.clear()
# return result
num, survivor = divmod(number, base)
survivors = []
survivors.append(survivor)
while num > 0:
num, survivor = divmod(num, base)
survivors.append(survivor)
casual_ties = ""
for survivor in reversed(survivors):
casual_ties += printable[survivor]
return casual_ties
def get_remainders(number, base, remainders=None):
    """Helper function for encode() to get a list of remainders"""
    if remainders is None:  # avoid a shared mutable default argument
        remainders = []
    if number / base < 1:
remainders.append(number)
else:
num, mod = divmod(number, base)
remainders.append(mod)
remainders = get_remainders(num, base, remainders)
return remainders
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)
"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
return encode(decode(digits, base1), base2)
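# Illustrative usage sketch; not part of the original module: a few round trips
# through decode/encode/convert.
def _example_conversions():
    print(decode('ff', 16))          # 255
    print(encode(255, 2))            # '11111111'
    print(convert('ff', 16, 2))      # '11111111'
    print(convert('101010', 2, 36))  # '16' (42 in base 36 is '16')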
def main():
"""Read command-line arguments and convert given digits between bases."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 3:
digits = args[0].lower()
base1 = int(args[1])
base2 = int(args[2])
# Convert given digits between bases
result = convert(digits, base1, base2)
print('{} in base {} is {} in base {}'.format(digits, base1,
result, base2))
else:
print('Usage: {} digits base1 base2'.format(sys.argv[0]))
# print('Converts digits from base1 to base2')
# print(decode("9a", 16))
# print(encode(939, 16))
if __name__ == '__main__':
main()
| 34.155039
| 79
| 0.629369
|
0653dc5f108e956ecec4a91165c503ccd4cfd719
| 8,864
|
py
|
Python
|
tests/test_singleton.py
|
GlobalParametrics/TCRM
|
0882ad6f857c50c7f276ec2b3099b3006939664e
|
[
"CC-BY-4.0"
] | 1
|
2021-09-01T09:27:57.000Z
|
2021-09-01T09:27:57.000Z
|
tests/test_singleton.py
|
GlobalParametrics/TCRM
|
0882ad6f857c50c7f276ec2b3099b3006939664e
|
[
"CC-BY-4.0"
] | null | null | null |
tests/test_singleton.py
|
GlobalParametrics/TCRM
|
0882ad6f857c50c7f276ec2b3099b3006939664e
|
[
"CC-BY-4.0"
] | 1
|
2021-04-08T03:10:18.000Z
|
2021-04-08T03:10:18.000Z
|
import unittest
import time
from Utilities.singleton import Singleton, forgetAllSingletons, SingletonException
import threading
class TestSingleton(unittest.TestCase):
def testReturnsSameObject(self):
"""Test normal use case"""
#Demonstrates normal use -- just call getInstance and it returns a singleton instance
class A(Singleton):
def __init__(self):
super(A, self).__init__()
a1 = A.getInstance()
a2 = A.getInstance()
self.assertEquals(id(a1), id(a2))
def testInstantiateWithMultiArgConstructor(self):
"""Test constructor with args"""
# If the singleton needs args to construct, include them in the first
        # call to getInstance.
class B(Singleton):
def __init__(self, arg1, arg2):
super(B, self).__init__()
self.arg1 = arg1
self.arg2 = arg2
b1 = B.getInstance('arg1 value', 'arg2 value')
b2 = B.getInstance()
self.assertEquals(b1.arg1, 'arg1 value')
self.assertEquals(b1.arg2, 'arg2 value')
self.assertEquals(id(b1), id(b2))
def testInstantiateWithKeywordArg(self):
"""Test instantiation with keyword args"""
class B(Singleton):
def __init__(self, arg1=5):
super(B, self).__init__()
self.arg1 = arg1
b1 = B.getInstance('arg1 value')
b2 = B.getInstance()
self.assertEquals(b1.arg1, 'arg1 value')
self.assertEquals(id(b1), id(b2))
def testTryToInstantiateWithoutNeededArgs(self):
"""Test instantiation without required arguments"""
class B(Singleton):
def __init__(self, arg1, arg2):
super(B, self).__init__()
self.arg1 = arg1
self.arg2 = arg2
self.assertRaises(SingletonException, B.getInstance)
def testPassTypeErrorIfAllArgsThere(self):
"""
Make sure the test for capturing missing args doesn't interfere with a normal TypeError.
"""
class B(Singleton):
def __init__(self, arg1, arg2):
super(B, self).__init__()
self.arg1 = arg1
self.arg2 = arg2
raise TypeError, 'some type error'
self.assertRaises(TypeError, B.getInstance, 1, 2)
def testTryToInstantiateWithoutGetInstance(self):
"""Test instantiation only occurs through getInstance()"""
# Demonstrates that singletons can ONLY be instantiated through
# getInstance, as long as they call Singleton.__init__ during construction.
# If this check is not required, you don't need to call Singleton.__init__().
class A(Singleton):
def __init__(self):
super(A, self).__init__()
self.assertRaises(SingletonException, A)
def testDontAllowNew(self):
"""Test instatiating illegal class raises SingletonException"""
def instantiatedAnIllegalClass():
class A(Singleton):
def __init__(self):
super(A, self).__init__()
def __new__(metaclass, strName, tupBases, dct):
return super(MetaSingleton, metaclass).__new__(metaclass, strName, tupBases, dct) # pylint: disable-msg=E0602
self.assertRaises(SingletonException, instantiatedAnIllegalClass)
def testDontAllowArgsAfterConstruction(self):
"""Test instatiation with args after construction"""
class B(Singleton):
def __init__(self, arg1, arg2):
super(B, self).__init__()
self.arg1 = arg1
self.arg2 = arg2
B.getInstance('arg1 value', 'arg2 value')
self.assertRaises(SingletonException, B, 'arg1 value', 'arg2 value')
def test_forgetClassInstanceReferenceForTesting(self):
"""Test destruction of class instances for testing"""
class A(Singleton):
def __init__(self):
super(A, self).__init__()
class B(A):
def __init__(self):
super(B, self).__init__()
# check that changing the class after forgetting the instance produces
# an instance of the new class
a = A.getInstance()
assert a.__class__.__name__ == 'A'
A._forgetClassInstanceReferenceForTesting()
b = B.getInstance()
assert b.__class__.__name__ == 'B'
# check that invoking the 'forget' on a subclass still deletes the instance
B._forgetClassInstanceReferenceForTesting()
a = A.getInstance()
B._forgetClassInstanceReferenceForTesting()
b = B.getInstance()
assert b.__class__.__name__ == 'B'
def test_forgetAllSingletons(self):
"""Test that all singletons are removed by forgetAllSingletons"""
# Should work if there are no singletons
forgetAllSingletons()
class A(Singleton):
ciInitCount = 0
def __init__(self):
super(A, self).__init__()
A.ciInitCount += 1
A.getInstance()
self.assertEqual(A.ciInitCount, 1)
A.getInstance()
self.assertEqual(A.ciInitCount, 1)
forgetAllSingletons()
A.getInstance()
self.assertEqual(A.ciInitCount, 2)
def test_threadedCreation(self):
"""Test that only one Singleton is created in multithread applications"""
# Check that only one Singleton is created even if multiple
        # threads try at the same time. If it fails, the assert in _addSingleton would trigger.
class Test_Singleton(Singleton):
def __init__(self):
super(Test_Singleton, self).__init__()
class Test_SingletonThread(threading.Thread):
def __init__(self, fTargetTime):
super(Test_SingletonThread, self).__init__()
self._fTargetTime = fTargetTime
self._eException = None
def run(self):
try:
fSleepTime = self._fTargetTime - time.time()
if fSleepTime > 0:
time.sleep(fSleepTime)
Test_Singleton.getInstance()
except Exception, e:
self._eException = e
fTargetTime = time.time() + 0.1
lstThreads = []
for _ in xrange(100):
t = Test_SingletonThread(fTargetTime)
t.start()
lstThreads.append(t)
eException = None
for t in lstThreads:
t.join()
if t._eException and not eException:
eException = t._eException
if eException:
raise eException #pylint: disable-msg=E0702
def testNoInit(self):
"""Demonstrates use with a class not defining __init__"""
class A(Singleton):
pass
#INTENTIONALLY UNDEFINED:
#def __init__(self):
# super(A, self).__init__()
A.getInstance() #Make sure no exception is raised
def testMultipleGetInstancesWithArgs(self):
"""Test multiple calls to getInstance with args"""
class A(Singleton):
ignoreSubsequent = True
def __init__(self, a, b=1):
pass
a1 = A.getInstance(1)
a2 = A.getInstance(2) # ignores the second call because of ignoreSubsequent
class B(Singleton):
def __init__(self, a, b=1):
pass
b1 = B.getInstance(1)
self.assertRaises(SingletonException, B.getInstance, 2) # No ignoreSubsequent included
class C(Singleton):
def __init__(self, a=1):
pass
c1 = C.getInstance(a=1)
self.assertRaises(SingletonException, C.getInstance, a=2) # No ignoreSubsequent included
def testInheritance(self):
"""Test Singleton inheritance"""
# It's sometimes said that you can't subclass a singleton (see, for instance,
# http://steve.yegge.googlepages.com/singleton-considered-stupid point e). This
# test shows that at least rudimentary subclassing works fine for us.
class A(Singleton):
def setX(self, x):
self.x = x
def setZ(self, z):
raise NotImplementedError
class B(A):
def setX(self, x):
self.x = -x
def setY(self, y):
self.y = y
a = A.getInstance()
a.setX(5)
b = B.getInstance()
b.setX(5)
b.setY(50)
self.assertEqual((a.x, b.x, b.y), (5, -5, 50))
self.assertRaises(AttributeError, eval, 'a.setY', {}, locals())
self.assertRaises(NotImplementedError, b.setZ, 500)
if __name__ == "__main__":
unittest.main()
| 33.198502
| 129
| 0.586304
|
25f7e8ded6a3da3648e9020870fc19e58b6a21f3
| 841
|
py
|
Python
|
pylegos/cli/templates/code/build.py
|
velexio/pyLegos
|
64d3622f2b6d78a02b171e0438a0224a951d2644
|
[
"MIT"
] | null | null | null |
pylegos/cli/templates/code/build.py
|
velexio/pyLegos
|
64d3622f2b6d78a02b171e0438a0224a951d2644
|
[
"MIT"
] | 2
|
2016-11-23T00:36:34.000Z
|
2016-11-23T00:39:08.000Z
|
pylegos/cli/templates/code/build.py
|
velexio/pyLegos
|
64d3622f2b6d78a02b171e0438a0224a951d2644
|
[
"MIT"
] | null | null | null |
from pylegos.core import FileUtils
from pylegos.core import PlatformProperty
class AppBuilder:
def __init__(self):
sep = PlatformProperty.FileSep
self.ProjectBaseDir = FileUtils.getParentDir(__file__) + sep
projectDirArray = self.ProjectBaseDir.strip(sep).split(sep)
self.ProjectName = projectDirArray[len(projectDirArray) - 1].lower()
self.DistDir = self.ProjectBaseDir+'dist'+sep
if not FileUtils().dirExists(self.DistDir):
FileUtils().createDir(self.DistDir)
FileUtils().touchFile(self.DistDir+'build.log')
def __buildDatabaseScripts(self):
pass
def buildApp(self):
pass
def buildInstaller(self):
pass
if __name__ == '__main__':
appBuilder = AppBuilder()
appBuilder.buildApp()
appBuilder.buildInstaller()
| 26.28125
| 76
| 0.676576
|
0811b1ddccf52d9855254d34776743f1bfeaa8ea
| 86
|
py
|
Python
|
utils/essfolder.py
|
SHI3DO/Andante
|
beb6fdf96ef86a10de9f802cef2d97dd81b3e688
|
[
"MIT"
] | 2
|
2021-12-26T06:26:06.000Z
|
2022-02-24T23:54:58.000Z
|
utils/essfolder.py
|
SHI3DO/Andante
|
beb6fdf96ef86a10de9f802cef2d97dd81b3e688
|
[
"MIT"
] | null | null | null |
utils/essfolder.py
|
SHI3DO/Andante
|
beb6fdf96ef86a10de9f802cef2d97dd81b3e688
|
[
"MIT"
] | null | null | null |
import os
def make():
if not os.path.isdir('./maps'):
os.mkdir('./maps')
| 14.333333
| 35
| 0.534884
|
e03915f0b7809755b7b12c323497623e88dff22c
| 8,826
|
py
|
Python
|
src/the_tale/the_tale/game/quests/tests/test_workers.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/game/quests/tests/test_workers.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/game/quests/tests/test_workers.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class QuestsGeneratorWorkerTests(clans_helpers.ClansTestsMixin,
emissaries_helpers.EmissariesTestsMixin,
utils_testcase.TestCase):
def setUp(self):
super(QuestsGeneratorWorkerTests, self).setUp()
self.places = game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_2 = self.accounts_factory.create_account()
self.account_3 = self.accounts_factory.create_account()
self.account_4 = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account_1)
self.storage.load_account_data(self.account_2)
self.storage.load_account_data(self.account_3)
self.storage.load_account_data(self.account_4)
self.hero_1 = self.storage.accounts_to_heroes[self.account_1.id]
self.hero_2 = self.storage.accounts_to_heroes[self.account_2.id]
self.hero_3 = self.storage.accounts_to_heroes[self.account_3.id]
self.hero_4 = self.storage.accounts_to_heroes[self.account_4.id]
self.prepair_forum_for_clans()
self.clan = self.create_clan(owner=self.account_1, uid=1)
self.emissary = self.create_emissary(clan=self.clan,
initiator=self.account_1,
place_id=self.places[0].id)
self.worker = quests_workers_quests_generator.Worker(name='game_quests_generator')
self.worker.initialize()
def test_process_request_quest(self):
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_setup_quest') as cmd_setup_quest:
self.worker.process_request_quest(self.hero_1.account_id,
hero_info=logic.create_hero_info(self.hero_1).serialize(),
emissary_id=None,
place_id=None,
person_id=None,
person_action=None)
self.worker.generate_quest()
self.assertEqual(cmd_setup_quest.call_count, 1)
self.assertEqual(cmd_setup_quest.call_args_list[0][0][0], self.hero_1.account_id)
self.assertTrue(questgen_knowledge_base.KnowledgeBase.deserialize(cmd_setup_quest.call_args_list[0][0][1], fact_classes=questgen_facts.FACTS))
def test_generate_quest__empty_queue(self):
self.worker.generate_quest()
def test_process_request_quest__query(self):
old_hero_1_info = logic.create_hero_info(self.hero_1)
self.hero_1.level = 666
new_hero_1_info = logic.create_hero_info(self.hero_1)
hero_2_info = logic.create_hero_info(self.hero_2)
hero_3_info = logic.create_hero_info(self.hero_3)
hero_4_info = logic.create_hero_info(self.hero_4)
self.assertNotEqual(old_hero_1_info, new_hero_1_info)
self.worker.process_request_quest(self.hero_1.account_id,
hero_info=old_hero_1_info.serialize(),
emissary_id=None,
place_id=None,
person_id=None,
person_action=None)
self.worker.process_request_quest(self.hero_2.account_id,
hero_info=hero_2_info.serialize(),
emissary_id=self.emissary.id,
place_id=None,
person_id=None,
person_action=relations.PERSON_ACTION.HARM.value)
self.worker.process_request_quest(self.hero_1.account_id,
hero_info=new_hero_1_info.serialize(),
emissary_id=None,
place_id=None,
person_id=None,
person_action=None)
self.worker.process_request_quest(self.hero_3.account_id,
hero_info=hero_3_info.serialize(),
emissary_id=None,
place_id=self.places[0].id,
person_id=None,
person_action=relations.PERSON_ACTION.HELP.value)
person = self.places[1].persons[0]
self.worker.process_request_quest(self.hero_4.account_id,
hero_info=hero_4_info.serialize(),
emissary_id=None,
place_id=None,
person_id=person.id,
person_action=relations.PERSON_ACTION.HARM.value)
self.assertEqual(self.worker.requests_query, collections.deque([self.account_1.id,
self.account_2.id,
self.account_3.id,
self.account_4.id]))
args_1 = {'info': new_hero_1_info,
'emissary_id': None,
'place_id': None,
'person_id': None,
'person_action': None}
args_2 = {'info': hero_2_info,
'emissary_id': self.emissary.id,
'place_id': None,
'person_id': None,
'person_action': relations.PERSON_ACTION.HARM}
args_3 = {'info': hero_3_info,
'emissary_id': None,
'place_id': self.places[0].id,
'person_id': None,
'person_action': relations.PERSON_ACTION.HELP}
args_4 = {'info': hero_4_info,
'emissary_id': None,
'place_id': None,
'person_id': person.id,
'person_action': relations.PERSON_ACTION.HARM}
self.assertEqual(self.worker.requests_heroes_infos,
{self.account_1.id: args_1,
self.account_2.id: args_2,
self.account_3.id: args_3,
self.account_4.id: args_4})
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_setup_quest') as cmd_setup_quest:
self.worker.generate_quest()
self.assertEqual(cmd_setup_quest.call_args_list[0][0][0], self.account_1.id)
self.assertEqual(self.worker.requests_query, collections.deque([self.account_2.id,
self.account_3.id,
self.account_4.id]))
self.assertEqual(self.worker.requests_heroes_infos,
{self.account_2.id: args_2,
self.account_3.id: args_3,
self.account_4.id: args_4})
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_setup_quest') as cmd_setup_quest:
self.worker.generate_quest()
self.assertEqual(cmd_setup_quest.call_args_list[0][0][0], self.account_2.id)
self.assertEqual(self.worker.requests_query, collections.deque([self.account_3.id,
self.account_4.id]))
self.assertEqual(self.worker.requests_heroes_infos,
{self.account_3.id: args_3,
self.account_4.id: args_4})
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_setup_quest') as cmd_setup_quest:
self.worker.generate_quest()
self.assertEqual(cmd_setup_quest.call_args_list[0][0][0], self.account_3.id)
self.assertEqual(self.worker.requests_query, collections.deque([self.account_4.id]))
self.assertEqual(self.worker.requests_heroes_infos,
{self.account_4.id: args_4})
with mock.patch('the_tale.game.workers.supervisor.Worker.cmd_setup_quest') as cmd_setup_quest:
self.worker.generate_quest()
self.assertEqual(cmd_setup_quest.call_args_list[0][0][0], self.account_4.id)
self.assertEqual(self.worker.requests_query, collections.deque([]))
self.assertEqual(self.worker.requests_heroes_infos,
{})
| 47.197861
| 150
| 0.540335
|
61c13aaf5913dbd803f099c90235dd4ee70cef51
| 7,599
|
py
|
Python
|
tests/test_reader.py
|
virtuald/pynsq
|
40a637b02c28edfb7723373b81f7e7b9d5e364ed
|
[
"MIT"
] | null | null | null |
tests/test_reader.py
|
virtuald/pynsq
|
40a637b02c28edfb7723373b81f7e7b9d5e364ed
|
[
"MIT"
] | null | null | null |
tests/test_reader.py
|
virtuald/pynsq
|
40a637b02c28edfb7723373b81f7e7b9d5e364ed
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import signal
import subprocess
import time
import ssl
import tornado.httpclient
import tornado.testing
# shunt '..' into sys.path since we are in a 'tests' subdirectory
base_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
if base_dir not in sys.path:
sys.path.insert(0, base_dir)
from nsq import protocol
from nsq.async import AsyncConn
from nsq.deflate_socket import DeflateSocket
from nsq.reader import Reader
from nsq.snappy_socket import SnappySocket
class ReaderIntegrationTest(tornado.testing.AsyncTestCase):
identify_options = {
'user_agent': 'sup',
'snappy': True,
'tls_v1': True,
'tls_options': {'cert_reqs': ssl.CERT_NONE},
'heartbeat_interval': 10,
'output_buffer_size': 4096,
'output_buffer_timeout': 50
}
nsqd_command = ['nsqd', '--verbose', '--snappy',
'--tls-key=%s/tests/key.pem' % base_dir,
'--tls-cert=%s/tests/cert.pem' % base_dir]
def setUp(self):
super(ReaderIntegrationTest, self).setUp()
self.processes = []
proc = subprocess.Popen(self.nsqd_command)
self.processes.append(proc)
http = tornado.httpclient.HTTPClient()
start = time.time()
while True:
try:
resp = http.fetch('http://127.0.0.1:4151/ping')
if resp.body == b'OK':
break
continue
except:
if time.time() - start > 5:
raise
time.sleep(0.1)
continue
def tearDown(self):
super(ReaderIntegrationTest, self).tearDown()
for proc in self.processes:
os.kill(proc.pid, signal.SIGKILL)
proc.wait()
def test_bad_reader_arguments(self):
topic = 'test_reader_msgs_%s' % time.time()
bad_options = dict(self.identify_options)
bad_options.update(dict(foo=10))
handler = lambda x: None
self.assertRaises(
AssertionError,
Reader,
nsqd_tcp_addresses=['127.0.0.1:4150'], topic=topic,
channel='ch', io_loop=self.io_loop,
message_handler=handler, max_in_flight=100,
**bad_options)
def test_conn_identify(self):
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop)
c.on('identify_response', self.stop)
c.connect()
response = self.wait()
print(response)
assert response['conn'] is c
assert isinstance(response['data'], dict)
def test_conn_identify_options(self):
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop,
**self.identify_options)
c.on('identify_response', self.stop)
c.connect()
response = self.wait()
print(response)
assert response['conn'] is c
assert isinstance(response['data'], dict)
assert response['data']['snappy'] is True
assert response['data']['tls_v1'] is True
def test_conn_socket_upgrade(self):
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop,
**self.identify_options)
c.on('ready', self.stop)
c.connect()
self.wait()
assert isinstance(c.socket, SnappySocket)
assert isinstance(c.socket._socket, ssl.SSLSocket)
def test_conn_subscribe(self):
topic = 'test_conn_suscribe_%s' % time.time()
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop,
**self.identify_options)
def _on_ready(*args, **kwargs):
c.on('response', self.stop)
c.send(protocol.subscribe(topic, 'ch'))
c.on('ready', _on_ready)
c.connect()
response = self.wait()
print(response)
assert response['conn'] is c
assert response['data'] == b'OK'
def _send_messages(self, topic, count, body):
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop)
c.connect()
def _on_ready(*args, **kwargs):
for i in range(count):
c.send(protocol.pub(topic, body))
c.on('ready', _on_ready)
def test_conn_messages(self):
self.msg_count = 0
topic = 'test_conn_suscribe_%s' % time.time()
self._send_messages(topic, 5, b'sup')
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop,
**self.identify_options)
def _on_message(*args, **kwargs):
self.msg_count += 1
if c.rdy == 0:
self.stop()
def _on_ready(*args, **kwargs):
c.on('message', _on_message)
c.send(protocol.subscribe(topic, 'ch'))
c.send_rdy(5)
c.on('ready', _on_ready)
c.connect()
self.wait()
assert self.msg_count == 5
def test_reader_messages(self):
self.msg_count = 0
num_messages = 500
topic = 'test_reader_msgs_%s' % time.time()
self._send_messages(topic, num_messages, b'sup')
def handler(msg):
assert msg.body == b'sup'
self.msg_count += 1
if self.msg_count >= num_messages:
self.stop()
return True
r = Reader(nsqd_tcp_addresses=['127.0.0.1:4150'], topic=topic, channel='ch',
io_loop=self.io_loop, message_handler=handler, max_in_flight=100,
**self.identify_options)
self.wait()
r.close()
def test_reader_heartbeat(self):
this = self
this.count = 0
def handler(msg):
return True
class HeartbeatReader(Reader):
def heartbeat(self, conn):
this.count += 1
if this.count == 2:
this.stop()
topic = 'test_reader_hb_%s' % time.time()
HeartbeatReader(nsqd_tcp_addresses=['127.0.0.1:4150'], topic=topic, channel='ch',
io_loop=self.io_loop, message_handler=handler, max_in_flight=100,
heartbeat_interval=1)
self.wait()
class DeflateReaderIntegrationTest(ReaderIntegrationTest):
identify_options = {
'user_agent': 'sup',
'deflate': True,
'deflate_level': 6,
'tls_v1': True,
'tls_options': {'cert_reqs': ssl.CERT_NONE},
'heartbeat_interval': 10,
'output_buffer_size': 4096,
'output_buffer_timeout': 50
}
nsqd_command = ['nsqd', '--verbose', '--deflate',
'--tls-key=%s/tests/key.pem' % base_dir,
'--tls-cert=%s/tests/cert.pem' % base_dir]
def test_conn_identify_options(self):
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop,
**self.identify_options)
c.on('identify_response', self.stop)
c.connect()
response = self.wait()
print(response)
assert response['conn'] is c
assert isinstance(response['data'], dict)
assert response['data']['deflate'] is True
assert response['data']['tls_v1'] is True
def test_conn_socket_upgrade(self):
c = AsyncConn('127.0.0.1', 4150, io_loop=self.io_loop,
**self.identify_options)
c.on('ready', self.stop)
c.connect()
self.wait()
assert isinstance(c.socket, DeflateSocket)
assert isinstance(c.socket._socket, ssl.SSLSocket)
| 31.6625
| 90
| 0.574023
|
9e3688906b0219192216008ef1c6c8222810aa4e
| 1,016
|
py
|
Python
|
packages/python/plotly/plotly/validators/histogram2d/_xcalendar.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/_xcalendar.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/_xcalendar.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class XcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xcalendar", parent_name="histogram2d", **kwargs):
super(XcalendarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values",
[
"chinese",
"coptic",
"discworld",
"ethiopian",
"gregorian",
"hebrew",
"islamic",
"jalali",
"julian",
"mayan",
"nanakshahi",
"nepali",
"persian",
"taiwan",
"thai",
"ummalqura",
],
),
**kwargs,
)
| 30.787879
| 85
| 0.40748
|
ecf24e24f8658c2a121a0f3c023feee45eb775e3
| 144
|
py
|
Python
|
backend/adminapp/apps.py
|
christian-japan-devs/vietcatholic-jp
|
1a800f478584538cc5e21ddd5816894dff8bd90f
|
[
"MIT"
] | null | null | null |
backend/adminapp/apps.py
|
christian-japan-devs/vietcatholic-jp
|
1a800f478584538cc5e21ddd5816894dff8bd90f
|
[
"MIT"
] | 6
|
2021-05-29T04:39:00.000Z
|
2021-07-04T00:40:15.000Z
|
backend/adminapp/apps.py
|
christian-japan-devs/vietcatholic-jp
|
1a800f478584538cc5e21ddd5816894dff8bd90f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AdminappConfig(AppConfig):
name = 'adminapp'
def ready(self):
import adminapp.signals
| 18
| 33
| 0.708333
|
517fb34d5bd6a317364d4d37076e5073fb7ef712
| 3,773
|
py
|
Python
|
algorithms/Other/print_concentric_rectangle.py
|
serlop3791/useful_functions
|
039aeb52f78bcf9cae09cb5984a059a6f78a54f6
|
[
"Apache-2.0"
] | null | null | null |
algorithms/Other/print_concentric_rectangle.py
|
serlop3791/useful_functions
|
039aeb52f78bcf9cae09cb5984a059a6f78a54f6
|
[
"Apache-2.0"
] | null | null | null |
algorithms/Other/print_concentric_rectangle.py
|
serlop3791/useful_functions
|
039aeb52f78bcf9cae09cb5984a059a6f78a54f6
|
[
"Apache-2.0"
] | null | null | null |
# Print concentric rectangular pattern in a 2d matrix.
#
# Let us show you some examples to clarify what we mean.
#
# Example 1:
#
# Input: A = 4.
#
# Output:
#
# 4 4 4 4 4 4 4
# 4 3 3 3 3 3 4
# 4 3 2 2 2 3 4
# 4 3 2 1 2 3 4
# 4 3 2 2 2 3 4
# 4 3 3 3 3 3 4
# 4 4 4 4 4 4 4
# Example 2:
#
# Input: A = 3.
#
# Output:
#
# 3 3 3 3 3
# 3 2 2 2 3
# 3 2 1 2 3
# 3 2 2 2 3
# 3 3 3 3 3
# The outermost rectangle is formed by A, then the next outermost is formed by A-1 and so on.
#
# You will be given A as an argument to the function you need to implement, and you need to return a 2D array.
# # Example 2:
# #
# # Input: A = 3.
# #
# # Output:
# #
# # 3 3 3 3 3
# # 3 2 2 2 3
# # 3 2 1 2 3
# # 3 2 2 2 3
# # 3 3 3 3 3
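# A compact alternative, added purely as an illustrative sketch (the helper name
# `concentric_value` is not part of the original solution): the value at cell
# (i, j) of the (2*A - 1) x (2*A - 1) grid is A minus the distance to the
# nearest edge of the grid.
def concentric_value(a, i, j):
    """Illustrative helper: value at row i, column j for parameter a."""
    size = 2 * a - 1
    return a - min(i, j, size - 1 - i, size - 1 - j)

# For example, [[concentric_value(3, i, j) for j in range(5)] for i in range(5)]
# reproduces the A = 3 grid shown above.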
def get_empty_matrix(size):
rows_count, cols_count = size, size
return [[0 for _ in range(cols_count)] for _ in range(rows_count)]
def print_matrix_rows(matrix):
for i in range(len(matrix)):
print(str(matrix[i]), end='\n')
# print_matrix_rows(get_empty_matrix(5))
def fill_matrix(matrix):
count = 0
for i in range(len(matrix)):
for j in range(len(matrix[i])):
matrix[i][j] = count
count += 1
return matrix
# print_matrix_rows(fill_matrix(get_empty_matrix(3)))
# matrix = get_empty_matrix(2)
# # Input: A = 2.
# # Output:
# # 2 2 2
# # 2 1 2
# # 2 2 2
matrix = fill_matrix(get_empty_matrix(3))
print_matrix_rows(matrix)
# end of top right
# end of bottom right
# end of bottom left
# end of top left
# concentric traversal
# 0 1 2 5 8 7 6 3 4
#
# for i in range(0, len(matrix[0])):
# print(matrix[0][i], end=' ')
# print(" ")
# for i in range(0, len(matrix)):
# # i = 1, j = 2 or j = len(matrix[i]) - 1
# num_to_print = matrix[0]
# # print(i, end=' ')
# Python3 program for printing
# the rectangular pattern
# Function to print the pattern
def printPattern(n):
    # a = n, b = n - 1, c = n - 2
# number of rows and columns to be printed
s = 2 * n - 1
# Upper Half
for i in range(0, int(s / 2) + 1):
m = n
# Decreasing part
for j in range(0, i):
print(m, end=" ")
m -= 1
# Constant Part
for k in range(0, s - 2 * i):
print(n - i, end=" ")
# Increasing part.
m = n - i + 1
for l in range(0, i):
print(m, end=" ")
m += 1
print("")
# Lower Half
for i in range(int(s / 2), -1, -1):
# Decreasing Part
m = n
for j in range(0, i):
print(m, end=" ")
m -= 1
# Constant Part.
for k in range(0, s - 2 * i):
print(n - i, end=" ")
# Decreasing Part
m = n - i + 1
for l in range(0, i):
print(m, end=" ")
m += 1
print("")
# Driver Program
if __name__ == '__main__':
n = 3
printPattern(n)
# this code is contributed by Smitha Dinesh
# Semwal
# Python3 program for printing
# the rectangular pattern
# Function to print the pattern
# def printPattern(n):
# arraySize = n * 2 - 1;
# result = [[0 for x in range(arraySize)]
# for y in range(arraySize)];
#
# # Fill the values
# for i in range(arraySize):
# for j in range(arraySize):
# if (abs(i - (arraySize // 2)) >
# abs(j - (arraySize // 2))):
# result[i][j] = abs(i - (arraySize // 2)) + 1;
# else:
# result[i][j] = abs(j - (arraySize // 2)) + 1;
#
# # Print the array
# for i in range(arraySize):
# for j in range(arraySize):
# print(result[i][j], end=" ");
# print("");
#
#
# # Driver Code
# n = 3;
#
# printPattern(n);
# This code is contributed by mits
| 19.25
| 110
| 0.521601
|
f736eeac450d1ff8d9e88665aa56c3f3474104df
| 2,597
|
py
|
Python
|
dgp/core/context.py
|
dataspot/dgp
|
553a255a4884b935cf2efecdc761050232f0f066
|
[
"MIT"
] | 1
|
2019-07-17T11:34:27.000Z
|
2019-07-17T11:34:27.000Z
|
dgp/core/context.py
|
datahq/dgp
|
f39592ce20ba67b73b08188f14585b6eb3d43f96
|
[
"MIT"
] | 2
|
2019-04-30T12:32:32.000Z
|
2019-04-30T12:35:26.000Z
|
dgp/core/context.py
|
dataspot/dgp
|
553a255a4884b935cf2efecdc761050232f0f066
|
[
"MIT"
] | null | null | null |
import copy
import tabulator
import requests
from .config import Config
from ..config.log import logger
from ..config.consts import CONFIG_SKIP_ROWS, CONFIG_TAXONOMY_ID, CONFIG_FORMAT, CONFIG_ALLOW_INSECURE_TLS
from ..taxonomies import TaxonomyRegistry, Taxonomy
_workbook_cache = {}
def trimmer(extended_rows):
for row_number, headers, row in extended_rows:
if headers:
row = row[:len(headers)]
if len(row) < len(headers):
continue
yield (row_number, headers, row)
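# Illustrative note (added, not part of the original module): with headers of
# length 3, trimmer truncates longer rows and drops shorter ones, e.g.
#   (1, ['a', 'b', 'c'], [1, 2, 3, 4]) -> (1, ['a', 'b', 'c'], [1, 2, 3])
#   (2, ['a', 'b', 'c'], [1, 2])       -> skipped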
class Context():
def __init__(self, config: Config, taxonomies: TaxonomyRegistry):
self.config = config
self.taxonomies: TaxonomyRegistry = taxonomies
self._stream = None
self.enricher_dir = None
def _structure_params(self):
skip_rows = self.config.get(CONFIG_SKIP_ROWS) if CONFIG_SKIP_ROWS in self.config else None
fmt = self.config.get(CONFIG_FORMAT)
return dict(
headers=skip_rows + 1 if skip_rows is not None else None,
ignore_blank_headers=fmt in ('csv', 'xlsx', 'xls'),
post_parse=[trimmer]
)
def reset_stream(self):
self._stream = None
def http_session(self):
http_session = requests.Session()
http_session.headers.update(tabulator.config.HTTP_HEADERS)
if self.config.get(CONFIG_ALLOW_INSECURE_TLS):
http_session.verify = False
return http_session
@property
def stream(self):
if self._stream is None:
source = copy.deepcopy(self.config._unflatten().get('source', {}))
structure = self._structure_params()
try:
path = source.pop('path')
if not path:
return None
logger.info('Opening stream %s', path)
if 'workbook_cache' in source:
source['workbook_cache'] = _workbook_cache
self._stream = tabulator.Stream(path, **source, **structure, http_session=self.http_session()).open()
for k in source.keys():
self.config.get('source.' + k)
for k in structure.keys():
self.config.get('structure.' + k)
except Exception:
logger.exception('Failed to open URL, source=%r, structure=%r', source, structure)
raise
return self._stream
@property
def taxonomy(self) -> Taxonomy:
if CONFIG_TAXONOMY_ID in self.config:
return self.taxonomies.get(self.config[CONFIG_TAXONOMY_ID])
| 34.171053
| 117
| 0.613015
|
8bf307ea747a7d3f44252bb14172e6c64c606964
| 9,755
|
py
|
Python
|
src/gluonts/core/serde/_base.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 2,648
|
2019-06-03T17:18:27.000Z
|
2022-03-31T08:29:22.000Z
|
src/gluonts/core/serde/_base.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 1,220
|
2019-06-04T09:00:14.000Z
|
2022-03-31T10:45:43.000Z
|
src/gluonts/core/serde/_base.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 595
|
2019-06-04T01:04:31.000Z
|
2022-03-30T10:40:26.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import textwrap
from enum import Enum
from functools import singledispatch, partial
from pathlib import PurePath
from pydoc import locate
from typing import Any, NamedTuple, cast
from toolz.dicttoolz import valmap
from pydantic import BaseModel
from gluonts.core import fqname_for
bad_type_msg = textwrap.dedent(
"""
Cannot serialize type {}. See the documentation of the `encode` and
`validate` functions at
http://gluon-ts.mxnet.io/api/gluonts/gluonts.html
and the Python documentation of the `__getnewargs_ex__` magic method at
https://docs.python.org/3/library/pickle.html#object.__getnewargs_ex__
    for more information on how to make this type serializable.
"""
).lstrip()
class StatelessMeta(type):
def __call__(cls, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
if isinstance(self, cls):
if hasattr(self.__init__, "__checked__"):
(this, *args), kwargs = self.__init__.__checked__(
self, *args, **kwargs
)
self.__init__.__wrapped__(this, *args, **kwargs)
else:
self.__init__(*args, **kwargs)
self.__init_args__ = args, kwargs
self.__sealed__ = True
return self
class Stateless(metaclass=StatelessMeta):
def __getnewargs_ex__(self):
return self.__init_args__
def __setattr__(self, name, value):
if hasattr(self, "__sealed__"):
classname = self.__class__.__name__
raise ValueError(
f"Assignment to `{name}` outside of `{classname}.__init__`."
)
return object.__setattr__(self, name, value)
class Stateful:
pass
class Kind(str, Enum):
Type = "type"
Instance = "instance"
Stateful = "stateful"
@singledispatch
def encode(v: Any) -> Any:
"""
    Transforms a value `v` into a serializable intermediate representation (for
example, named tuples are encoded as dictionaries). The intermediate
representation is then recursively traversed and serialized either as
Python code or as JSON string.
This function is decorated with :func:`~functools.singledispatch` and can
be specialized by clients for families of types that are not supported by
the basic implementation (explained below).
Examples
--------
The conversion logic implemented by the basic implementation is used
as a fallback and is best explained by a series of examples.
Lists (as lists).
>>> encode([1, 2.0, '3'])
[1, 2.0, '3']
Dictionaries (as dictionaries).
>>> encode({'a': 1, 'b': 2.0, 'c': '3'})
{'a': 1, 'b': 2.0, 'c': '3'}
Named tuples (as dictionaries with a
``'__kind__': <Kind.Instance: 'instance'>`` member).
>>> from pprint import pprint
>>> from typing import NamedTuple
>>> class ComplexNumber(NamedTuple):
... x: float = 0.0
... y: float = 0.0
>>> pprint(encode(ComplexNumber(4.0, 2.0)))
{'__kind__': <Kind.Instance: 'instance'>,
'class': 'gluonts.core.serde._base.ComplexNumber',
'kwargs': {'x': 4.0, 'y': 2.0}}
Classes with a :func:`~gluonts.core.component.validated` initializer (as
dictionaries with a ``'__kind__': <Kind.Instance: 'instance'>`` member).
>>> from gluonts.core.component import validated
>>> class ComplexNumber:
... @validated()
... def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
... self.x = x
... self.y = y
>>> pprint(encode(ComplexNumber(4.0, 2.0)))
{'__kind__': <Kind.Instance: 'instance'>,
'args': [],
'class': 'gluonts.core.serde._base.ComplexNumber',
'kwargs': {'x': 4.0, 'y': 2.0}}
Classes with a ``__getnewargs_ex__`` magic method (as dictionaries with a
``'__kind__': <Kind.Instance: 'instance'>`` member).
>>> from gluonts.core.component import validated
>>> class ComplexNumber:
... def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
... self.x = x
... self.y = y
... def __getnewargs_ex__(self):
... return [], {'x': self.x, 'y': self.y}
>>> pprint(encode(ComplexNumber(4.0, 2.0)))
{'__kind__': <Kind.Instance: 'instance'>,
'args': [],
'class': 'gluonts.core.serde._base.ComplexNumber',
'kwargs': {'x': 4.0, 'y': 2.0}}
Types (as dictionaries with a ``'__kind__': <Kind.Type: 'type'> member``).
>>> encode(ComplexNumber)
{'__kind__': <Kind.Type: 'type'>,
'class': 'gluonts.core.serde._base.ComplexNumber'}
Parameters
----------
v
The value to be encoded.
Returns
-------
Any
An encoding of ``v`` that can be serialized to Python code or
JSON string.
See Also
--------
decode
Inverse function.
dump_json
Serializes an object to a JSON string.
dump_code
Serializes an object to a Python code string.
"""
if v is None:
return None
if isinstance(v, (float, int, str)):
return v
    # check for namedtuples first, so they are encoded as dicts rather than plain tuples
if isinstance(v, tuple) and hasattr(v, "_asdict"):
v = cast(NamedTuple, v)
return {
"__kind__": Kind.Instance,
"class": fqname_for(v.__class__),
"kwargs": encode(v._asdict()),
}
if isinstance(v, (tuple, set)):
return {
"__kind__": Kind.Instance,
"class": fqname_for(type(v)),
"args": [list(map(encode, v))],
}
if isinstance(v, list):
return list(map(encode, v))
if isinstance(v, dict):
return valmap(encode, v)
if isinstance(v, type):
return {"__kind__": Kind.Type, "class": fqname_for(v)}
if hasattr(v, "__getnewargs_ex__"):
args, kwargs = v.__getnewargs_ex__() # mypy: ignore
return {
"__kind__": Kind.Instance,
"class": fqname_for(v.__class__),
# args need to be a list, since we encode tuples explicitly
"args": encode(list(args)),
"kwargs": encode(kwargs),
}
try:
        # as a fallback, we try to just take the fully qualified name of the value
fqname = fqname_for(v)
assert (
"<lambda>" not in fqname
), f"Can't serialize lambda function {fqname}"
if hasattr(v, "__self__") and hasattr(v, "__func__"):
# v is a method
            # to model `obj.method`, we encode `getattr(obj, "method")`
return {
"__kind__": Kind.Instance,
"class": fqname_for(getattr),
"args": encode((v.__self__, v.__func__.__name__)),
}
return {"__kind__": Kind.Type, "class": fqname_for(v)}
except AttributeError:
pass
raise RuntimeError(bad_type_msg.format(fqname_for(v.__class__)))
@encode.register(Stateful)
def encode_from_state(v: Stateful) -> Any:
return {
"__kind__": Kind.Stateful,
"class": fqname_for(v.__class__),
"kwargs": encode(v.__dict__),
}
@encode.register(PurePath)
def encode_path(v: PurePath) -> Any:
"""
Specializes :func:`encode` for invocations where ``v`` is an instance of
the :class:`~PurePath` class.
"""
return {
"__kind__": Kind.Instance,
"class": fqname_for(v.__class__),
"args": [str(v)],
}
@encode.register(BaseModel)
def encode_pydantic_model(v: BaseModel) -> Any:
"""
Specializes :func:`encode` for invocations where ``v`` is an instance of
the :class:`~BaseModel` class.
"""
return {
"__kind__": Kind.Instance,
"class": fqname_for(v.__class__),
"kwargs": encode(v.__dict__),
}
@encode.register(partial)
def encode_partial(v: partial) -> Any:
args = (v.func,) + v.args
return {
"__kind__": Kind.Instance,
"class": fqname_for(v.__class__),
"args": encode(args),
"kwargs": encode(v.keywords),
}
def decode(r: Any) -> Any:
"""
Decodes a value from an intermediate representation `r`.
Parameters
----------
r
An intermediate representation to be decoded.
Returns
-------
Any
A Python data structure corresponding to the decoded version of ``r``.
See Also
--------
encode
Inverse function.
"""
# structural recursion over the possible shapes of r
if type(r) == dict and "__kind__" in r:
kind = r["__kind__"]
cls = cast(Any, locate(r["class"]))
assert cls is not None, f"Can not locate {r['class']}."
if kind == Kind.Type:
return cls
args = decode(r.get("args", []))
kwargs = decode(r.get("kwargs", {}))
if kind == Kind.Instance:
return cls(*args, **kwargs)
if kind == Kind.Stateful:
obj = cls.__new__(cls)
obj.__dict__.update(kwargs)
return obj
raise ValueError(f"Unknown kind {kind}.")
if type(r) == dict:
return valmap(decode, r)
if type(r) == list:
return list(map(decode, r))
return r
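# Illustrative round trip (added as a sketch, not part of the original module):
# for values supported by `encode`, `decode` acts as its inverse, e.g.
#   decode(encode({"a": [1, 2.0, "3"]})) == {"a": [1, 2.0, "3"]}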
| 28.523392
| 78
| 0.593337
|
1eb6ba96b1af612c3af56275b4d7cc0511b32822
| 1,864
|
py
|
Python
|
mir/termdbg/ascii.py
|
darkfeline/mir.termdbg
|
a654b4ac0cae0da012b683d99b2d290db0acb48b
|
[
"Apache-2.0"
] | null | null | null |
mir/termdbg/ascii.py
|
darkfeline/mir.termdbg
|
a654b4ac0cae0da012b683d99b2d290db0acb48b
|
[
"Apache-2.0"
] | null | null | null |
mir/termdbg/ascii.py
|
darkfeline/mir.termdbg
|
a654b4ac0cae0da012b683d99b2d290db0acb48b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ASCII character table.
Constants:
CONTROL_CHARS -- Mapping of ints to control character information
Functions:
is_printable -- Return whether character is printable
"""
import collections
import csv
import io
import pkg_resources
CONTROL_CHARS = None
ControlChar = collections.namedtuple(
'ControlChar', 'value,abbrev,unicode,repr,name')
def is_printable(char: int):
"""Return whether char is printable."""
return char not in CONTROL_CHARS
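# Illustrative usage (added as a sketch; it assumes ascii.csv lists the usual
# ASCII control codes):
#   is_printable(ord('A'))  # expected True
#   is_printable(0x07)      # expected False -- BEL is a control character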
def _init_control_chars():
"""Initialize CONTROL_CHARS constant."""
global CONTROL_CHARS
CONTROL_CHARS = {
char.value: char
for char in _load_control_chars()
}
def _load_control_chars():
"""Yield ControlChars read from the package's list."""
with _open_control_chars_file() as file:
yield from _load_control_chars_from_file(file)
def _open_control_chars_file():
"""Open control chars file shipped with this package."""
binary_stream = pkg_resources.resource_stream(__name__, 'ascii.csv')
return io.TextIOWrapper(binary_stream)
def _load_control_chars_from_file(file):
"""Yield ControlChars read from a CSV file."""
reader = csv.reader(file)
for row in reader:
row[0] = int(row[0])
yield ControlChar(*row)
_init_control_chars()
| 26.628571
| 74
| 0.732833
|
9df180ec84138a3c91d13d66d30c133e173343e2
| 4,507
|
py
|
Python
|
sysinv/cgts-client/cgts-client/cgtsclient/v1/certificate_shell.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | null | null | null |
sysinv/cgts-client/cgts-client/cgtsclient/v1/certificate_shell.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | null | null | null |
sysinv/cgts-client/cgts-client/cgtsclient/v1/certificate_shell.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
import os
from cgtsclient.common import utils
from cgtsclient import exc
def _print_certificate_show(certificate):
fields = ['uuid', 'certtype', 'signature', 'start_date', 'expiry_date']
if isinstance(certificate, dict):
data = [(f, certificate.get(f, '')) for f in fields]
details = ('details', certificate.get('details', ''))
else:
data = [(f, getattr(certificate, f, '')) for f in fields]
details = ('details', getattr(certificate, 'details', ''))
if details[1]:
data.append(details)
utils.print_tuple_list(data)
@utils.arg('certificate_uuid', metavar='<certificate_uuid>',
help="UUID of certificate or the reserved word 'tpm' "
"to show TPM certificate")
def do_certificate_show(cc, args):
"""Show Certificate details."""
if args.certificate_uuid == 'tpm':
certificates = cc.certificate.list()
for cert in certificates:
if cert.certtype == 'tpm_mode':
args.certificate_uuid = cert.uuid
break
else:
print("No TPM certificate installed")
return
certificate = cc.certificate.get(args.certificate_uuid)
if certificate:
_print_certificate_show(certificate)
else:
print("No Certificates installed")
def do_certificate_list(cc, args):
"""List certificates."""
certificates = cc.certificate.list()
fields = ['uuid', 'certtype', 'expiry_date']
field_labels = fields
utils.print_list(certificates, fields, field_labels, sortby=0)
@utils.arg('certificate_file',
metavar='<certificate_file>',
help='Path to Certificate file (PEM format) to install. '
'WARNING: For security reasons, the original certificate_file '
'will be removed. Installing an invalid certificate '
'could cause service interruption.')
@utils.arg('-p', '--passphrase',
metavar='<passphrase>',
help='The passphrase for the PEM file')
@utils.arg('-m', '--mode',
metavar='<mode>',
help="optional mode: 'tpm_mode',"
"'docker_registry, 'openstack', 'openstack_ca', 'ssl_ca'. "
"Default is 'ssl'.")
def do_certificate_install(cc, args):
"""Install certificate."""
certificate_file = args.certificate_file
try:
sec_file = open(certificate_file, 'rb')
except Exception:
raise exc.CommandError("Error: Could not open file %s." %
certificate_file)
data = {'passphrase': args.passphrase,
'mode': args.mode}
print("WARNING: For security reasons, the original certificate, ")
print("containing the private key, will be removed, ")
print("once the private key is processed.")
try:
response = cc.certificate.certificate_install(sec_file, data=data)
except exc.HTTPNotFound:
raise exc.CommandError('Certificate not installed %s. No response.' %
certificate_file)
except Exception as e:
raise exc.CommandError('Certificate %s not installed: %s' %
(certificate_file, e))
else:
certificates = response.get('certificates')
if certificates:
for certificate in certificates:
_print_certificate_show(certificate)
error = response.get('error')
if error:
print("WARNING: Some certificates were not installed.")
print(error)
else:
try:
os.remove(certificate_file)
except OSError:
raise exc.CommandError('Error: Could not remove the '
'certificate %s' % certificate_file)
@utils.arg('certificate_uuid', metavar='<certificate_uuid>',
help="UUID of certificate to uninstall")
@utils.arg('-m', '--mode',
metavar='<mode>',
help="Supported mode: 'ssl_ca'.")
def do_certificate_uninstall(cc, args):
"""Uninstall certificate."""
supported_modes = ['ssl_ca']
if args.mode not in supported_modes:
raise exc.CommandError('Unsupported mode: %s' % args.mode)
cc.certificate.certificate_uninstall(args.certificate_uuid)
print('Uninstalled certificate: %s' % (args.certificate_uuid))
| 34.40458
| 79
| 0.613934
|
80181ac28fd0e4c6951f3917d8ffa73a6a1d6191
| 9,387
|
py
|
Python
|
network_builder.py
|
comRamona/Multimodal-Sentiment-Analysis---ACL-2018
|
2831df3ef210cb3e259bbc43dd39159533f4a33e
|
[
"BSD-3-Clause"
] | 1
|
2018-05-22T13:08:54.000Z
|
2018-05-22T13:08:54.000Z
|
network_builder.py
|
comRamona/Multimodal-Sentiment-Analysis-Multicomp-ACL-2018
|
2831df3ef210cb3e259bbc43dd39159533f4a33e
|
[
"BSD-3-Clause"
] | null | null | null |
network_builder.py
|
comRamona/Multimodal-Sentiment-Analysis-Multicomp-ACL-2018
|
2831df3ef210cb3e259bbc43dd39159533f4a33e
|
[
"BSD-3-Clause"
] | 1
|
2020-06-02T00:57:20.000Z
|
2020-06-02T00:57:20.000Z
|
import tensorflow as tf
from network_architectures import VGGClassifier, FCCLayerClassifier
class ClassifierNetworkGraph:
def __init__(self, input_x, target_placeholder, dropout_rate,
batch_size=100, num_channels=1, n_classes=100, is_training=True, augment_rotate_flag=True,
tensorboard_use=False, use_batch_normalization=False, strided_dim_reduction=True,
network_name='VGG_classifier'):
"""
Initializes a Classifier Network Graph that can build models, train, compute losses and save summary statistics
and images
:param input_x: A placeholder that will feed the input images, usually of size [batch_size, height, width,
channels]
:param target_placeholder: A target placeholder of size [batch_size,]. The classes should be in index form
i.e. not one hot encoding, that will be done automatically by tf
:param dropout_rate: A placeholder of size [None] that holds a single float that defines the amount of dropout
to apply to the network. i.e. for 0.1 drop 0.1 of neurons
:param batch_size: The batch size
:param num_channels: Number of channels
:param n_classes: Number of classes we will be classifying
:param is_training: A placeholder that will indicate whether we are training or not
:param augment_rotate_flag: A placeholder indicating whether to apply rotations augmentations to our input data
:param tensorboard_use: Whether to use tensorboard in this experiment
:param use_batch_normalization: Whether to use batch normalization between layers
:param strided_dim_reduction: Whether to use strided dim reduction instead of max pooling
"""
self.batch_size = batch_size
if network_name == "VGG_classifier":
self.c = VGGClassifier(self.batch_size, name="classifier_neural_network",
batch_norm_use=use_batch_normalization, num_channels=num_channels,
num_classes=n_classes, layer_stage_sizes=[64, 128, 256],
strided_dim_reduction=strided_dim_reduction)
elif network_name == "FCCClassifier":
self.c = FCCLayerClassifier(self.batch_size, name="classifier_neural_network",
batch_norm_use=use_batch_normalization, num_channels=num_channels,
num_classes=n_classes, layer_stage_sizes=[64, 128, 256],
strided_dim_reduction=strided_dim_reduction)
self.input_x = input_x
self.dropout_rate = dropout_rate
self.targets = target_placeholder
self.training_phase = is_training
self.n_classes = n_classes
self.iterations_trained = 0
self.augment_rotate = augment_rotate_flag
self.is_tensorboard = tensorboard_use
self.strided_dim_reduction = strided_dim_reduction
self.use_batch_normalization = use_batch_normalization
def loss(self):
"""build models, calculates losses, saves summary statistcs and images.
Returns:
dict of losses.
"""
with tf.name_scope("losses"):
            image_inputs = self.data_augment_batch(self.input_x)  # conditionally apply augmentations
true_outputs = self.targets
# produce predictions and get layer features to save for visual inspection
preds, layer_features = self.c(image_input=image_inputs, training=self.training_phase,
dropout_rate=self.dropout_rate)
# compute loss and accuracy
correct_prediction = tf.equal(tf.argmax(preds, 1), tf.cast(true_outputs, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
crossentropy_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_outputs, logits=preds))
# add loss and accuracy to collections
tf.add_to_collection('crossentropy_losses', crossentropy_loss)
tf.add_to_collection('accuracy', accuracy)
# save summaries for the losses, accuracy and image summaries for input images, augmented images
# and the layer features
if len(self.input_x.get_shape().as_list()) == 4:
self.save_features(name="VGG_features", features=layer_features)
tf.summary.image('image', [tf.concat(tf.unstack(self.input_x, axis=0), axis=0)])
tf.summary.image('augmented_image', [tf.concat(tf.unstack(image_inputs, axis=0), axis=0)])
tf.summary.scalar('crossentropy_losses', crossentropy_loss)
tf.summary.scalar('accuracy', accuracy)
return {"crossentropy_losses": tf.add_n(tf.get_collection('crossentropy_losses'),
name='total_classification_loss'),
"accuracy": tf.add_n(tf.get_collection('accuracy'), name='total_accuracy')}
def save_features(self, name, features, num_rows_in_grid=4):
"""
Saves layer features in a grid to be used in tensorboard
:param name: Features name
:param features: A list of feature tensors
"""
for i in range(len(features)):
shape_in = features[i].get_shape().as_list()
channels = shape_in[3]
y_channels = num_rows_in_grid
x_channels = int(channels / y_channels)
activations_features = tf.reshape(features[i], shape=(shape_in[0], shape_in[1], shape_in[2],
y_channels, x_channels))
activations_features = tf.unstack(activations_features, axis=4)
activations_features = tf.concat(activations_features, axis=2)
activations_features = tf.unstack(activations_features, axis=3)
activations_features = tf.concat(activations_features, axis=1)
activations_features = tf.expand_dims(activations_features, axis=3)
tf.summary.image('{}_{}'.format(name, i), activations_features)
def rotate_image(self, image):
"""
Rotates a single image
:param image: An image to rotate
        :return: A rotated or a non-rotated image, depending on the result of the flip
"""
no_rotation_flip = tf.unstack(
tf.random_uniform([1], minval=1, maxval=100, dtype=tf.int32, seed=None,
                              name=None))  # get a random integer between 1 and 99 (maxval is exclusive)
flip_boolean = tf.less_equal(no_rotation_flip[0], 50)
# if that number is less than or equal to 50 then set to true
random_variable = tf.unstack(tf.random_uniform([1], minval=1, maxval=3, dtype=tf.int32, seed=None, name=None))
        # get a random k of 1 or 2 (maxval is exclusive) for how many 90-degree rotations to apply,
        # i.e. k=1 means 90 degrees, k=2 means 180 degrees
image = tf.cond(flip_boolean, lambda: tf.image.rot90(image, k=random_variable[0]),
lambda: image) # if flip_boolean is true the rotate if not then do not rotate
return image
def rotate_batch(self, batch_images):
"""
Rotate a batch of images
:param batch_images: A batch of images
:return: A rotated batch of images (some images will not be rotated if their rotation flip ends up False)
"""
shapes = map(int, list(batch_images.get_shape()))
if len(list(batch_images.get_shape())) < 4:
return batch_images
batch_size, x, y, c = shapes
with tf.name_scope('augment'):
batch_images_unpacked = tf.unstack(batch_images)
new_images = []
for image in batch_images_unpacked:
new_images.append(self.rotate_image(image))
new_images = tf.stack(new_images)
new_images = tf.reshape(new_images, (batch_size, x, y, c))
return new_images
def data_augment_batch(self, batch_images):
"""
        Augments data with a variety of augmentations; in its current state it only performs rotations.
:param batch_images: A batch of images to augment
:return: Augmented data
"""
batch_images = tf.cond(self.augment_rotate, lambda: self.rotate_batch(batch_images), lambda: batch_images)
return batch_images
def train(self, losses, learning_rate=1e-3, beta1=0.9):
"""
Args:
losses dict.
Returns:
train op.
"""
c_opt = tf.train.AdamOptimizer(beta1=beta1, learning_rate=learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Needed for correct batch norm usage
with tf.control_dependencies(update_ops):
c_error_opt_op = c_opt.minimize(losses["crossentropy_losses"], var_list=self.c.variables,
colocate_gradients_with_ops=True)
return c_error_opt_op
def init_train(self):
"""
Builds graph ops and returns them
:return: Summary, losses and training ops
"""
losses_ops = self.loss()
c_error_opt_op = self.train(losses_ops)
summary_op = tf.summary.merge_all()
return summary_op, losses_ops, c_error_opt_op
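# Minimal usage sketch (added for illustration; the placeholder shapes and the
# 28x28x1 input are assumptions, not part of the original file):
#
#   input_x = tf.placeholder(tf.float32, [100, 28, 28, 1])
#   targets = tf.placeholder(tf.int32, [100])
#   dropout_rate = tf.placeholder(tf.float32, [])
#   is_training = tf.placeholder(tf.bool, [])
#   augment = tf.placeholder(tf.bool, [])
#
#   graph = ClassifierNetworkGraph(input_x, targets, dropout_rate,
#                                  num_channels=1, n_classes=10,
#                                  is_training=is_training,
#                                  augment_rotate_flag=augment)
#   summary_op, losses_ops, train_op = graph.init_train()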
| 51.861878
| 119
| 0.64291
|
083a23b94a7d06531ca7075340833ab4cb66c337
| 7,504
|
py
|
Python
|
tests/proc/test_tensorbox.py
|
lfd/pennylane
|
c0d269e5f1eba2f9d033bd9b6a79c10a11f4228a
|
[
"Apache-2.0"
] | null | null | null |
tests/proc/test_tensorbox.py
|
lfd/pennylane
|
c0d269e5f1eba2f9d033bd9b6a79c10a11f4228a
|
[
"Apache-2.0"
] | null | null | null |
tests/proc/test_tensorbox.py
|
lfd/pennylane
|
c0d269e5f1eba2f9d033bd9b6a79c10a11f4228a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for TensorBox class"""
import numpy as np
import pytest
import pennylane as qml
from pennylane.proc.numpy_box import NumpyBox
def test_creation_from_list():
"""Test that a NumpyBox is automatically created from a list"""
x = [0.1, 0.2, 0.3]
res = qml.proc.TensorBox(x)
assert isinstance(res, NumpyBox)
assert res.interface == "numpy"
assert isinstance(res.unbox(), np.ndarray)
assert np.all(res.unbox() == x)
def test_creation_from_tuple():
"""Test that a NumpyBox is automatically created from a tuple"""
x = (0.1, 0.2, 0.3)
res = qml.proc.TensorBox(x)
assert isinstance(res, NumpyBox)
assert res.interface == "numpy"
assert isinstance(res.unbox(), np.ndarray)
assert np.all(res.unbox() == x)
def test_creation_from_tensorbox():
"""Test that a tensorbox input simply returns it"""
x = qml.proc.TensorBox(np.array([0.1, 0.2, 0.3]))
res = qml.proc.TensorBox(x)
assert x is res
def test_unknown_input_type():
"""Test that an exception is raised if the input type
is unknown"""
with pytest.raises(ValueError, match="Unknown tensor type"):
qml.proc.TensorBox(True)
def test_astensor():
"""Test conversion of sequences to numpy arrays"""
x = np.array([0.1, 0.2, 0.3])
y = [0.4, 0.5, 0.6]
res = qml.proc.TensorBox(x).astensor(y)
assert isinstance(res, np.ndarray)
assert np.all(res == y)
def test_cast():
"""Test that arrays can be cast to different dtypes"""
x = np.array([1, 2, 3])
res = qml.proc.TensorBox(x).cast(np.float64)
expected = np.array([1.0, 2.0, 3.0])
assert np.all(res == expected)
res = qml.proc.TensorBox(x).cast(np.dtype("int8"))
expected = np.array([1, 2, 3], dtype=np.int8)
assert np.all(res == expected)
res = qml.proc.TensorBox(x).cast("complex128")
expected = np.array([1, 2, 3], dtype=np.complex128)
assert np.all(res == expected)
def test_len():
"""Test length"""
x = np.array([[1, 2], [3, 4]])
res = qml.proc.TensorBox(x)
assert len(res) == len(x) == 2
def test_ufunc_compatibility():
"""Test that the NumpyBox class has ufunc compatibility"""
x = np.array([0.1, 0.2, 0.3])
res = np.sum(np.sin(qml.proc.TensorBox(x)))
assert res == np.sin(0.1) + np.sin(0.2) + np.sin(0.3)
x = np.array([0.1, 0.2, 0.3])
res = np.sum(np.sin(qml.proc.TensorBox(x), out=np.empty([3])))
assert res == np.sin(0.1) + np.sin(0.2) + np.sin(0.3)
def test_inplace_addition():
"""Test that in-place addition works correctly"""
x = qml.proc.TensorBox(np.array([0.0, 0.0, 0.0]))
np.add.at(x, [0, 1, 1], 1)
assert np.all(x == np.array([1.0, 2.0, 0.0]))
def test_addition():
"""Test addition between tensors and arrays"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 0], [0, 1]])
xT = qml.proc.TensorBox(x)
res = xT + y
assert np.all(res.unbox() == x + y)
yT = qml.proc.TensorBox(y)
res = x + yT
assert np.all(res.unbox() == x + y)
res = xT + yT
assert np.all(res.unbox() == x + y)
def test_subtraction():
"""Test addition between tensors and arrays"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 0], [0, 1]])
xT = qml.proc.TensorBox(x)
res = xT - y
assert np.all(res.unbox() == x - y)
yT = qml.proc.TensorBox(y)
res = x - yT
assert np.all(res.unbox() == x - y)
res = xT - yT
assert np.all(res.unbox() == x - y)
def test_multiplication():
"""Test multiplication between tensors and arrays"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 0], [0, 1]])
xT = qml.proc.TensorBox(x)
res = xT * y
assert np.all(res.unbox() == x * y)
yT = qml.proc.TensorBox(y)
res = x * yT
assert np.all(res.unbox() == x * y)
res = xT * yT
assert np.all(res.unbox() == x * y)
def test_division():
"""Test addition between tensors and arrays"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 4], [0.25, 1]])
xT = qml.proc.TensorBox(x)
res = xT / y
assert np.all(res.unbox() == x / y)
yT = qml.proc.TensorBox(y)
res = x / yT
assert np.all(res.unbox() == x / y)
res = xT / yT
assert np.all(res.unbox() == x / y)
res = 5 / yT
assert np.all(res.unbox() == 5 / y)
res = yT / 5
assert np.all(res.unbox() == y / 5)
def test_exponentiation():
"""Test exponentiation between tensors and arrays"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 0], [0, 1]])
xT = qml.proc.TensorBox(x)
res = xT ** 2
assert np.all(res.unbox() == x ** 2)
yT = qml.proc.TensorBox(y)
res = 2 ** yT
assert np.all(res.unbox() == 2 ** y)
res = xT ** yT
assert np.all(res.unbox() == x ** y)
def test_unbox_list():
"""Test unboxing a mixed list works correctly"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 0], [0, 1]])
xT = qml.proc.TensorBox(x)
res = xT.unbox_list([y, xT, x])
assert np.all(res == [y, x, x])
def test_numpy():
"""Test that calling numpy() returns a NumPy array representation
of the TensorBox"""
x = np.array([[1, 2], [3, 4]])
xT = qml.proc.TensorBox(x)
assert isinstance(xT.numpy(), np.ndarray)
assert np.all(xT.numpy() == x)
def test_shape():
"""Test that arrays return the right shape"""
x = np.array([[[1, 2], [3, 4]]])
x = qml.proc.TensorBox(x)
res = x.shape
assert res == (1, 2, 2)
def test_expand_dims():
"""Test that dimension expansion works"""
x = np.array([1, 2, 3])
xT = qml.proc.TensorBox(x)
res = xT.expand_dims(axis=1)
expected = np.expand_dims(x, axis=1)
assert isinstance(res, NumpyBox)
assert np.all(res == expected)
def test_ones_like():
"""Test that all ones arrays are correctly created"""
x = np.array([[1, 2, 3], [4, 5, 6]])
xT = qml.proc.TensorBox(x)
res = xT.ones_like()
expected = np.ones_like(x)
assert isinstance(res, NumpyBox)
assert np.all(res == expected)
def test_stack():
"""Test that arrays are correctly stacked together"""
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 0], [0, 1]])
xT = qml.proc.TensorBox(x)
res = xT.stack([y, xT, x])
assert np.all(res == np.stack([y, x, x]))
def test_transpose():
"""Test that the transpose is correct"""
x = np.array([[1, 2], [3, 4]])
xT = qml.proc.TensorBox(x)
assert np.all(xT.T == x.T)
def test_requires_grad():
"""Test that the requires grad attribute always returns False"""
x = np.array([[1, 2], [3, 4]])
xT = qml.proc.TensorBox(x)
assert not xT.requires_grad
| 27.487179
| 75
| 0.573561
|
8ae4363eced64a807a7a8ada1007057e4adcec3b
| 16,834
|
py
|
Python
|
change_bomb_rate.py
|
minoriwww/MeterDetection
|
0373cd30fe8a1de4886a8a60860c4714b2b28333
|
[
"MIT"
] | 7
|
2019-11-13T01:02:14.000Z
|
2021-08-10T10:03:46.000Z
|
change_bomb_rate.py
|
minoriwww/MeterDetection
|
0373cd30fe8a1de4886a8a60860c4714b2b28333
|
[
"MIT"
] | null | null | null |
change_bomb_rate.py
|
minoriwww/MeterDetection
|
0373cd30fe8a1de4886a8a60860c4714b2b28333
|
[
"MIT"
] | 1
|
2019-12-26T10:34:18.000Z
|
2019-12-26T10:34:18.000Z
|
import os
dirs = "sitaiqu/rates/"
if not os.path.exists(dirs):
os.makedirs(dirs)
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import data_processing0 as dp
import datetime
import math
import random
from scipy.spatial.distance import pdist, squareform
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import tensorflow as tf
config=tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
config.gpu_options.allow_growth=True
tf.Session(config=config)
import numpy as np
np.random.seed(1337)
import keras
from keras.models import Sequential
from keras.layers import Dense
import matplotlib as mpl
import random
from imutils import paths
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
plt.switch_backend('agg')
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.wrappers.scikit_learn import KerasRegressor
from keras.preprocessing.image import img_to_array
from keras.utils import to_categorical
from keras import regularizers
from keras import backend as K
from keras.models import Model, Sequential
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_squared_error
from keras.optimizers import SGD, Adadelta, Adam
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint,Callback
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM, Reshape
import keras.layers.convolutional as conv
from keras.regularizers import l1, l2
import sklearn.metrics as m
import time
from scipy import interp
import cv2
from sklearn.model_selection import StratifiedKFold
# from DProcess import convertRawToXY
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
import hyperopt as hp
from hyperopt import fmin,tpe,hp,STATUS_OK,Trials
from keras.utils import plot_model
from ROC_PR_threshold import roc_curve
import sys
seed = 7
np.random.seed(seed)
def random_pick(some_list,probabilities):
x=random.uniform(0,1)
cumulative_probability=0.0
for item,item_probability in zip(some_list,probabilities):
cumulative_probability += item_probability
if x < cumulative_probability: break
return item
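# A minimal sketch with hypothetical weights: random_pick draws a single item,
# where each item's chance is (approximately) its entry in `probabilities`.
def _random_pick_demo():
    picks = [random_pick([0, 1], [0.5, 0.5]) for _ in range(10)]
    assert all(p in (0, 1) for p in picks)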
def op(id,df,rate,file_path):
c=random_pick([0,1],[rate,1-rate])
print(c,id)
if c==0 :
single_input(id=id,df=df,DATA_PATH=file_path)
else:
single_bomb(id = id,wat=df,DATA_PATH=file_path)
def rec_plot(s, eps=None, steps=None):
    if eps is None: eps = 0.1
    if steps is None: steps = 100
d = pdist(s[:,None])
d = np.floor(d/eps)
d[d>steps] = steps
Z = squareform(d)
return Z
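# A minimal usage sketch with hypothetical values: rec_plot turns a 1-D series
# into a square recurrence matrix by binning pairwise distances into at most
# `steps` levels; the matrix is later rendered with plt.imshow.
def _rec_plot_demo():
    demo_series = np.array([0.0, 0.5, 1.0, 0.5, 0.0])
    demo_rp = rec_plot(demo_series, eps=0.1, steps=100)
    assert demo_rp.shape == (5, 5)        # one row/column per sample
    assert np.all(np.diag(demo_rp) == 0)  # zero distance from a point to itself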
def single_bomb(id,DATA_PATH = "", date = "2015/05/01",wat=None):
if not os.path.exists(DATA_PATH+'csv'):
os.makedirs(DATA_PATH+'csv')
if not os.path.exists(DATA_PATH+'png'):
os.makedirs(DATA_PATH+'png')
d = single_input(id=id,mode=1,df=wat)
df = d.sample(n=1)
df.reset_index(inplace=True)
date = df.loc[0, 'date']
id = id
new = wat[(wat['date']>=date) & (wat['meterID']==id)]
#print(new)
def update(x):
i=(pd.to_datetime(x.loc['date'])-pd.to_datetime(date)).days
x.loc['usage']=x.loc['usage']*(1+i/100)
#i = float(i)
#x.loc['usage'] += x.loc['usage'] * (0.05 * i / math.sqrt((1 + math.pow((i / 15), 2))))
return x.loc['usage']
d1=d[d['date']<date]
d2=d[d['date']>=date]
d2.reset_index(inplace=True)
for i in range(0,d2.iloc[:,0].size-1):
d2.loc[i,'usage']=update(d2[['date','usage']].iloc[i])
d=d1.append(d2)
d=d.drop(columns=['index'])
d=d[['id','usage','date','com_date','week','month','year']]
x=d['usage']
rec = rec_plot(x)
plt.imshow(rec)
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.savefig(DATA_PATH+'png/' + str(id) + "single_bomb.png",dpi=300)
d.to_csv(path_or_buf=DATA_PATH+'csv/' + str(id) + "single_bomb.csv", encoding="utf-8",index=False)
def single_input(id=123, DATA_PATH="", mode=0, df=None):
if not os.path.exists(DATA_PATH+'csv'):
os.makedirs(DATA_PATH+'csv')
if not os.path.exists(DATA_PATH+'png'):
os.makedirs(DATA_PATH+'png')
df = df[df['meterID'] == id]
res = pd.DataFrame()
res['id'] = df['meterID']
res['usage'] = df['usage']
res['date'] = pd.to_datetime(df['date'])
res = dp.date_format(res, base='2014/8/3')
res = res.sort_values(by='date')
# df = df[(df['date'] >= pd.to_datetime(begin)) & (df['date'] <= pd.to_datetime(end))]
x = res['usage']
rec = rec_plot(x)
plt.imshow(rec)
plt.xticks([])
plt.yticks([])
plt.axis('off')
if mode ==0 :
plt.savefig(DATA_PATH+'png/'+str(id)+"single_input.png",dpi=300)
res.to_csv(path_or_buf=DATA_PATH+'csv/'+ str(id) + "single_input.csv",encoding="utf-8", index=False)
return res
def generate_sample(filepath,rate = 0.5,filename="sitaiqu/kilowatt_everyday_2year.xlsx"):
df = pd.read_excel(filename, sheet_name=dp.SN)
df.rename(columns={df.columns[0]: "index",
df.columns[1]: "redidentsID",
df.columns[2]: "userID",
df.columns[3]: "meterID",
df.columns[4]: "date",
df.columns[5]: "usage",
}, inplace=True)
df = df[df['meterID'] != dp.SMID]
df = df.drop_duplicates(['meterID', 'date'])
df['date'] = pd.to_datetime(df['date'])
df = df[df['usage'] >= 0]
df = df.sort_values(by='date')
ids = df['meterID'].to_frame()
ids = ids.drop_duplicates().reset_index()
# ids['meterID'].apply(op)
for i in range(0, ids.iloc[:, 0].size):
op(ids.loc[i, 'meterID'], df,rate,filepath)
def helper(x):
    # materialize the parsed values first; iterating a map() twice would exhaust it
    values = [int(v) for v in x.strip('[').strip(']').split(',')]
    d = {}
    for counter, value in enumerate(values):
        # key encodes "<vector length>-<position>" so columns stay distinguishable
        k = str(len(values)) + "-" + str(counter)
        d[k] = value
    return d
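# A minimal sketch with a hypothetical input: helper turns a stringified
# one-hot vector such as "[1, 0, 0]" into a dict keyed by "<length>-<index>".
def _helper_demo():
    assert helper("[1, 0, 0]") == {"3-0": 1, "3-1": 0, "3-2": 0}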
def png_folder_processing(path,seed=42):
print("[INFO] loading images...")
data = []
labels = []
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(path)))
random.seed(seed)
#random.shuffle(imagePaths)
# loop over the input images
for imagePath in imagePaths:
# load the image, pre-process it, and store it in the data list
print(imagePath)
image = cv2.imread(imagePath)
image = cv2.resize(image, (128, 128))
#image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = img_to_array(image)
data.append(image)
# extract the class label from the image path and update the
# labels list
if str.find(imagePath,'bomb')!=-1:
# y_array.append(1)
labels.append(1)
print('1')
else:
# y_array.append(0)
labels.append(0)
print('0')
# scale the raw pixel intensities to the range [0, 1]
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# convert the labels from integers to vectors
return data,labels
def csv_folder_processing(DATAPATH='samples2/'):
X_array = []
y_array = []
line_nb = 0
col_num = 0
num_users = 1
for lists in os.listdir(DATAPATH):
sub_path = os.path.join(DATAPATH, lists)
# print(sub_path)
if os.path.isfile(sub_path):
num_users += 1
X = np.zeros((num_users, 729, 23), dtype=np.float32)
y = np.zeros(num_users, dtype=np.float32)
g = os.walk(DATAPATH)
for path,dir_list,file_list in g:
for j, file_name in enumerate(file_list, 1):
print(file_name)
if not file_name.startswith('.'): X_csv = csv_processing(csv_file_name = os.path.join(path, file_name), padding_line = 729)
X[j] = X_csv[:729, ...]
#
if file_name.split('.')[0].endswith('bomb'):
y[j] = 1
print('1')
else:
y[j] = 0
print('0')
return X
def date_helper(x, column_name):
return pd.Series(list(map(int, x[column_name].strip('[').strip(']').split(','))))
def csv_processing(csv_file_name = "", padding_line = 729):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str', 'year': 'str', 'numbers':'int', 'log':'float', 'id': 'str', 'usage': 'float'}
# , 'A_mean': 'float', 'V_mean': 'float'}
parse_dates = ['date']
print("start "+ csv_file_name)
df = pd.read_csv(csv_file_name, header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
df = df.sample(frac=1)
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
# df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() #7
# df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() #12
# df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() #3
df_week = df.apply(lambda x: date_helper(x, 'week'), axis=1)
df_month = df.apply(lambda x: date_helper(x, 'month'), axis=1)
df_year = df.apply(lambda x: date_helper(x, 'year'), axis=1)
'''
X_train = df[['super','com_date']].as_matrix()
X_train = np.column_stack((X_train, df_week, df_month, df_year))
Y_train = df[['error']].as_matrix()
'''
df_empty = df[[ 'usage', 'com_date']].copy()
# print(df_empty)
# ss_x = preprocessing.MaxAbsScaler()
ss_x = preprocessing.StandardScaler()
    array_new = ss_x.fit_transform(df_empty.iloc[:, [0]])
df_usage = pd.DataFrame(array_new)
    array_new = ss_x.fit_transform(df_empty.iloc[:, [1]])
df_com_date = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
# df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
# df_month = pd.DataFrame(df_month)
df_year = ss_x.fit_transform(df_year)
# df_year = pd.DataFrame(df_year)
# X_train = df_empty.ix[:,[2]].as_matrix()
X_train = np.column_stack((df_usage, df_week, df_month, df_year)) #+ 1 7 12 3 = 23
if df.shape[0]<=padding_line:
X_train = np.pad(X_train, ((0, padding_line), (0,0)), 'edge')
return X_train
def Combine_model(X1_train,X2_train):
VGGmodel = keras.applications.vgg16.VGG16(include_top=False
, weights='imagenet'
# , input_tensor=inputs
, input_shape=X1_train.shape[1:]
, pooling=None
# , classes=1000
)
x1= Flatten()(VGGmodel.output)
x1= Dropout(0.5)(x1)
x1= Dense(128)(x1)
xs = Input(X2_train.shape[1:])
    x_image = conv.Convolution1D(16, 5, padding='same', kernel_initializer='he_normal', kernel_regularizer=l1(0.01))(xs)
x_image = BatchNormalization()(x_image)
x_image = Activation('relu')(x_image)
    x_image = conv.Convolution1D(64, 5, padding='same', kernel_initializer='he_normal', kernel_regularizer=l1(0.01))(x_image)
x_image = Activation('relu')(x_image)
# x_image = BatchNormalization()(x_image)
# x_image = conv.Convolution2D(128, (2, 2), padding='same',init='he_normal')(x_image)
# x_image = Activation('relu')(x_image)
# x_image = BatchNormalization()(x_image)
x_image = Flatten()(x_image)
    x_image = Dense(128, kernel_initializer='he_normal', activation='relu', kernel_regularizer=regularizers.l2(0.01),
                    activity_regularizer=regularizers.l1(0.01))(x_image)
x_image = keras.layers.Add()([x1, x_image])
x_image = Dropout(0.5)(x_image)
    preds = Dense(2, kernel_initializer='he_normal', activation='sigmoid')(x_image)
ada = Adadelta(lr=1e-3, rho=0.95, epsilon=1e-6)
model = Model([VGGmodel.input,xs], preds)
# Compile model
model.summary()
# model.compile(loss='mean_squared_error', optimizer=sgd)
model.compile(loss='categorical_crossentropy', optimizer=ada, metrics=['accuracy'])
#categorical_crossentropy
#binary_crossentropy
# history = model.fit(X_train, Y_train,
# batch_size = 20,
# epochs = 50,
# verbose = 1,
# validation_data = (X_test, Y_test))
# score = model.evaluate(X_test, Y_test, verbose=1)
# print('Test loss:', score[0])
# print('Test accuracy:', score[1])
return model
def kfold_evaluation_plot(X1, X2,y):
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
recalls = []
precisions = []
praucs = []
mean_precision = np.linspace(0, 1, 100)
mean_recall = np.linspace(0, 1, 100)
kf = KFold(n_splits=3,shuffle=True)
# print("Y:")
# print(y)
early_stopping = EarlyStopping(monitor='val_acc', verbose=1, patience=30)
lw = 2
i = 1
for train_index, test_index in kf.split(X1):
results_filename = str(time.time())
checkpoint = ModelCheckpoint("./weights/dnn_weights" + results_filename + ".h5", monitor='val_loss', verbose=1,
save_best_only=True)
X1_train, X1_test = X1[train_index], X1[test_index]
Y_train, Y_test = y[train_index], y[test_index]
X1_val, Y_val = X1_test, Y_test
X2_train, X2_test = X2[train_index], X2[test_index]
        X2_val = X2_test
Y_test = convert_y(Y_test)
# print(Y_test)
Y_train= convert_y(Y_train)
model = Combine_model(X1_train,X2_train)
model.fit([X1_train,X2_train], Y_train, batch_size=1, epochs=1000,
validation_data=([X1_test,X2_test], Y_test),
callbacks=[early_stopping, checkpoint])
prediction = model.predict([X1_test,X2_test])
print(prediction[1])
print(Y_test[1])
fpr, tpr, thresholds = m.roc_curve(Y_test.T[1], prediction.T[1], pos_label=1)
print("TPR:")
print(tpr)
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = m.auc(fpr, tpr)
aucs.append(roc_auc)
i += 1
# plt.subplot(1, 2, 1)
# plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
# label='Chance', alpha=.8)
print(tprs)
mean_tpr = np.mean(tprs, axis=0)
print("Mean_FPR")
print (mean_fpr, mean_tpr)
mean_tpr[-1] = 1.0
mean_auc = m.auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
print("end of function")
print (mean_auc,std_auc)
return mean_auc, std_auc
def convert_y(y):
y_dim2=[]
for i in y:
if (i==0): y_dim2.append([1,0])
if (i==1): y_dim2.append([0,1])
return np.array(y_dim2)
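# A minimal sketch with hypothetical labels: convert_y expands binary class
# labels into two-column one-hot rows for the two-unit output layer.
def _convert_y_demo():
    assert np.array_equal(convert_y(np.array([0, 1, 1])),
                          np.array([[1, 0], [0, 1], [0, 1]]))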
def run_cnn(path):
X1, y = png_folder_processing(path = path+'png/')
X2 = csv_folder_processing(DATAPATH = path+'csv/')
mean_auc, std_auc = kfold_evaluation_plot(X1, X2, y)
# print (mean_auc, std_auc)
return mean_auc, std_auc
if __name__ == '__main__':
# rate = 0.9
# while rate<0.96:
# file_path = dirs+str(rate)+'/'
# if not os.path.exists(file_path):
# os.makedirs(file_path)
# generate_sample(file_path,rate)
# rate+=0.01
auc_x=[]
auc_y=[]
stds = []
rate = 0.5
while rate<1:
rate = round(rate,2)
print ("Now working with rate ="+str(rate))
file_path = dirs+str(rate)+'/'
# if not os.path.exists(file_path):
# os.makedirs(file_path)
#generate_sample(file_path,rate)
aucc, std = run_cnn(file_path)
print ("************************************************")
print (aucc, std)
auc_x.append(rate)
auc_y.append(aucc)
stds.append(std)
rate += 0.1
print(auc_x)
print(auc_y)
upper = []
lower = []
for i in range(len(auc_y)):
upper.append(auc_y[i] + stds[i])
lower.append(auc_y[i] - stds[i])
plt.plot(auc_x,auc_y)
plt.fill_between(auc_x, lower, upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlabel('Rate')
plt.ylabel('Area Under Curve')
plt.savefig("sitaiqu/rates/rate"+str(time.time())+".png", dpi=300)
| 30.276978
| 193
| 0.61453
|
db4a1a6383b5bbc895eba85d0d432153717af512
| 670
|
py
|
Python
|
bin/checkgloss.py
|
encukou/teachtogether.tech
|
64c4e4b3ee6d45a0b489a5e5130123634aa32fea
|
[
"CC-BY-4.0"
] | null | null | null |
bin/checkgloss.py
|
encukou/teachtogether.tech
|
64c4e4b3ee6d45a0b489a5e5130123634aa32fea
|
[
"CC-BY-4.0"
] | 1
|
2018-09-14T10:12:27.000Z
|
2018-09-18T07:46:24.000Z
|
bin/checkgloss.py
|
encukou/teachtogether.tech
|
64c4e4b3ee6d45a0b489a5e5130123634aa32fea
|
[
"CC-BY-4.0"
] | 1
|
2018-10-20T20:04:56.000Z
|
2018-10-20T20:04:56.000Z
|
#!/usr/bin/env python
import sys
import re
DEF = re.compile(r'\*\*.+?\*\*{:#(g:.+?)}', re.DOTALL)
REF = re.compile(r'\[.+?\]\(#(g:.+?)\)', re.DOTALL)
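# A minimal sketch with hypothetical markdown: DEF captures glossary anchors from
# definitions like "**term**{:#g:term}", REF from references like "[term](#g:term)".
def _pattern_demo():
    assert DEF.findall("**term**{:#g:term}") == ["g:term"]
    assert REF.findall("see [term](#g:term)") == ["g:term"]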
def main(filenames):
defs = set()
refs = set()
for path in filenames:
with open(path, 'r') as reader:
doc = reader.read()
defs.update({d for d in DEF.findall(doc)})
refs.update({r for r in REF.findall(doc)})
report('missing', refs - defs)
report('unused', defs - refs)
def report(title, items):
if not items: return
print(title)
for i in sorted(items):
print(' {}'.format(i))
if __name__ == '__main__':
main(sys.argv[1:])
| 20.9375
| 54
| 0.541791
|
d2ffeb2b14e926a50c9465081bef69ccf0fd0041
| 1,280
|
py
|
Python
|
LeetCode/2018-12-21-74-Search-a-2D-Matrix.py
|
HeRuivio/-Algorithm
|
1fbe6256630758fda3af68f469471ee246730afc
|
[
"MIT"
] | 5
|
2018-10-30T05:07:32.000Z
|
2019-06-18T08:11:38.000Z
|
LeetCode/2018-12-21-74-Search-a-2D-Matrix.py
|
HeRuivio/-Algorithm
|
1fbe6256630758fda3af68f469471ee246730afc
|
[
"MIT"
] | 1
|
2020-05-09T09:05:16.000Z
|
2020-05-09T09:05:16.000Z
|
LeetCode/2018-12-21-74-Search-a-2D-Matrix.py
|
HeRuivio/-Algorithm
|
1fbe6256630758fda3af68f469471ee246730afc
|
[
"MIT"
] | 2
|
2020-05-09T09:02:22.000Z
|
2020-12-09T13:23:00.000Z
|
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2018-12-21 10:33:47
# @Last Modified by: 何睿
# @Last Modified time: 2018-12-21 10:56:40
class Solution:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
        # If the matrix is empty, return False
if not matrix or not matrix[0]:
return False
        # Binary search: first determine which row the target is in
left, right, middle = 0, len(matrix)-1, 0
while left <= right:
middle = left+((right-left) >> 1)
if matrix[middle][0] < target:
left = middle+1
elif matrix[middle][0] > target:
right = middle-1
else:
return True
row = right
left, right = 0, len(matrix[0])-1
        # Binary search: determine the target's column position within that row
while left <= right:
middle = left+((right-left) >> 1)
if matrix[row][middle] < target:
left = middle+1
elif matrix[row][middle] > target:
right = middle - 1
else:
return True
return False
if __name__ == "__main__":
so = Solution()
res = so.searchMatrix(
[[]], 1001)
print(res)
| 27.234043
| 49
| 0.480469
|
f64950a91d30cbba26a563b3e6f55220133ce1d6
| 3,226
|
py
|
Python
|
src/python/examples/simple_http_model_control.py
|
theHamsta/client
|
a0cfb8adc1f281da650c0974b4c7ddd5dc751358
|
[
"BSD-3-Clause"
] | 107
|
2021-04-20T03:14:18.000Z
|
2022-03-29T02:53:26.000Z
|
src/python/examples/simple_http_model_control.py
|
theHamsta/client
|
a0cfb8adc1f281da650c0974b4c7ddd5dc751358
|
[
"BSD-3-Clause"
] | 28
|
2021-05-05T12:05:57.000Z
|
2022-03-30T21:10:11.000Z
|
src/python/examples/simple_http_model_control.py
|
theHamsta/client
|
a0cfb8adc1f281da650c0974b4c7ddd5dc751358
|
[
"BSD-3-Clause"
] | 90
|
2021-04-20T14:12:34.000Z
|
2022-03-30T02:40:14.000Z
|
#!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
import tritonclient.http as httpclient
from tritonclient.utils import InferenceServerException
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-u',
'--url',
type=str,
required=False,
default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
FLAGS = parser.parse_args()
try:
triton_client = httpclient.InferenceServerClient(url=FLAGS.url,
verbose=FLAGS.verbose)
except Exception as e:
print("context creation failed: " + str(e))
sys.exit(1)
model_name = 'simple'
# There are seven models in the repository directory
if len(triton_client.get_model_repository_index()) != 7:
sys.exit(1)
triton_client.load_model(model_name)
if not triton_client.is_model_ready(model_name):
sys.exit(1)
triton_client.unload_model(model_name)
if triton_client.is_model_ready(model_name):
sys.exit(1)
# Trying to load wrong model name should emit exception
try:
triton_client.load_model("wrong_model_name")
except InferenceServerException as e:
if "failed to load" in e.message():
print("PASS: model control")
else:
sys.exit(1)
| 40.835443
| 80
| 0.67297
|
b60914f846ce453854ee6e675c9795045a9b2091
| 1,614
|
py
|
Python
|
opentelemetry-api/src/opentelemetry/context/thread_local_context.py
|
Skeen/opentelemetry-python
|
ad7a809a6dda7fbf73d9f47a3ff2ed85752cd0af
|
[
"Apache-2.0"
] | 1
|
2020-01-15T06:58:27.000Z
|
2020-01-15T06:58:27.000Z
|
opentelemetry-api/src/opentelemetry/context/thread_local_context.py
|
Skeen/opentelemetry-python
|
ad7a809a6dda7fbf73d9f47a3ff2ed85752cd0af
|
[
"Apache-2.0"
] | null | null | null |
opentelemetry-api/src/opentelemetry/context/thread_local_context.py
|
Skeen/opentelemetry-python
|
ad7a809a6dda7fbf73d9f47a3ff2ed85752cd0af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import typing # pylint: disable=unused-import
from . import base_context
class ThreadLocalRuntimeContext(base_context.BaseRuntimeContext):
class Slot(base_context.BaseRuntimeContext.Slot):
_thread_local = threading.local()
def __init__(self, name: str, default: "object"):
# pylint: disable=super-init-not-called
self.name = name
self.default = base_context.wrap_callable(
default
) # type: typing.Callable[..., object]
def clear(self) -> None:
setattr(self._thread_local, self.name, self.default())
def get(self) -> "object":
try:
got = getattr(self._thread_local, self.name) # type: object
return got
except AttributeError:
value = self.default()
self.set(value)
return value
def set(self, value: "object") -> None:
setattr(self._thread_local, self.name, value)
| 35.086957
| 76
| 0.651177
|
a6a059d81b4ba42cfea83b3559ee52fc73c2bf07
| 3,297
|
py
|
Python
|
config/validateRelease.py
|
timlehr/gaffer
|
354acd6af7500e0bd1ce19d7c417929e2f0a919e
|
[
"BSD-3-Clause"
] | null | null | null |
config/validateRelease.py
|
timlehr/gaffer
|
354acd6af7500e0bd1ce19d7c417929e2f0a919e
|
[
"BSD-3-Clause"
] | null | null | null |
config/validateRelease.py
|
timlehr/gaffer
|
354acd6af7500e0bd1ce19d7c417929e2f0a919e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
##########################################################################
#
# Copyright (c) 2019, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import argparse
import os
import tarfile
# A script to validate a Gaffer release archive
parser = argparse.ArgumentParser()
parser.add_argument(
"--archive",
dest = "archive",
required = True,
help = "The path to the build archive to publish."
)
args = parser.parse_args()
if not os.path.exists( args.archive ) :
parser.exit( 1, "The specified archive '%s' does not exist." % args.archive )
print( "Validating %s" % args.archive )
# Validate the release contains our mandatory components
requiredPaths = [
os.path.join( "doc", "gaffer", "html", "index.html" ),
os.path.join( "resources", "examples" )
]
for module in (
"Gaffer", "GafferAppleseed", "GafferArnold", "GafferDelight",
"GafferDispatch", "GafferImage", "GafferOSL", "GafferScene",
"GafferTractor", "GafferVDB"
) :
requiredPaths.append( os.path.join( "python", module ) )
requiredPaths.append( os.path.join( "python", "%sUI" % module ) )
with tarfile.open( args.archive, "r:gz" ) as a:
# getmember still reads the whole archive, so might as well grab them all
# as we go. We need to strip the first directory from all paths too as that
# contains the release name
archivePaths = { os.path.join( *m.name.split( os.sep )[1:] ) for m in a.getmembers() if os.sep in m.name }
missing = [ p for p in requiredPaths if p not in archivePaths ]
if missing :
parser.exit( 1,
"ERROR: The following are missing from the archive:\n%s"
% "\n".join( missing )
)
print( "Archive appears OK" )
| 36.230769
| 107
| 0.686078
|
07c7f0939a3ae12a7160251818a7e2ebdb19015f
| 1,165
|
py
|
Python
|
Algorithms/CaesarCipher.py
|
avyayjain/Python_Scripts
|
d89926a84d5df3cbd57244df27f215891aa60d20
|
[
"Unlicense"
] | 20
|
2020-08-17T13:49:03.000Z
|
2022-01-26T18:04:14.000Z
|
Algorithms/CaesarCipher.py
|
avyayjain/Python_Scripts
|
d89926a84d5df3cbd57244df27f215891aa60d20
|
[
"Unlicense"
] | 20
|
2020-09-30T15:49:38.000Z
|
2021-10-21T05:06:00.000Z
|
Algorithms/CaesarCipher.py
|
avyayjain/Python_Scripts
|
d89926a84d5df3cbd57244df27f215891aa60d20
|
[
"Unlicense"
] | 67
|
2020-08-17T18:23:11.000Z
|
2022-02-21T08:29:16.000Z
|
def caesar_cipher(message_to_encrypt: str, key: int, encrypt_direction: str) -> str:
alphabet = "abcdefghijklmnopqrstuvwyzABCDEFGHIJKLMNOPQRSTUVWYZ"
result = ""
for character in message_to_encrypt:
        # returns the position of the character in the alphabet array
position = alphabet.find(character)
if position == -1:
# character not found
result += character
else:
if encrypt_direction == "backward":
# if backward direction return 1 position in alphabet array
new_position = position - key
elif encrypt_direction == "forward":
# if forward direction advance 1 position in alphabet array
new_position = position + key
result += alphabet[new_position: new_position+1]
return result
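# A minimal sketch with a hypothetical message: shifting forward by a key and
# then backward by the same key restores the original text (no wrap-around).
def _caesar_demo():
    assert caesar_cipher("abc", 2, "forward") == "cde"
    assert caesar_cipher("cde", 2, "backward") == "abc"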
if __name__ == "__main__":
message_to_encrypt = input("insert the message you want to encrypt: ")
key = int(input("insert the key you want to encrypt your text: "))
mode = input("insert the direction of the cipher: (backward) or (forward) ")
print(caesar_cipher(message_to_encrypt, key, mode))
| 43.148148
| 84
| 0.646352
|
68a8f7d0c33a509a8e74e315abf9e71138e8adfe
| 20,617
|
py
|
Python
|
dlk/tests/test_optimizer.py
|
tvlenin/blueoil
|
810680df75e2640f67d515c377ba2b4531b9e584
|
[
"Apache-2.0"
] | null | null | null |
dlk/tests/test_optimizer.py
|
tvlenin/blueoil
|
810680df75e2640f67d515c377ba2b4531b9e584
|
[
"Apache-2.0"
] | null | null | null |
dlk/tests/test_optimizer.py
|
tvlenin/blueoil
|
810680df75e2640f67d515c377ba2b4531b9e584
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test file for Optimizer."""
import unittest
from core.data_types import Float32, PackedUint32, Int32, QUANTIZED_NOT_PACKED
from core.optimizer import pass_remove_identities, pass_transpose, pass_constant_folding, \
pass_propagate_quantization_details_into_conv, pass_compute_thresholds, pass_pack_weights, \
pass_quantize_convolutions, pass_propagate_datatypes, pass_propagate_output_type_backward
from core.graph import Graph
from core.operators import Add, AveragePool, BatchNormalization, Constant, Conv, Identity, Input, \
MaxPool, Operator, Output, Transpose, QTZ_binary_mean_scaling, QTZ_linear_mid_tread_half, Reshape, Softmax, \
SpaceToDepth
import numpy as np
class TestPassTranspose(unittest.TestCase):
"""Test class for transposing pass."""
def test_pass_transpose(self) -> None:
"""Test code for transposing optimizer pass."""
data = np.random.rand(3, 2, 2, 1)
graph1 = self.create_sample_graph(data)
graph2 = self.create_expected_graph(data)
pass_transpose(graph1)
self.assertEqual(graph1, graph2, 'transpose to NHWC failed.')
print("Test pass #1 transpose passed!")
@staticmethod
def create_sample_graph(data: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [3, 5, 5, 1], Float32(), dimension_format='CWHN')
# constant and internal nodes
w = Constant('weight', Float32(), data, dimension_format='CWHN')
i1 = Identity('identity1', [3, 2, 2, 1], Float32(), {'input': w}, dimension_format='CWHN')
q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 1], Float32(), {'input': i1}, dimension_format='CWHN')
# Conv
conv = Conv('conv', [3, 4, 4, 1], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2], dimension_format='CWHN')
# One output
rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})
y = Output('output', [1, 48], Float32(), {'input': rs},)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
@staticmethod
def create_expected_graph(data: np.ndarray) -> Graph:
graph = Graph()
data = data.transpose([3, 2, 1, 0])
# input
x = Input('placeholder', [1, 5, 5, 3], Float32(), dimension_format='NHWC')
# constant and internal nodes
w = Constant('weight', Float32(), data, dimension_format='NHWC')
i1 = Identity('identity1', [1, 2, 2, 3], Float32(), {'input': w}, dimension_format='NHWC')
q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3], Float32(), {'input': i1}, dimension_format='NHWC')
# Conv
conv = Conv('conv', [1, 4, 4, 3], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2], dimension_format='NHWC')
# One output
rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})
y = Output('output', [1, 48], Float32(), {'input': rs},)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassRemoveIdentities(unittest.TestCase):
"""Test class for removing identity pass."""
def test_pass_remove_identities(self) -> None:
"""Test code for removing identities optimizer pass."""
data = np.random.rand(1, 2, 2, 3)
graph1 = self.create_sample_graph(data)
graph2 = self.create_expected_graph(data)
pass_remove_identities(graph1)
self.assertEqual(graph1, graph2, 'remove identities failed.')
print("Test pass #2 remove identities passed!")
@staticmethod
def create_sample_graph(data: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# constant and internal nodes
w = Constant('weight', Float32(), data)
i1 = Identity('identity1', [1, 2, 2, 3], Float32(), {'input': w})
q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3], Float32(), {'input': i1})
# Conv
conv = Conv('conv', [1, 4, 4, 3], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2])
# One output
i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv})
rs = Reshape('reshape', [1, 48], Float32(), {'data': i2})
y = Output('output', [1, 48], Float32(), {'input': rs},)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
@staticmethod
def create_expected_graph(data: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# constant and internal nodes
w = Constant('weight', Float32(), data)
q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3], Float32(), {'input': w})
# Conv
conv = Conv('conv', [1, 4, 4, 3], Float32(), {'X': x, 'W': q}, kernel_shape=[2, 2])
# One output
rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})
y = Output('output', [1, 48], Float32(), {'input': rs},)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassPropagateQuantizationDetailsIntoConv(unittest.TestCase):
"""Test class for propagating quantization details into conv."""
def test_pass_propagate_quantization_details_into_conv(self) -> None:
"""Test pass."""
data1 = np.random.rand(1, 2, 2, 3)
data2 = np.random.rand(1, 2, 2, 3)
graph1 = self.create_sample_graph(data1, data2)
graph2 = self.create_expected_graph(data1, data2)
pass_propagate_quantization_details_into_conv(graph1)
aq_g1 = graph1.get_op('conv2').a_quantizer
aq_g2 = graph2.get_op('conv2').a_quantizer
kq_g1 = graph1.get_op('conv2').quantizer
kq_g2 = graph2.get_op('conv2').quantizer
self.assertEqual(len(aq_g1), len(aq_g2), '[Failed] Found number of activation quantizer not matched')
if aq_g1 and aq_g2:
self.assertEqual(aq_g1[0].op_type, aq_g2[0].op_type,
'[Failed] Found type of activation quantizer not matched')
self.assertEqual(kq_g1.op_type, kq_g2.op_type, '[Failed] Found type of kernel quantizer not matched')
self.assertEqual(graph1, graph2, '[Failed] Expected graph not matched')
print("Test pass #3 propagate_quantization_details_into_conv passed!")
@staticmethod
def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
# activation quantizer
s1 = Constant('aq_const1', Float32(), np.array(1))
s2 = Constant('aq_const2', Float32(), np.array(2))
aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})
# Conv2
w2 = Constant('weight2', Float32(), data2)
kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
# One output
y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
@staticmethod
def create_expected_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
# activation quantizer
s1 = Constant('aq_const1', Float32(), np.array(1))
s2 = Constant('aq_const2', Float32(), np.array(2))
aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})
# Conv2
w2 = Constant('weight2', Float32(), data2)
kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
conv2.a_quantizer = [aq]
conv2.quantizer = kq
# One output
y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassPackWeights(unittest.TestCase):
"""Test class for packing weight."""
def test_pass_pack_weights(self) -> None:
"""Test pass."""
data1 = np.float32(np.random.rand(1, 2, 2, 3))
data2 = np.float32(np.random.rand(1, 2, 2, 3))
graph1 = self.create_sample_graph(data1, data2)
pass_pack_weights(graph1)
self.assertEqual(graph1.get_op('conv2').input_ops['W'].op_type, 'Constant',
'[Failed] Found input kernel weights not a constant')
graph_2_1 = self.create_sample_graph_2(data1)
graph_2_2 = self.create_sample_graph_2(data1)
pass_pack_weights(graph_2_2)
self.assertEqual(graph_2_1, graph_2_2,
'[Failed] Found optimized graph not the same')
print("Test pass #4 pack_weights passed!")
@staticmethod
def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
# activation quantizer
s1 = Constant('aq_const1', Float32(), np.array(1))
s2 = Constant('aq_const2', Float32(), np.array(2))
aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})
# Conv2
w2 = Constant('weight2', Float32(), data2)
kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
conv2.a_quantizer = [aq]
conv2.quantizer = kq
# One output
y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
@staticmethod
def create_sample_graph_2(data1: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
s1 = Constant('const1', Float32(), np.zeros([1, 4, 4, 3]))
add1 = Add('add', [1, 4, 4, 3], Float32(), {'A': conv1, 'B': s1})
y = Output('output', [1, 4, 4, 3], Float32(), {'input': add1})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassQuantizeConvolutions(unittest.TestCase):
"""Test class for packing weight."""
def test_pass_quantize_convolutions(self) -> None:
"""Test pass."""
data1 = np.float32(np.random.rand(1, 2, 2, 3))
data2 = np.float32(np.random.rand(1, 2, 2, 3))
graph1 = self.create_sample_graph(data1, data2)
pass_quantize_convolutions(graph1)
self.assertEqual(graph1.get_op('aqtz1').dtype, QUANTIZED_NOT_PACKED(),
'[Failed] Found output dtype of activation quantizer not proper')
self.assertEqual(graph1.get_op('kqtz1').dtype, PackedUint32(),
'[Failed] Found output dtype of kernel quantizer not proper')
self.assertEqual(graph1.get_op('conv2').dtype, Float32(),
'[Failed] Found output dtype of conv not proper')
print("Test pass #5 quantize_convolutions passed!")
@staticmethod
def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
# activation quantizer
s1 = Constant('aq_const1', Float32(), np.array(1))
s2 = Constant('aq_const2', Float32(), np.array(2))
aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})
# Conv2
w2 = Constant('weight2', Float32(), data2)
kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
conv2.a_quantizer = [aq]
conv2.quantizer = kq
# One output
y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassPropagateDatatypes(unittest.TestCase):
"""Test class for packing weight."""
def test_pass_propagate_datatypes(self) -> None:
"""Test pass."""
data1 = np.float32(np.random.rand(1, 2, 2, 3))
graph1 = self.create_sample_graph(data1)
# graph2 = self.create_expected_graph(data1, data2)
pass_propagate_datatypes(graph1)
self.assertEqual(graph1.get_op('s2d').dtype, QUANTIZED_NOT_PACKED(),
                         '[Failed] Found dtype of SpaceToDepth not propagated correctly')
print("Test pass #6 propagate data types passed!")
@staticmethod
def create_sample_graph(data1: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], QUANTIZED_NOT_PACKED(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})
# One output
y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassPropagateOutputTypeBackward(unittest.TestCase):
"""Test class for packing weight."""
def test_pass_propagate_output_type_backward(self) -> None:
"""Test pass."""
data1 = np.float32(np.random.rand(1, 2, 2, 3))
graph1 = self.create_sample_graph(data1)
pass_propagate_output_type_backward(graph1)
self.assertEqual(graph1.get_op('conv1').dtype, Float32(),
                         '[Failed] Found dtype of Conv not propagated correctly')
print("Test pass #7 propagate output type backward passed!")
@staticmethod
def create_sample_graph(data1: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], QUANTIZED_NOT_PACKED(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
conv1.is_quantized = True
pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})
# One output
y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassComputeThresholds(unittest.TestCase):
"""Test class for packing weight."""
def test_pass_compute_thresholds(self) -> None:
"""Test pass."""
data1 = np.float32(np.random.rand(1, 2, 2, 3))
data2 = np.float32(np.random.rand(1, 2, 2, 3))
graph1 = self.create_sample_graph(data1, data2)
pass_compute_thresholds(graph1)
self.assertEqual(graph1.get_op('conv2').has_thresholds, True,
'[Failed] Found threshold of Conv not calculated')
print("Test pass #8 compute_thresholds passed!")
def test_pass_compute_thresholds_for_huge_threshold_values(self) -> None:
"""Test pass."""
data1 = np.float32(np.random.rand(1, 2, 2, 3))
data2 = np.float32(np.random.uniform(10 ** (-30), 10 ** (-40), size=(1, 2, 2, 3)))
graph1 = self.create_sample_graph(data1, data2)
pass_compute_thresholds(graph1)
self.assertEqual(graph1.get_op('conv2').has_thresholds, True,
'[Failed] Found threshold of Conv not calculated')
print("Test pass #8-1 compute_thresholds of enormous values passed!")
@staticmethod
def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input('placeholder', [1, 5, 5, 3], Float32())
# Conv1
w1 = Constant('weight1', Float32(), data1)
conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])
# activation quantizer
s1 = Constant('aq_const1', Int32(), np.array([2], dtype=np.int32))
s2 = Constant('aq_const2', Float32(), np.array([2.0], dtype=np.float32))
aq1 = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})
# Conv2
w2 = Constant('weight2', Float32(), data2)
kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq1, 'W': kq}, kernel_shape=[2, 2])
conv2.a_quantizer = [aq1]
conv2.quantizer = kq
conv2.is_quantized = True
sc = Constant('bn_scale', Float32(), np.random.rand(3))
be = Constant('bn_b', Float32(), np.random.rand(3))
mu = Constant('bn_mu', Float32(), np.random.rand(3))
va = Constant('bn_var', Float32(), np.random.rand(3))
bn = BatchNormalization('bn', [1, 3, 3, 3], Float32(), {'X': conv2,
'scale': sc,
'B': be,
'mean': mu,
'var': va})
# activation quantizer
s3 = Constant('aq_const3', Int32(), np.array([2], dtype=np.int32))
s4 = Constant('aq_const4', Float32(), np.array([2.0], dtype=np.float32))
aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {'X': bn, 'Y': s3, 'Z': s4})
# One output
y = Output('output', [1, 3, 3, 3], Float32(), {'input': aq2})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
class TestPassConstantFolding(unittest.TestCase):
"""Test class for packing weight."""
def test_pass_constant_folding(self) -> None:
"""Test pass."""
graph1 = self.create_sample_graph()
pass_constant_folding(graph1)
self.assertEqual(set(graph1.get_op('potatoes_new').data), set(np.array([2, 5])),
'[Failed] Found folded constant not correct')
print("Test pass #9 constant folding passed!")
@staticmethod
def create_sample_graph() -> Graph:
graph = Graph()
x = Input('placeholder', [2], Float32())
s1 = Constant('potato_1', Float32(), np.array([1, 2]))
s2 = Constant('potato_2', Float32(), np.array([1, 3]))
add1 = Add('potatoes', [2], Float32(), {'A': s1, 'B': s2})
add2 = Add('more_potatoes', [2], Float32(), {'A': x, 'B': add1})
# One output
y = Output('output', [2], Float32(), {'input': add2})
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
if __name__ == '__main__':
unittest.main()
| 37.349638
| 116
| 0.578988
|
842e9a36f58a0ab6dcb760a38a97f1c99b3a26d2
| 3,164
|
py
|
Python
|
Canvas.py
|
PositivePeriod/Touchable
|
8ecb69bd72f16bc0c244c2e983316659d2db1eb5
|
[
"MIT"
] | 1
|
2020-07-24T19:29:24.000Z
|
2020-07-24T19:29:24.000Z
|
Canvas.py
|
PositivePeriod/Touchable
|
8ecb69bd72f16bc0c244c2e983316659d2db1eb5
|
[
"MIT"
] | 2
|
2022-01-13T03:01:41.000Z
|
2022-03-12T00:40:55.000Z
|
Canvas.py
|
PositivePeriod/Touchable
|
8ecb69bd72f16bc0c244c2e983316659d2db1eb5
|
[
"MIT"
] | null | null | null |
from scipy import interpolate
from Function import *
class Canvas:
def __init__(self):
self.to_draw = []
self.to_draw_image = None
self.canvas = None
def clear(self):
self.to_draw.clear()
def erase(self):
if len(self.to_draw):
self.to_draw.pop()
def new_draw(self, command):
self.to_draw.append(command)
def set_canvas(self, canvas):
self.canvas = canvas
def pop(self):
return self.to_draw.pop()
def draw(self, scale, width_margin, height_margin):
if self.to_draw_image is not None:
self.canvas.create_image() # TODO
for obj in self.to_draw:
color = color_type(obj[1], 'rgb', 'hex')
size = obj[2]
if obj[0] == 'interpolate':
try:
tck = interpolate.splprep([numpy.array(obj[3]), numpy.array(obj[4])], s=0)[0]
u_new = numpy.arange(0, 1.001, 0.001)
out = interpolate.splev(u_new, tck)
inp = []
for i in range(len(out[0])):
x, y = convert_pos(scale, width_margin, height_margin, x=out[0][i], y=out[1][i])
inp.extend([x, y])
self.canvas.create_line(*inp, fill=color, width=obj[2])
except TypeError as e:
points = []
for i in range(len(obj[3])):
point = convert_pos(scale, width_margin, height_margin, x=obj[3][i], y=obj[4][i])
points.extend(point)
self.canvas.create_line(*points, fill=color, width=obj[2])
if str(e) != 'm > k must hold':
print(e)
finally:
continue
elif obj[0] == 'line':
p = tuple(obj[3][i] * scale + (width_margin if i % 2 == 0 else height_margin) for i in range(len(obj[3])))
self.canvas.create_line(*p, fill=color, width=size)
continue
x1, y1 = convert_pos(scale, width_margin, height_margin, x=obj[3][0], y=obj[3][1])
if obj[0] == 'point':
self.canvas.create_oval(x1 - 1, y1 - 1, x1 + 1, y1 + 1, fill=color, outline=color, width=size)
continue
x2, y2 = convert_pos(scale, width_margin, height_margin, x=obj[4][0], y=obj[4][1])
if obj[0] == 'rectangle':
if x1 != x2 and y1 != y2:
x1_, y1_, x2_, y2_ = min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)
self.canvas.create_rectangle(x1_, y1_, x2_, y2_, outline=color, width=size)
elif obj[0] == 'line':
if obj[3] != obj[4]:
self.canvas.create_line(x1, y1, x2, y2, fill=color, width=size)
elif obj[0] == 'circle':
r = int(square_distance((x1, y1), (x2, y2), root=True))
if r != 0:
self.canvas.create_oval(x1 - r, y1 - r, x1 + r, y1 + r, outline=color, width=size)
else:
print(f'Error; draw; {obj}')
| 41.631579
| 122
| 0.48957
|
5b38adf4bcf4be9700cd81606f9a579fcd4b5530
| 433
|
py
|
Python
|
code/pyclient/test-cassandra.py
|
BastienBP/boonta_kafka_streams
|
bb63a42702a3ae6c1266d9b3bcc77e162c1ba559
|
[
"MIT"
] | 20
|
2016-09-26T09:15:26.000Z
|
2018-11-20T12:31:07.000Z
|
code/pyclient/test-cassandra.py
|
BastienBP/boonta_kafka_streams
|
bb63a42702a3ae6c1266d9b3bcc77e162c1ba559
|
[
"MIT"
] | 2
|
2016-09-23T14:01:48.000Z
|
2017-03-28T17:19:41.000Z
|
code/pyclient/test-cassandra.py
|
BastienBP/boonta_kafka_streams
|
bb63a42702a3ae6c1266d9b3bcc77e162c1ba559
|
[
"MIT"
] | 11
|
2017-01-30T16:20:56.000Z
|
2018-10-07T01:33:08.000Z
|
from cassandra.cluster import Cluster
cluster=Cluster(['cassandra1', 'cassandra2', 'cassandra3'])
session=cluster.connect('boontadata')
session.execute("INSERT INTO raw_events (message_id, device_id, device_time, category, measure1, measure2) VALUES ('pysample-1', 'pysample', 1472222537503, "test", 100, 1234.56)")
rows = session.execute("SELECT * FROM raw_events")
for row in rows:
print(row)
print("OK")
cluster.shutdown()
| 33.307692
| 179
| 0.750577
|
9a76a67244fd8d5f53f59072a7a8dbcdfd053cb7
| 677
|
py
|
Python
|
utils/random.py
|
Samoray-l337/CryptoGuesser
|
7e9df5a15b3cb9958518fbcb008a03bb9364d3da
|
[
"MIT"
] | null | null | null |
utils/random.py
|
Samoray-l337/CryptoGuesser
|
7e9df5a15b3cb9958518fbcb008a03bb9364d3da
|
[
"MIT"
] | null | null | null |
utils/random.py
|
Samoray-l337/CryptoGuesser
|
7e9df5a15b3cb9958518fbcb008a03bb9364d3da
|
[
"MIT"
] | null | null | null |
import random
def random_abc_key():
return random.randint(1, 26)
def random_subtitution_array():
array_key = {}
while len(array_key.keys()) < 13:
key = chr(random_abc_key() + ord('a') - 1)
value = chr(random_abc_key() + ord('a') - 1)
if key in array_key.keys() or value in array_key.values():
continue
array_key[key] = value
return array_key
def random_subtitution_string():
array_string = ''
while len(array_string) < 26:
value = chr(random_abc_key() + ord('a') - 1)
if value in array_string:
continue
array_string += value
return array_string
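# A minimal usage sketch: the generated key is a random permutation of the 26
# lowercase letters, so every letter appears exactly once.
def _substitution_demo():
    key = random_subtitution_string()
    assert len(key) == 26 and len(set(key)) == 26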
| 23.344828
| 66
| 0.589365
|
1245b5e0b0c83e7839956f627746798d961450ad
| 422
|
py
|
Python
|
dico/model/__init__.py
|
AkiaCode/dico
|
d67c0c0da0370538723bcc852ad4a6b1ac00a512
|
[
"MIT"
] | 1
|
2021-09-02T15:59:18.000Z
|
2021-09-02T15:59:18.000Z
|
dico/model/__init__.py
|
AkiaCode/dico
|
d67c0c0da0370538723bcc852ad4a6b1ac00a512
|
[
"MIT"
] | null | null | null |
dico/model/__init__.py
|
AkiaCode/dico
|
d67c0c0da0370538723bcc852ad4a6b1ac00a512
|
[
"MIT"
] | null | null | null |
from .application import *
from .audit_log import *
from .channel import *
from .emoji import *
from .event import *
from .extras import *
from .gateway import *
from .guild import *
from .guild_template import *
from .invite import *
from .permission import *
from .snowflake import *
from .stage import *
from .sticker import *
from .user import *
from .voice import *
from .webhook import *
from .interactions import *
| 22.210526
| 29
| 0.744076
|
e71ad71c002ad171c3ca267ecf302921364b695a
| 380
|
py
|
Python
|
pt01_object,design/cpt01/ticket_seller.py
|
s3ich4n/object_study
|
302108212d1d1f18ae57135145416513de3cb7c2
|
[
"MIT"
] | null | null | null |
pt01_object,design/cpt01/ticket_seller.py
|
s3ich4n/object_study
|
302108212d1d1f18ae57135145416513de3cb7c2
|
[
"MIT"
] | 2
|
2021-06-03T18:45:13.000Z
|
2021-06-04T13:09:29.000Z
|
pt01_object,design/cpt01/ticket_seller.py
|
s3ich4n/object_study
|
302108212d1d1f18ae57135145416513de3cb7c2
|
[
"MIT"
] | null | null | null |
#
# Code for the ticket seller object
#
# @author Seongeun Yu (s3ich4n@gmail.com)
# @date 2021/06/04 02:43 created.
#
from ticket_office import TicketOffice
class TicketSeller:
def __init__(
self,
ticket_office: TicketOffice,
):
self._ticket_office = ticket_office
@property
def ticket_office(self):
return self._ticket_office
| 17.272727
| 46
| 0.628947
|
1d577c43ca67e89334360f4bd51eb9840c1ae0b5
| 1,488
|
py
|
Python
|
src/drone_ai/scripts/helpers/control/objects.py
|
ElHouas/drone_sim
|
e42f49d8536855516b3881c016f828e8525e4af3
|
[
"MIT"
] | null | null | null |
src/drone_ai/scripts/helpers/control/objects.py
|
ElHouas/drone_sim
|
e42f49d8536855516b3881c016f828e8525e4af3
|
[
"MIT"
] | null | null | null |
src/drone_ai/scripts/helpers/control/objects.py
|
ElHouas/drone_sim
|
e42f49d8536855516b3881c016f828e8525e4af3
|
[
"MIT"
] | 1
|
2020-12-18T21:14:33.000Z
|
2020-12-18T21:14:33.000Z
|
#! /usr/bin/env python
import rospy
import time
from std_msgs.msg import Empty
from geometry_msgs.msg import Twist
class Control():
def __init__(self):
self.ctrl_c = False
self._pub_cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._move_msg = Twist()
self._pub_takeoff = rospy.Publisher('/drone/takeoff', Empty, queue_size=1)
self._takeoff_msg = Empty()
self._pub_land = rospy.Publisher('/drone/land', Empty, queue_size=1)
self._land_msg = Empty()
def stop(self):
rospy.loginfo("Stopping...")
self._move_msg.linear.x = 0.0
self._move_msg.angular.z = 0.0
self._pub_cmd_vel.publish(self._move_msg)
def takeoff(self):
rospy.loginfo('Taking off...')
i=0
while not i == 3:
self._pub_takeoff.publish(self._takeoff_msg)
time.sleep(1)
i += 1
def land(self):
rospy.loginfo('Landing...')
i=0
while not i == 3:
self._pub_land.publish(self._land_msg)
time.sleep(1)
i += 1
def turn(self):
rospy.loginfo("Turning...")
self._move_msg.linear.x = 0.0
self._move_msg.angular.z = 1.0
        self._pub_cmd_vel.publish(self._move_msg)
def move_forward(self):
rospy.loginfo("Moving forward...")
self._move_msg.linear.x = 1.0
self._move_msg.angular.z = 0.0
        self._pub_cmd_vel.publish(self._move_msg)
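# Minimal usage sketch (not part of the original file): drive the helper from a
# ROS node. The node name is an arbitrary assumption; topics follow the class above.
if __name__ == '__main__':
    rospy.init_node('drone_control_example', anonymous=True)
    control = Control()
    control.takeoff()
    control.move_forward()
    rospy.sleep(2.0)
    control.stop()
    control.land()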
| 29.76
| 82
| 0.596774
|
133a40bcec8bb4d45d7a75fc81231fee770fe080
| 3,220
|
py
|
Python
|
archs4py/geo.py
|
MaayanLab/archs4pipeline
|
3d9599cb2942d839ce2f73d0ca1fd5629919dd1d
|
[
"Apache-2.0"
] | null | null | null |
archs4py/geo.py
|
MaayanLab/archs4pipeline
|
3d9599cb2942d839ce2f73d0ca1fd5629919dd1d
|
[
"Apache-2.0"
] | null | null | null |
archs4py/geo.py
|
MaayanLab/archs4pipeline
|
3d9599cb2942d839ce2f73d0ca1fd5629919dd1d
|
[
"Apache-2.0"
] | null | null | null |
import urllib
import GEOparse
import os
import pandas as pd
from tqdm import tqdm
import multiprocessing
import contextlib
import io
import sys
def parse_platform(platform, srr, processed_gsms):
os.makedirs("downloads/soft", exist_ok=True)
p = platform
p1 = p[0:5]+"nnn"
p2 = p[0:9]
url = "ftp://ftp.ncbi.nlm.nih.gov/geo/platforms/"+p1+"/"+p2+"/soft/"+p2+"_family.soft.gz"
urllib.request.urlretrieve(url, "downloads/soft/"+p2+".soft.gz")
geo = GEOparse.get_GEO(filepath="downloads/soft/"+p2+".soft.gz", silent=True)
gsmids = []
for gsmid, gsm in geo.gsms.items():
gsmids.append(gsmid)
matching_gsms = list(set(srr.index).intersection(set(gsmids)))
new_gsms = list(set(matching_gsms).difference(set(list(processed_gsms[0]))))
sll = srr.loc[new_gsms]
#chunk_samples = chunk(matching_gsms, 1000)
#with multiprocessing.Pool(6) as pool:
# parameters = [(x, geo, sll) for x in chunk_samples]
# res = list(tqdm(pool.imap(check_gsm_star, parameters), desc="Scanning gsms", total=len(parameters)))
#res = pd.concat(res)
return check_gsm(new_gsms, geo, sll)
def chunk(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def check_gsm_star(args):
return check_gsm(*args)
def check_gsm(new_gsms, geo, sll):
sralist = []
for gsmid in new_gsms:
gsm = geo.gsms[gsmid]
if gsm.metadata['library_strategy'][0] == 'RNA-Seq':
series = gsm.metadata["series_id"][0]
species = gsm.metadata["taxid_ch1"][0]
sralist.append([series, gsmid, species])
mm = pd.DataFrame(sralist)
if mm.shape[0] == 0:
return pd.DataFrame(columns=["gse", "gsm", "sra", "species"])
else:
mm.index = mm.iloc[:,1]
mm = mm.join(sll, how="inner").iloc[:,[0,1,3,2]]
mm.columns = ["gse", "gsm", "sra", "species"]
return mm
def scan_platforms(srr, processed_gsm):
platforms = ["GPL24676", "GPL24247", "GPL21626", "GPL21697", "GPL21273", "GPL20795", "GPL21493", "GPL21103", "GPL19057", "GPL18480", "GPL17021", "GPL15103", "GPL13112", "GPL21290", "GPL20301", "GPL18573", "GPL18460", "GPL16791", "GPL15433", "GPL11154", "GPL23227", "GPL23479"]
platform_results = []
for p in tqdm(platforms):
res = parse_platform(p, srr, processed_gsm)
platform_results.append(res)
return pd.concat(platform_results)
def fast_geo(platform, srr, processed_gsms):
os.makedirs("downloads/soft", exist_ok=True)
p = platform
p1 = p[0:5]+"nnn"
p2 = p[0:9]
url = "ftp://ftp.ncbi.nlm.nih.gov/geo/platforms/"+p1+"/"+p2+"/soft/"+p2+"_family.soft.gz"
urllib.request.urlretrieve(url, "downloads/soft/"+p2+".soft.gz")
os.system("zgrep '\^SAMPLE\|!Sample_library_strategy' downloads/soft/"+p2+".soft.gz > temp.tsv")
f=open('temp.tsv','r')
lines = f.readlines()
f.close()
lines = [x.strip().replace("^SAMPLE = ", "") for x in lines]
lines = [x.replace("!Sample_library_strategy = ", "") for x in lines]
lines = [x.replace("!Sample_taxid_ch1 = ", "") for x in lines]
cc = chunk(lines,2)
df = pd.DataFrame(cc)
rna_samples = df[df[1] == "RNA-Seq"][0]
    # pandas Series has no .intersection(); keep only the RNA-Seq samples that
    # appear in the srr index (interpretation of the original intent)
    return rna_samples[rna_samples.isin(srr.index)]
| 36.590909
| 280
| 0.631988
|
190fa901d81d88277676f6d87a5ee7564b71f25a
| 4,068
|
py
|
Python
|
authlib/flask/oauth2/resource_protector.py
|
tk193192/authlib
|
4c60a628f64c6d385a06ea55e416092726b94d07
|
[
"BSD-3-Clause"
] | 2
|
2021-04-26T18:17:37.000Z
|
2021-04-28T21:39:45.000Z
|
authlib/flask/oauth2/resource_protector.py
|
tk193192/authlib
|
4c60a628f64c6d385a06ea55e416092726b94d07
|
[
"BSD-3-Clause"
] | null | null | null |
authlib/flask/oauth2/resource_protector.py
|
tk193192/authlib
|
4c60a628f64c6d385a06ea55e416092726b94d07
|
[
"BSD-3-Clause"
] | 1
|
2019-10-07T02:01:48.000Z
|
2019-10-07T02:01:48.000Z
|
import functools
from contextlib import contextmanager
from flask import json
from flask import request as _req
from flask import _app_ctx_stack
from werkzeug.local import LocalProxy
from authlib.oauth2 import (
OAuth2Error,
ResourceProtector as _ResourceProtector
)
from authlib.oauth2.rfc6749 import (
MissingAuthorizationError,
TokenRequest,
)
from .signals import token_authenticated
from ..error import raise_http_exception
class ResourceProtector(_ResourceProtector):
"""A protecting method for resource servers. Creating a ``require_oauth``
decorator easily with ResourceProtector::
from authlib.flask.oauth2 import ResourceProtector
require_oauth = ResourceProtector()
# add bearer token validator
from authlib.oauth2.rfc6750 import BearerTokenValidator
from project.models import Token
class MyBearerTokenValidator(BearerTokenValidator):
def authenticate_token(self, token_string):
return Token.query.filter_by(access_token=token_string).first()
def request_invalid(self, request):
return False
def token_revoked(self, token):
return False
require_oauth.register_token_validator(MyBearerTokenValidator())
# protect resource with require_oauth
@app.route('/user')
@require_oauth('profile')
def user_profile():
user = User.query.get(current_token.user_id)
return jsonify(user.to_dict())
"""
def raise_error_response(self, error):
"""Raise HTTPException for OAuth2Error. Developers can re-implement
this method to customize the error response.
:param error: OAuth2Error
:raise: HTTPException
"""
status = error.status_code
body = json.dumps(dict(error.get_body()))
headers = error.get_headers()
raise_http_exception(status, body, headers)
def acquire_token(self, scope=None, operator='AND'):
"""A method to acquire current valid token with the given scope.
:param scope: string or list of scope values
:param operator: value of "AND" or "OR"
:return: token object
"""
request = TokenRequest(
_req.method,
_req.full_path,
_req.data,
_req.headers
)
if not callable(operator):
operator = operator.upper()
token = self.validate_request(scope, request, operator)
token_authenticated.send(self, token=token)
ctx = _app_ctx_stack.top
ctx.authlib_server_oauth2_token = token
return token
@contextmanager
def acquire(self, scope=None, operator='AND'):
"""The with statement of ``require_oauth``. Instead of using a
decorator, you can use a with statement instead::
@app.route('/api/user')
def user_api():
with require_oauth.acquire('profile') as token:
user = User.query.get(token.user_id)
return jsonify(user.to_dict())
"""
try:
yield self.acquire_token(scope, operator)
except OAuth2Error as error:
self.raise_error_response(error)
def __call__(self, scope=None, operator='AND', optional=False):
def wrapper(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
try:
self.acquire_token(scope, operator)
except MissingAuthorizationError as error:
if optional:
return f(*args, **kwargs)
self.raise_error_response(error)
except OAuth2Error as error:
self.raise_error_response(error)
return f(*args, **kwargs)
return decorated
return wrapper
def _get_current_token():
ctx = _app_ctx_stack.top
return getattr(ctx, 'authlib_server_oauth2_token', None)
current_token = LocalProxy(_get_current_token)
| 32.806452
| 79
| 0.632743
|
8809761f6b7f6dd05e4c5b852c86cd06a3375468
| 2,102
|
py
|
Python
|
pyleecan/Methods/Simulation/IndMagFEMM/comp_inductance.py
|
thalesmaoa/pyleecan
|
c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3
|
[
"Apache-2.0"
] | 1
|
2021-11-10T11:52:57.000Z
|
2021-11-10T11:52:57.000Z
|
pyleecan/Methods/Simulation/IndMagFEMM/comp_inductance.py
|
thalesmaoa/pyleecan
|
c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Simulation/IndMagFEMM/comp_inductance.py
|
thalesmaoa/pyleecan
|
c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3
|
[
"Apache-2.0"
] | null | null | null |
from os.path import join
from ....Classes.InputCurrent import InputCurrent
from ....Classes.MagFEMM import MagFEMM
from ....Classes.Simulation import Simulation
from ....Classes.Simu1 import Simu1
from ....Functions.Electrical.dqh_transformation import n2dqh_DataTime
def comp_inductance(self, machine, OP_ref):
"""Compute using FEMM the inductance (Current driven only)
Parameters
----------
self : IndMagFEMM
an IndMagFEMM object
machine : Machine
a Machine object
OP_ref: OperatingPoint
an OP object
Returns
----------
    Phi_dqh_mean : tuple of float
        (Phi_d_mean, Phi_q_mean): flux linkage along the d-axis and q-axis
"""
self.get_logger().info("Compute dq inductances with FEMM")
# Get simulation name and result path
if isinstance(machine.parent, Simulation) and machine.parent.name not in [None, ""]:
simu_name = machine.parent.name + "_IndMagFEMM"
path_result = (
join(machine.parent.path_result, "IndMagFEMM")
if machine.parent.path_result not in [None, ""]
else None
)
elif machine.name not in [None, ""]:
simu_name = machine.name + "_IndMagFEMM"
path_result = None
else:
simu_name = "IndMagFEMM"
path_result = None
# Define simulation
simu_ind = Simu1(
elec=None, name=simu_name, path_result=path_result, machine=machine
)
simu_ind.input = InputCurrent(OP=OP_ref, Nt_tot=self.Nt_tot, Na_tot=2048)
simu_ind.mag = MagFEMM(
is_periodicity_t=True,
is_periodicity_a=self.is_periodicity_a,
is_sliding_band=self.is_sliding_band,
Kgeo_fineness=self.Kgeo_fineness,
type_calc_leakage=self.type_calc_leakage,
nb_worker=self.nb_worker,
)
# Run Simulation
out_ind = simu_ind.run()
# Post-Process
stator_label = machine.stator.get_label()
Phidqh = n2dqh_DataTime(
out_ind.mag.Phi_wind[stator_label], phase_dir=out_ind.elec.phase_dir
)
Phi_dqh_mean = Phidqh.get_along("time=mean", "phase")[Phidqh.symbol]
return (Phi_dqh_mean[0], Phi_dqh_mean[1])
| 30.028571
| 88
| 0.666032
|
a6e8c54ced1d3b2093636edb3c923a186a434515
| 1,600
|
py
|
Python
|
src/python/icurry/types/iobject.py
|
andyjost/Sprite
|
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
|
[
"MIT"
] | 1
|
2022-03-16T16:37:11.000Z
|
2022-03-16T16:37:11.000Z
|
src/python/icurry/types/iobject.py
|
andyjost/Sprite
|
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
|
[
"MIT"
] | null | null | null |
src/python/icurry/types/iobject.py
|
andyjost/Sprite
|
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
|
[
"MIT"
] | null | null | null |
from ...utility import curryname
from ...utility.proptree import proptree
import collections, six, weakref
__all__ = ['IArity', 'IObject', 'IVarIndex']
IArity = int
IVarIndex = int
class IObject(object):
def __init__(self, metadata=None):
if metadata is not None:
self._metadata = proptree(metadata)
@property
def metadata(self):
return getattr(self, '_metadata', {})
def update_metadata(self, items):
md = getattr(self.metadata, '_asdict', self.metadata)
md.update(items)
self._metadata = proptree(md)
@property
def children(self):
return ()
def copy(self, **updates):
'''Copy an IObject with optional updates.'''
data = self.dict()
data.update(**updates)
return type(self)(**data)
def dict(self):
'''Get the object properties as a dictionary.'''
if hasattr(self, '_fields_'):
return collections.OrderedDict(
(name, getattr(self, name)) for name in self._fields_
)
else:
return {
k:v for k,v in six.iteritems(self.__dict__)
if k != 'metadata'
}
# Make objects comparable by their contents.
def __eq__(lhs, rhs):
if rhs is None:
return False
if type(lhs) != type(rhs):
return False
# Compare dicts.
return lhs.dict() == rhs.dict()
def __ne__(lhs, rhs):
return not (lhs == rhs)
def __hash__(self):
return hash(tuple(sorted(self.dict())))
def __repr__(self):
return '%s(%s)' % (
type(self).__name__
, ', '.join(
'%s=%r' % item for item in self.dict().items()
)
)
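# Illustrative subclass (not part of the original module): _fields_ drives
# dict(), copy(), equality and repr as implemented above.
class IExample(IObject):
  _fields_ = ('name', 'arity')
  def __init__(self, name, arity, metadata=None):
    super(IExample, self).__init__(metadata)
    self.name = name
    self.arity = arity
# IExample('f', 2) == IExample('f', 2)   -> True
# IExample('f', 2).copy(arity=3)         -> IExample(name='f', arity=3)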
| 23.188406
| 62
| 0.608125
|
23bafc64cfd30e297f44696b500c8eb508faea1a
| 16,092
|
py
|
Python
|
frappe/desk/moduleview.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/desk/moduleview.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/desk/moduleview.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe import _
from frappe.boot import get_allowed_pages, get_allowed_reports
from frappe.cache_manager import (
build_domain_restriced_doctype_cache,
build_domain_restriced_page_cache,
build_table_count_cache,
)
from frappe.desk.doctype.desktop_icon.desktop_icon import clear_desktop_icons_cache, set_hidden
@frappe.whitelist()
def get(module):
"""Returns data (sections, list of reports, counts) to render module view in desk:
`/desk/#Module/[name]`."""
data = get_data(module)
out = {"data": data}
return out
@frappe.whitelist()
def hide_module(module):
set_hidden(module, frappe.session.user, 1)
clear_desktop_icons_cache()
def get_table_with_counts():
counts = frappe.cache().get_value("information_schema:counts")
if counts:
return counts
else:
return build_table_count_cache()
def get_data(module, build=True):
"""Get module data for the module view `desk/#Module/[name]`"""
doctype_info = get_doctype_info(module)
data = build_config_from_file(module)
if not data:
data = build_standard_config(module, doctype_info)
else:
add_custom_doctypes(data, doctype_info)
add_section(data, _("Custom Reports"), "fa fa-list-alt", get_report_list(module))
data = combine_common_sections(data)
data = apply_permissions(data)
# set_last_modified(data)
if build:
exists_cache = get_table_with_counts()
def doctype_contains_a_record(name):
exists = exists_cache.get(name)
if not exists:
if not frappe.db.get_value("DocType", name, "issingle"):
exists = frappe.db.count(name)
else:
exists = True
exists_cache[name] = exists
return exists
for section in data:
for item in section["items"]:
# Onboarding
# First disable based on exists of depends_on list
doctype = item.get("doctype")
dependencies = item.get("dependencies") or None
if not dependencies and doctype:
item["dependencies"] = [doctype]
dependencies = item.get("dependencies")
if dependencies:
incomplete_dependencies = [d for d in dependencies if not doctype_contains_a_record(d)]
if len(incomplete_dependencies):
item["incomplete_dependencies"] = incomplete_dependencies
if item.get("onboard"):
# Mark Spotlights for initial
if item.get("type") == "doctype":
name = item.get("name")
count = doctype_contains_a_record(name)
item["count"] = count
return data
def build_config_from_file(module):
"""Build module info from `app/config/desktop.py` files."""
data = []
module = frappe.scrub(module)
for app in frappe.get_installed_apps():
try:
data += get_config(app, module)
except ImportError:
pass
return filter_by_restrict_to_domain(data)
def filter_by_restrict_to_domain(data):
"""filter Pages and DocType depending on the Active Module(s)"""
doctypes = (
frappe.cache().get_value("domain_restricted_doctypes") or build_domain_restriced_doctype_cache()
)
pages = frappe.cache().get_value("domain_restricted_pages") or build_domain_restriced_page_cache()
for d in data:
_items = []
for item in d.get("items", []):
item_type = item.get("type")
item_name = item.get("name")
if (item_name in pages) or (item_name in doctypes) or item_type == "report":
_items.append(item)
d.update({"items": _items})
return data
def build_standard_config(module, doctype_info):
"""Build standard module data from DocTypes."""
if not frappe.db.get_value("Module Def", module):
frappe.throw(_("Module Not Found"))
data = []
add_section(
data,
_("Documents"),
"fa fa-star",
[d for d in doctype_info if d.document_type in ("Document", "Transaction")],
)
add_section(
data,
_("Setup"),
"fa fa-cog",
[d for d in doctype_info if d.document_type in ("Master", "Setup", "")],
)
add_section(data, _("Standard Reports"), "fa fa-list", get_report_list(module, is_standard="Yes"))
return data
def add_section(data, label, icon, items):
"""Adds a section to the module data."""
if not items:
return
data.append({"label": label, "icon": icon, "items": items})
def add_custom_doctypes(data, doctype_info):
"""Adds Custom DocTypes to modules setup via `config/desktop.py`."""
add_section(
data,
_("Documents"),
"fa fa-star",
[d for d in doctype_info if (d.custom and d.document_type in ("Document", "Transaction"))],
)
add_section(
data,
_("Setup"),
"fa fa-cog",
[d for d in doctype_info if (d.custom and d.document_type in ("Setup", "Master", ""))],
)
def get_doctype_info(module):
"""Returns list of non child DocTypes for given module."""
active_domains = frappe.get_active_domains()
doctype_info = frappe.get_all(
"DocType",
filters={"module": module, "istable": 0},
or_filters={"ifnull(restrict_to_domain, '')": "", "restrict_to_domain": ("in", active_domains)},
fields=["'doctype' as type", "name", "description", "document_type", "custom", "issingle"],
order_by="custom asc, document_type desc, name asc",
)
for d in doctype_info:
d.document_type = d.document_type or ""
d.description = _(d.description or "")
return doctype_info
def combine_common_sections(data):
"""Combine sections declared in separate apps."""
sections = []
sections_dict = {}
for each in data:
if each["label"] not in sections_dict:
sections_dict[each["label"]] = each
sections.append(each)
else:
sections_dict[each["label"]]["items"] += each["items"]
return sections
def apply_permissions(data):
default_country = frappe.db.get_default("country")
user = frappe.get_user()
user.build_permissions()
allowed_pages = get_allowed_pages()
allowed_reports = get_allowed_reports()
new_data = []
for section in data:
new_items = []
for item in section.get("items") or []:
item = frappe._dict(item)
if item.country and item.country != default_country:
continue
if (
(item.type == "doctype" and item.name in user.can_read)
or (item.type == "page" and item.name in allowed_pages)
or (item.type == "report" and item.name in allowed_reports)
or item.type == "help"
):
new_items.append(item)
if new_items:
new_section = section.copy()
new_section["items"] = new_items
new_data.append(new_section)
return new_data
def get_disabled_reports():
if not hasattr(frappe.local, "disabled_reports"):
frappe.local.disabled_reports = set(r.name for r in frappe.get_all("Report", {"disabled": 1}))
return frappe.local.disabled_reports
def get_config(app, module):
"""Load module info from `[app].config.[module]`."""
config = frappe.get_module("{app}.config.{module}".format(app=app, module=module))
config = config.get_data()
sections = [s for s in config if s.get("condition", True)]
disabled_reports = get_disabled_reports()
for section in sections:
items = []
for item in section["items"]:
if item["type"] == "report" and item["name"] in disabled_reports:
continue
# some module links might not have name
if not item.get("name"):
item["name"] = item.get("label")
if not item.get("label"):
item["label"] = _(item.get("name"))
items.append(item)
section["items"] = items
return sections
def config_exists(app, module):
try:
frappe.get_module("{app}.config.{module}".format(app=app, module=module))
return True
except ImportError:
return False
def add_setup_section(config, app, module, label, icon):
"""Add common sections to `/desk#Module/Setup`"""
try:
setup_section = get_setup_section(app, module, label, icon)
if setup_section:
config.append(setup_section)
except ImportError:
pass
def get_setup_section(app, module, label, icon):
"""Get the setup section from each module (for global Setup page)."""
config = get_config(app, module)
for section in config:
if section.get("label") == _("Setup"):
return {"label": label, "icon": icon, "items": section["items"]}
def get_onboard_items(app, module):
try:
sections = get_config(app, module)
except ImportError:
return []
onboard_items = []
fallback_items = []
if not sections:
doctype_info = get_doctype_info(module)
sections = build_standard_config(module, doctype_info)
for section in sections:
for item in section["items"]:
if item.get("onboard", 0) == 1:
onboard_items.append(item)
# in case onboard is not set
fallback_items.append(item)
if len(onboard_items) > 5:
return onboard_items
return onboard_items or fallback_items
@frappe.whitelist()
def get_links_for_module(app, module):
return [{"value": l.get("name"), "label": l.get("label")} for l in get_links(app, module)]
def get_links(app, module):
try:
sections = get_config(app, frappe.scrub(module))
except ImportError:
return []
links = []
for section in sections:
for item in section["items"]:
links.append(item)
return links
@frappe.whitelist()
def get_desktop_settings():
from frappe.config import get_modules_from_all_apps_for_user
all_modules = get_modules_from_all_apps_for_user()
home_settings = get_home_settings()
modules_by_name = {}
for m in all_modules:
modules_by_name[m["module_name"]] = m
module_categories = ["Modules", "Domains", "Places", "Administration"]
user_modules_by_category = {}
user_saved_modules_by_category = home_settings.modules_by_category or {}
user_saved_links_by_module = home_settings.links_by_module or {}
def apply_user_saved_links(module):
module = frappe._dict(module)
all_links = get_links(module.app, module.module_name)
module_links_by_name = {}
for link in all_links:
module_links_by_name[link["name"]] = link
if module.module_name in user_saved_links_by_module:
user_links = frappe.parse_json(user_saved_links_by_module[module.module_name])
module.links = [module_links_by_name[l] for l in user_links if l in module_links_by_name]
return module
for category in module_categories:
if category in user_saved_modules_by_category:
user_modules = user_saved_modules_by_category[category]
user_modules_by_category[category] = [
apply_user_saved_links(modules_by_name[m]) for m in user_modules if modules_by_name.get(m)
]
else:
user_modules_by_category[category] = [
apply_user_saved_links(m) for m in all_modules if m.get("category") == category
]
# filter out hidden modules
if home_settings.hidden_modules:
for category in user_modules_by_category:
hidden_modules = home_settings.hidden_modules or []
modules = user_modules_by_category[category]
user_modules_by_category[category] = [
module for module in modules if module.module_name not in hidden_modules
]
return user_modules_by_category
@frappe.whitelist()
def update_hidden_modules(category_map):
category_map = frappe.parse_json(category_map)
home_settings = get_home_settings()
saved_hidden_modules = home_settings.hidden_modules or []
for category in category_map:
config = frappe._dict(category_map[category])
saved_hidden_modules += config.removed or []
saved_hidden_modules = [d for d in saved_hidden_modules if d not in (config.added or [])]
if home_settings.get("modules_by_category") and home_settings.modules_by_category.get(category):
module_placement = [
d for d in (config.added or []) if d not in home_settings.modules_by_category[category]
]
home_settings.modules_by_category[category] += module_placement
home_settings.hidden_modules = saved_hidden_modules
set_home_settings(home_settings)
return get_desktop_settings()
@frappe.whitelist()
def update_global_hidden_modules(modules):
modules = frappe.parse_json(modules)
frappe.only_for("System Manager")
doc = frappe.get_doc("User", "Administrator")
doc.set("block_modules", [])
for module in modules:
doc.append("block_modules", {"module": module})
doc.save(ignore_permissions=True)
return get_desktop_settings()
@frappe.whitelist()
def update_modules_order(module_category, modules):
modules = frappe.parse_json(modules)
home_settings = get_home_settings()
home_settings.modules_by_category = home_settings.modules_by_category or {}
home_settings.modules_by_category[module_category] = modules
set_home_settings(home_settings)
@frappe.whitelist()
def update_links_for_module(module_name, links):
links = frappe.parse_json(links)
home_settings = get_home_settings()
home_settings.setdefault("links_by_module", {})
home_settings["links_by_module"].setdefault(module_name, None)
home_settings["links_by_module"][module_name] = links
set_home_settings(home_settings)
return get_desktop_settings()
@frappe.whitelist()
def get_options_for_show_hide_cards():
global_options = []
if "System Manager" in frappe.get_roles():
global_options = get_options_for_global_modules()
return {"user_options": get_options_for_user_blocked_modules(), "global_options": global_options}
@frappe.whitelist()
def get_options_for_global_modules():
from frappe.config import get_modules_from_all_apps
all_modules = get_modules_from_all_apps()
blocked_modules = frappe.get_doc("User", "Administrator").get_blocked_modules()
options = []
for module in all_modules:
module = frappe._dict(module)
options.append(
{
"category": module.category,
"label": module.label,
"value": module.module_name,
"checked": module.module_name not in blocked_modules,
}
)
return options
@frappe.whitelist()
def get_options_for_user_blocked_modules():
from frappe.config import get_modules_from_all_apps_for_user
all_modules = get_modules_from_all_apps_for_user()
home_settings = get_home_settings()
hidden_modules = home_settings.hidden_modules or []
options = []
for module in all_modules:
module = frappe._dict(module)
options.append(
{
"category": module.category,
"label": module.label,
"value": module.module_name,
"checked": module.module_name not in hidden_modules,
}
)
return options
def set_home_settings(home_settings):
frappe.cache().hset("home_settings", frappe.session.user, home_settings)
frappe.db.set_value("User", frappe.session.user, "home_settings", json.dumps(home_settings))
@frappe.whitelist()
def get_home_settings():
def get_from_db():
settings = frappe.db.get_value("User", frappe.session.user, "home_settings")
return frappe.parse_json(settings or "{}")
home_settings = frappe.cache().hget("home_settings", frappe.session.user, get_from_db)
return home_settings
def get_module_link_items_from_list(app, module, list_of_link_names):
try:
sections = get_config(app, frappe.scrub(module))
except ImportError:
return []
links = []
for section in sections:
for item in section["items"]:
if item.get("label", "") in list_of_link_names:
links.append(item)
return links
def set_last_modified(data):
for section in data:
for item in section["items"]:
if item["type"] == "doctype":
item["last_modified"] = get_last_modified(item["name"])
def get_last_modified(doctype):
def _get():
try:
last_modified = frappe.get_all(
doctype, fields=["max(modified)"], as_list=True, limit_page_length=1
)[0][0]
except Exception as e:
if frappe.db.is_table_missing(e):
last_modified = None
else:
raise
# hack: save as -1 so that it is cached
if last_modified == None:
last_modified = -1
return last_modified
last_modified = frappe.cache().hget("last_modified", doctype, _get)
if last_modified == -1:
last_modified = None
return last_modified
def get_report_list(module, is_standard="No"):
"""Returns list on new style reports for modules."""
reports = frappe.get_list(
"Report",
fields=["name", "ref_doctype", "report_type"],
filters={"is_standard": is_standard, "disabled": 0, "module": module},
order_by="name",
)
out = []
for r in reports:
out.append(
{
"type": "report",
"doctype": r.ref_doctype,
"is_query_report": 1
if r.report_type in ("Query Report", "Script Report", "Custom Report")
else 0,
"label": _(r.name),
"name": r.name,
}
)
return out
| 26.038835
| 99
| 0.727691
|
ff76eee5af137c925641dfd7dca4bb3c72963dd1
| 515
|
py
|
Python
|
spectacle/tests/test_fitting.py
|
nmearl/spectacle
|
ae2d11a41dfae5430de392d54b06bf991003fd69
|
[
"BSD-3-Clause"
] | null | null | null |
spectacle/tests/test_fitting.py
|
nmearl/spectacle
|
ae2d11a41dfae5430de392d54b06bf991003fd69
|
[
"BSD-3-Clause"
] | 1
|
2021-05-13T20:51:13.000Z
|
2021-05-13T20:51:13.000Z
|
spectacle/tests/test_fitting.py
|
nmearl/spectacle
|
ae2d11a41dfae5430de392d54b06bf991003fd69
|
[
"BSD-3-Clause"
] | null | null | null |
from astropy.modeling.fitting import LevMarLSQFitter
from spectacle.modeling import Spectral1D, OpticalDepth1D
import astropy.units as u
import numpy as np
def test_levmar():
line1 = OpticalDepth1D(lambda_0=1216 * u.AA, v_doppler=500 * u.km /u.s, column_density=14)
spec_mod = Spectral1D(line1, continuum=1)
x = np.linspace(1200, 1225, 1000) * u.Unit('Angstrom')
y = spec_mod(x)
fitter = LevMarLSQFitter()
fit_spec_mod = fitter(spec_mod, x, y)
assert np.allclose(y, fit_spec_mod(x))
| 30.294118
| 94
| 0.724272
|
986f6a0e68fb365ff7368808c20f337c44c5b15b
| 2,461
|
py
|
Python
|
11/11.py
|
zinccat/Convex-Analysis-homework
|
16b2abc5a62717821fa3e06dee31ed99584347ff
|
[
"MIT"
] | 5
|
2021-05-02T14:01:00.000Z
|
2022-03-30T03:16:45.000Z
|
11/11.py
|
zinccat/Convex-Analysis-homework
|
16b2abc5a62717821fa3e06dee31ed99584347ff
|
[
"MIT"
] | null | null | null |
11/11.py
|
zinccat/Convex-Analysis-homework
|
16b2abc5a62717821fa3e06dee31ed99584347ff
|
[
"MIT"
] | 2
|
2022-03-06T03:49:26.000Z
|
2022-03-06T15:13:13.000Z
|
# HW11: gradient descent optimization of the given function
# By ZincCat
import numpy as np
from matplotlib import pyplot as plt
# set the random seed
np.random.seed(19890817)
# initialize the problem
m = 10
n = 15
a = np.random.normal(5, 5, [n, m])
x = np.ones(n)
def f(x):
    # evaluate the objective
    return np.sum(np.exp(a.T@x)) + np.sum(np.exp(-a.T@x))
def gradient_f(x):
    # evaluate the gradient of the objective
    return a@(np.exp(a.T@x) - np.exp(-a.T@x))
def descent(x, grad, grad_norm, mode='2'):
    # gradient descent step with backtracking line search
    # input: current x, gradient, gradient norm, descent mode
    # output: updated x and the step size t
    # mode '2' uses the l2 norm, mode 'inf' uses the infinity norm
normalized_grad = grad/grad_norm
t = 1.0
if mode == '2':
# l_2 norm
while f(x - t*normalized_grad) > value - alpha*t*np.dot(grad, normalized_grad):
t *= beta
x -= t*normalized_grad
elif mode == 'inf':
# l_infty norm
while f(x - t*np.sign(normalized_grad)) > value - alpha*t*np.dot(grad, np.sign(normalized_grad)):
t *= beta
x -= t*np.sign(normalized_grad)
return x, t
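# Note added for clarity (not in the original): both branches above implement
# backtracking line search with the Armijo condition
#     f(x - t*d) <= f(x) - alpha * t * <grad, d>
# starting from t = 1 and shrinking t <- beta*t until it holds, where d is the
# normalized gradient (mode '2') or its sign vector (mode 'inf').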
minValue = f(np.zeros(n)) # minimum of the objective (attained at x = 0)
alpha_list = [0.22]
beta_list = [0.62]
maxIter = 1000 # maximum number of iterations
eta = 0.01 # stopping tolerance on the gradient norm
result = [] # record (parameter, result) pairs
time = [] # record iteration indices, for plotting
values = [] # record the objective value at each iteration, for plotting
stepsize = [] # record the step size at each iteration, for plotting
Plot = True # whether to plot; keep alpha and beta to a single value each when plotting
t = 0 # for plotting
# run the experiment
for alpha in alpha_list:
for beta in beta_list:
timestep = 0
x = np.ones(n)
while True:
value = f(x)
# print("Iteration:", timestep, "Error", value - minValue)
if Plot:
time.append(timestep)
stepsize.append(t)
values.append(value)
grad = gradient_f(x)
grad_norm = np.linalg.norm(grad)
if grad_norm <= eta or timestep > maxIter:
break
            x, t = descent(x, grad, grad_norm, mode='inf') # use the infinity norm here
timestep += 1
result.append((alpha, beta, f(x)-minValue, timestep))
for i in result:
print(i)
# plotting
if Plot:
# f − p^* versus iteration
plt.plot(time, values)
plt.xlabel('Iterations', fontsize=14)
plt.ylabel('Value', fontsize=14)
plt.savefig('alpha'+str(alpha)+'beta'+str(beta)+'value.pdf')
plt.show()
# step length versus iteration number
del(time[0])
del(stepsize[0])
plt.plot(time, stepsize)
plt.xlabel('Iterations', fontsize=14)
plt.ylabel('Step length', fontsize=14)
plt.savefig('alpha'+str(alpha)+'beta'+str(beta)+'step.pdf')
plt.show()
| 24.61
| 105
| 0.572531
|
8453ab1f830f25573159eba866ca915bb3d27ca5
| 1,792
|
py
|
Python
|
dask_sql/physical/rel/logical/table_scan.py
|
rjzamora/dask-sql
|
c3ad6a9f6b01ce02127fde7501eaf322c8160f7e
|
[
"MIT"
] | 56
|
2021-08-17T21:21:24.000Z
|
2022-03-29T11:28:51.000Z
|
dask_sql/physical/rel/logical/table_scan.py
|
rjzamora/dask-sql
|
c3ad6a9f6b01ce02127fde7501eaf322c8160f7e
|
[
"MIT"
] | 195
|
2021-08-13T16:48:17.000Z
|
2022-03-29T16:17:48.000Z
|
dask_sql/physical/rel/logical/table_scan.py
|
rjzamora/dask-sql
|
c3ad6a9f6b01ce02127fde7501eaf322c8160f7e
|
[
"MIT"
] | 22
|
2021-08-17T08:31:27.000Z
|
2022-03-15T16:39:39.000Z
|
from typing import TYPE_CHECKING
from dask_sql.datacontainer import DataContainer
from dask_sql.physical.rel.base import BaseRelPlugin
if TYPE_CHECKING:
import dask_sql
from dask_sql.java import org
class DaskTableScanPlugin(BaseRelPlugin):
"""
    A DaskTableScan is the main ingredient: it will get the data
    from the database. It is always used when the SQL looks like
SELECT .... FROM table ....
We need to get the dask dataframe from the registered
tables and return the requested columns from it.
Calcite will always refer to columns via index.
"""
class_name = "com.dask.sql.nodes.DaskTableScan"
def convert(
self, rel: "org.apache.calcite.rel.RelNode", context: "dask_sql.Context"
) -> DataContainer:
# There should not be any input. This is the first step.
self.assert_inputs(rel, 0)
# The table(s) we need to return
table = rel.getTable()
# The table names are all names split by "."
# We assume to always have the form something.something
table_names = [str(n) for n in table.getQualifiedName()]
assert len(table_names) == 2
schema_name = table_names[0]
table_name = table_names[1]
table_name = table_name.lower()
dc = context.schema[schema_name].tables[table_name]
df = dc.df
cc = dc.column_container
# Make sure we only return the requested columns
row_type = table.getRowType()
field_specifications = [str(f) for f in row_type.getFieldNames()]
cc = cc.limit_to(field_specifications)
cc = self.fix_column_to_row_type(cc, rel.getRowType())
dc = DataContainer(df, cc)
dc = self.fix_dtype_to_row_type(dc, rel.getRowType())
return dc
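# Illustrative usage (not part of this module): the plugin above is what ends up
# serving a plain table scan such as the query below. Table and column names
# are made up for the example.
#
#     import dask.dataframe as dd
#     from dask_sql import Context
#     c = Context()
#     c.create_table("my_table", dd.from_pandas(df, npartitions=2))
#     result = c.sql("SELECT a, b FROM my_table")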
| 32.581818
| 80
| 0.667969
|
09201441818015d1809ec5698d1811ea09583fbf
| 929
|
py
|
Python
|
test/test_minimal_software_asset.py
|
cons3rt/cons3rt-python-sdk
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
[
"RSA-MD"
] | null | null | null |
test/test_minimal_software_asset.py
|
cons3rt/cons3rt-python-sdk
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
[
"RSA-MD"
] | null | null | null |
test/test_minimal_software_asset.py
|
cons3rt/cons3rt-python-sdk
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: apiteam@swagger.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.minimal_software_asset import MinimalSoftwareAsset # noqa: E501
from openapi_client.rest import ApiException
class TestMinimalSoftwareAsset(unittest.TestCase):
"""MinimalSoftwareAsset unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMinimalSoftwareAsset(self):
"""Test MinimalSoftwareAsset"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.minimal_software_asset.MinimalSoftwareAsset() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.658537
| 99
| 0.717976
|
14210c3b83ac555606dc4dc3f4d5a714f7d8154f
| 8,695
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_available_service_aliases_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_available_service_aliases_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_available_service_aliases_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AvailableServiceAliasesOperations(object):
"""AvailableServiceAliasesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.AvailableServiceAliasesResult"]
"""Gets all available service aliases for this subscription in this region.
:param location: The location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableServiceAliasesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.AvailableServiceAliasesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableServiceAliasesResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableServiceAliasesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availableServiceAliases'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.AvailableServiceAliasesResult"]
"""Gets all available service aliases for this resource group in this region.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param location: The location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableServiceAliasesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.AvailableServiceAliasesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableServiceAliasesResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableServiceAliasesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availableServiceAliases'} # type: ignore
| 46.497326
| 204
| 0.650029
|
073a8d5a67e53ae645e119e666eb1893e7066f35
| 2,768
|
py
|
Python
|
pysm/experiments/semantic_labeling/summary.py
|
binh-vu/semantic-modeling
|
b387584502ba1daa6abd6b7573828416f6426b49
|
[
"MIT"
] | 3
|
2019-10-31T15:26:20.000Z
|
2022-03-03T06:04:03.000Z
|
pysm/experiments/semantic_labeling/summary.py
|
binh-vu/semantic-modeling
|
b387584502ba1daa6abd6b7573828416f6426b49
|
[
"MIT"
] | 1
|
2021-10-05T14:57:29.000Z
|
2022-03-27T01:58:41.000Z
|
pysm/experiments/semantic_labeling/summary.py
|
binh-vu/semantic-modeling
|
b387584502ba1daa6abd6b7573828416f6426b49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import shutil
import ujson
from pathlib import Path
import numpy as np
# import gmtk.config
# gmtk.config.USE_C_EXTENSION = False
from semantic_labeling import create_semantic_typer
from semantic_modeling.assembling.learning.evaluate import predict_sm, evaluate
from datetime import datetime
from experiments.slack_notifier import send_message, ExpResultMessage, TextMessage
from semantic_modeling.assembling.learning.online_learning import create_default_model, online_learning
from semantic_modeling.assembling.learning.shared_models import TrainingArgs
from semantic_modeling.assembling.undirected_graphical_model.model import Model
from semantic_modeling.config import config
from semantic_modeling.data_io import get_semantic_models, get_short_train_name
from semantic_modeling.settings import Settings
from semantic_modeling.utilities.serializable import serializeCSV, deserializeCSV
def get_shell_args():
def str2bool(v):
assert v.lower() in {"true", "false"}
return v.lower() == "true"
parser = argparse.ArgumentParser('Semantic labeling experiment')
parser.register("type", "boolean", str2bool)
parser.add_argument('--dataset', type=str, default=None, help="Dataset name")
parser.add_argument('--exp_dir', type=str, help='Experiment directory, must be existed before')
parser.add_argument('--channel', type=str)
parser.add_argument('--detail', type=str)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_shell_args()
dataset = args.dataset
kfold_eval = [["source", "mrr", "accuracy", "coverage"]]
exp_dir = Path(args.exp_dir)
assert exp_dir.exists()
for file in exp_dir.iterdir():
if file.name.endswith('.test.csv'):
eval = deserializeCSV(file)
assert eval[0][0] == 'source' and eval[-1][0] == 'average'
kfold_eval += eval[1:-1]
if len(kfold_eval) == 1:
print(">>> ERROR NO OUTPUTS")
send_message(config.slack.channel[args.channel], TextMessage(f"Experiment error: no outputs\n.*Experiment dir*: {args.exp_dir}"))
else:
average = [
'average',
np.average([float(x[1]) for x in kfold_eval[1:]]),
np.average([float(x[2]) for x in kfold_eval[1:]]),
np.average([float(x[3]) for x in kfold_eval[1:]]),
]
kfold_eval.append(average)
serializeCSV(kfold_eval, exp_dir / f"all.csv")
send_message(config.slack.channel[args.channel], ExpResultMessage(dataset, args.detail, args.exp_dir, {
"mrr": average[1],
"accuracy": average[2],
"coverage": average[3]
}))
print(">>> AVERAGE:", average)
| 36.906667
| 137
| 0.692558
|
df148910e62fc8528eceeb1afce7b2e74e3b36ff
| 749
|
py
|
Python
|
improved_enigma/main/tests/factories.py
|
2019342a/improved-enigma
|
7be2cf30673ed7cc3b78f68b3f22c1ba6d800dee
|
[
"MIT"
] | null | null | null |
improved_enigma/main/tests/factories.py
|
2019342a/improved-enigma
|
7be2cf30673ed7cc3b78f68b3f22c1ba6d800dee
|
[
"MIT"
] | 6
|
2021-09-06T08:36:33.000Z
|
2022-03-01T23:01:24.000Z
|
improved_enigma/main/tests/factories.py
|
2019342a/improved-enigma
|
7be2cf30673ed7cc3b78f68b3f22c1ba6d800dee
|
[
"MIT"
] | null | null | null |
from factory.django import DjangoModelFactory
from factory import Faker
from factory import SubFactory
from ..models import Customer
from ..models import Order
from ..models import Product
from ..models import Stock
class CustomerFactory(DjangoModelFactory):
full_name = Faker("name")
email = Faker("email")
class Meta:
model = Customer
class ProductFactory(DjangoModelFactory):
name = Faker("name")
description = Faker("text")
class Meta:
model = Product
class StockFactory(DjangoModelFactory):
product = SubFactory(ProductFactory)
class Meta:
model = Stock
class OrderFactory(DjangoModelFactory):
customer = SubFactory(CustomerFactory)
class Meta:
model = Order
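# Illustrative usage (not part of the original file): SubFactory builds the
# related objects automatically, so a single call creates the whole chain.
# Requires a configured Django test database, e.g. inside a TestCase:
#     order = OrderFactory()
#     assert order.customer.email
#     stock = StockFactory()
#     assert stock.product.name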
| 19.205128
| 45
| 0.714286
|
325986b27dd73a92732384e751d9c9e719907fd5
| 6,363
|
py
|
Python
|
org/mk/training/dl/tfwordslstmmulti.py
|
slowbreathing/Deep-Breathe
|
bcc97cadfc53d3297317764ecfb2223e5e715fd1
|
[
"MIT"
] | 5
|
2019-05-01T03:49:32.000Z
|
2022-02-20T12:41:38.000Z
|
org/mk/training/dl/tfwordslstmmulti.py
|
slowbreathing/Deep-Breathe
|
bcc97cadfc53d3297317764ecfb2223e5e715fd1
|
[
"MIT"
] | null | null | null |
org/mk/training/dl/tfwordslstmmulti.py
|
slowbreathing/Deep-Breathe
|
bcc97cadfc53d3297317764ecfb2223e5e715fd1
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import random
import collections
import time
from tensorflow.python.ops import array_ops
from tensorflow.contrib.rnn.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import init_ops
from tensorflow.contrib.rnn import GRUCell
from org.mk.training.dl.common import input_one_hot
from org.mk.training.dl.util import get_rel_save_file
import sys
# data I/O
train_file=sys.argv[1]
data = open(train_file, 'r').read()
# Parameters
learning_rate = 0.001
#training_iters = 50000
training_iters = 200
display_step = 100
n_input = 3
# number of units in RNN cell
n_hidden = 5
rnd=np.random.RandomState(42)
def read_data(fname):
with open(fname) as f:
data = f.readlines()
data = [x.strip() for x in data]
data = [data[i].lower().split() for i in range(len(data))]
data = np.array(data)
data = np.reshape(data, [-1, ])
return data
train_data = read_data(train_file)
def build_dataset(words):
count = collections.Counter(words).most_common()
dictionary = dict()
sortedwords=sorted(set(words))
for i,word in enumerate(sortedwords):
dictionary[word] = i
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, reverse_dictionary
dictionary, reverse_dictionary = build_dataset(train_data)
vocab_size = len(dictionary)
# Place holder for Mini batch input output
x = tf.placeholder("float", [None, n_input, vocab_size])
y = tf.placeholder("float", [None, vocab_size])
# RNN output node weights and biases
weights = {
'out': tf.Variable([[-0.09588283, -2.2044923 , -0.74828255, 0.14180686, -0.32083616,
-0.9444244 , 0.06826905, -0.9728962 , -0.18506959, 1.0618515 ],
[ 1.156649 , 3.2738173 , -1.2556943 , -0.9079511 , -0.82127047,
-1.1448543 , -0.60807484, -0.5885713 , 1.0378786 , -0.7088431 ],
[ 1.006477 , 0.28033388, -0.1804534 , 0.8093307 , -0.36991575,
0.29115433, -0.01028167, -0.7357091 , 0.92254084, -0.10753923],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015]]
)
}
biases = {
'out': tf.Variable([ 0.1458478 , -0.3660951 , -2.1647317 , -1.9633691 , -0.24532059,
0.14005205, -1.0961286 , -0.43737876, 0.7028531 , -1.8481724 ]
)
}
#works with 3 dimensions. Line 112 takes care of extracting last (h*wy=by) in 2 dimensions
def RNN(x, weights, biases):
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.1)) as vs:
cell = rnn.MultiRNNCell([rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False),
rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False)])
outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
return tf.expand_dims(tf.matmul(outputs[-1], weights['out'])[-1],0) + biases['out'],outputs[-1],states,weights['out'],biases['out']
pred,output,state,weights_out,biases_out = RNN(x, weights, biases)
# Loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
grads_and_vars_tf_style = optimizer.compute_gradients(cost)
train_tf_style = optimizer.apply_gradients(grads_and_vars_tf_style)
# Model evaluation
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
global_step = tf.Variable(0, name='global_step', trainable=False)
# Initializing the variables
init = tf.global_variables_initializer()
projectdir="rnn_words"
start_time = time.time()
def elapsed(sec):
if sec<60:
return str(sec) + " sec"
elif sec<(60*60):
return str(sec/60) + " min"
else:
return str(sec/(60*60)) + " hr"
# Launch the graph
saver = tf.train.Saver(max_to_keep=200)
with tf.Session() as session:
session.run(init)
step = 0
offset =2
end_offset = n_input + 1
acc_total = 0
loss_total = 0
print ("offset:",offset)
while step < training_iters:
if offset > (len(train_data)-end_offset):
offset = rnd.randint(0, n_input+1)
print("offset:", offset)
symbols_in_keys = [ input_one_hot(dictionary[ str(train_data[i])],vocab_size) for i in range(offset, offset+n_input) ]
symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input,vocab_size])
symbols_out_onehot=input_one_hot(dictionary[str(train_data[offset+n_input])],vocab_size)
symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1])
tfgrads_and_vars_tf_style, _,acc, loss, onehot_pred,tfoutput,tfstate,tfout_weights,tfbiases_out = session.run([grads_and_vars_tf_style,train_tf_style, accuracy, cost, pred,output,state,weights_out,biases_out], \
feed_dict={x: symbols_in_keys, y: symbols_out_onehot})
loss_total += loss
acc_total += acc
print("tfoutput:",tfoutput," tfstate:",tfstate)
print("onehot_pred:",onehot_pred)
print("loss:",loss)
print("tfgrads_and_vars_tf_style:",tfgrads_and_vars_tf_style)
if (step+1) % display_step == 0:
print("Iter= " + str(step+1) + ", Average Loss= " + \
"{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \
"{:.2f}%".format(100*acc_total/display_step))
acc_total = 0
loss_total = 0
symbols_in = [train_data[i] for i in range(offset, offset + n_input)]
symbols_out = train_data[offset + n_input]
symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())]
saver.save(session,
get_rel_save_file(projectdir)+ '%04d' % (step+1), global_step=global_step)
print("%s - Actual word:[%s] vs Predicted word:[%s]" % (symbols_in,symbols_out,symbols_out_pred))
step += 1
offset += (n_input+1)
print("Optimization Finished!")
print("Elapsed time: ", elapsed(time.time() - start_time))
| 39.76875
| 220
| 0.670596
|
e7607a42b94ee4061ca7064ef4c256067aa03f0c
| 1,622
|
py
|
Python
|
app/api.py
|
NTsystems/python_belgrade_meetup_9
|
4e7c2809a70f01b7593166aa5b788dba9333f9a8
|
[
"MIT"
] | 2
|
2018-07-02T10:34:19.000Z
|
2018-07-02T10:35:18.000Z
|
app/api.py
|
nmrkic/python_belgrade_meetup_9
|
4e7c2809a70f01b7593166aa5b788dba9333f9a8
|
[
"MIT"
] | null | null | null |
app/api.py
|
nmrkic/python_belgrade_meetup_9
|
4e7c2809a70f01b7593166aa5b788dba9333f9a8
|
[
"MIT"
] | 2
|
2017-03-22T10:39:05.000Z
|
2018-07-02T10:35:17.000Z
|
import tornado.ioloop
import tornado.web
import models
from tornado import websocket
import json
import os
clients = []
root = os.path.dirname(__file__)
class Tweets(tornado.web.RequestHandler):
def post(self):
try:
data = json.loads(self.request.body.decode('utf-8'))
models.add_tweet(data)
count = models.get_counts()
if clients and data.get('hashtags', []):
clients[0].send_message({'tweet': data, 'count': count})
self.write({'status': "ok"})
except:
self.write({'status': "error"})
def get(self):
try:
hashtag = self.get_argument("hashtag", "").lower()
tweets = models.get_last_tweets(hashtag)
self.write(tweets)
except:
pass
class SocketHandler(websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
if self not in clients:
clients.append(self)
def on_close(self):
if self in clients:
clients.remove(self)
def on_message(self, message):
pass
@staticmethod
def send_message(data):
for client in clients:
client.write_message(data)
def make_app():
return tornado.web.Application([
(r"/tweets/?", Tweets),
(r'/socket/?', SocketHandler),
(r"/(.*)", tornado.web.StaticFileHandler, {"path": root, "default_filename": "index.html"}),
], autoreload=True)
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
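# Example interaction (not part of the original file), once the server is
# running on localhost:8888; payload fields are assumptions for illustration:
#     import requests
#     requests.post('http://localhost:8888/tweets/', data=json.dumps({"text": "hello", "hashtags": ["python"]}))
#     requests.get('http://localhost:8888/tweets/', params={"hashtag": "python"})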
| 24.575758
| 100
| 0.589396
|
456d79b4a5e78504208e72dfe81decd2e07bb07f
| 35,207
|
py
|
Python
|
volttrontesting/platform/test_instance_setup.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | 1
|
2020-06-08T16:54:28.000Z
|
2020-06-08T16:54:28.000Z
|
volttrontesting/platform/test_instance_setup.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | 8
|
2016-10-07T22:49:28.000Z
|
2022-02-23T00:57:58.000Z
|
volttrontesting/platform/test_instance_setup.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | null | null | null |
import contextlib
import os
import shutil
import subprocess
import pytest
from configparser import ConfigParser
from volttron.platform import is_rabbitmq_available
from volttron.platform.instance_setup import _is_agent_installed
from volttron.utils import get_hostname
from volttron.platform.agent.utils import is_volttron_running
from volttrontesting.fixtures.rmq_test_setup import create_rmq_volttron_setup
from volttrontesting.utils.platformwrapper import create_volttron_home
HAS_RMQ = is_rabbitmq_available()
'''
Example variables to be used during each of the tests, depending on the prompts that will be asked
message_bus = "zmq"
rmq_home = ""
domain_name = ""
new_root_ca = "Y"
ca_country = "US"
ca_state = "test-state"
ca_location = "test-location"
ca_organization = "test-org"
ca_org_unit = "test-org-unit"
default_rmq_values = "Y"
remove_rmq_conf = "Y"
vip_address = ""
vip_port = ""
is_web_enabled = "Y"
web_protocol = "https"
web_port = ""
gen_web_cert = "Y"
is_vc = "N"
vc_admin_name = "test"
vc_admin_password = "test"
is_vcp = "N"
instance_name = ""
vc_hostname = ""
vc_port = "8443"
install_historian = "N"
install_driver = "N"
install_fake_device = "N"
install_listener = "N"
agent_autostart = "N"
'''
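# Illustrative sketch, not part of the original module: the tests below answer
# vcfg's interactive prompts by joining responses (such as the example
# variables listed above) with newlines and piping them to the process through
# ``Popen.communicate``. The helper name ``_answer_prompts`` is hypothetical.
def _answer_prompts(vhome, answers):
    # Spawn vcfg against the temporary VOLTTRON_HOME and feed it one answer
    # per prompt; returns the captured stdout and stderr.
    with subprocess.Popen(["vcfg", "--vhome", vhome],
                          env=os.environ,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          text=True) as proc:
        return proc.communicate("\n".join(answers))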
@contextlib.contextmanager
def create_vcfg_vhome():
debug_flag = os.environ.get('DEBUG', False)
vhome = create_volttron_home()
yield vhome
if not debug_flag:
shutil.rmtree(vhome, ignore_errors=True)
def test_should_remove_config_vhome(monkeypatch):
monkeypatch.setenv("DEBUG", '')
with create_vcfg_vhome() as vhome:
assert os.path.isdir(vhome)
assert not os.path.isdir(vhome)
def test_should_not_remove_config_vhome_when_debugging(monkeypatch):
monkeypatch.setenv("DEBUG", 1)
with create_vcfg_vhome() as vhome:
assert os.path.isdir(vhome)
assert os.path.isdir(vhome)
shutil.rmtree(vhome, ignore_errors=True)
def test_zmq_case_no_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
config_path = os.path.join(vhome, "config")
message_bus = "zmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "N"
is_vcp = "N"
install_historian = "N"
install_driver = "N"
install_listener = "N"
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
is_vcp,
install_historian,
install_driver,
install_listener
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
# print("CWD is: {}".format(os.getcwd()))
# print("OUT is: {}".format(out))
# print("ERROR is: {}".format(err))
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "zmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "volttron1"
assert not _is_agent_installed("listener")
assert not _is_agent_installed("master_driver")
assert not _is_agent_installed("platform_historian")
assert not _is_agent_installed("vc ")
assert not _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
def test_zmq_case_with_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
config_path = os.path.join(vhome, "config")
message_bus = "zmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "N"
is_vcp = "Y"
instance_name = "test_zmq"
vc_hostname = "{}{}".format("https://", get_hostname())
vc_port = "8443"
install_historian = "Y"
install_driver = "Y"
install_fake_device = "Y"
install_listener = "Y"
agent_autostart = "N"
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
is_vcp,
instance_name,
vc_hostname,
vc_port,
agent_autostart,
install_historian,
agent_autostart,
install_driver,
install_fake_device,
agent_autostart,
install_listener,
agent_autostart
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "zmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_zmq"
assert _is_agent_installed("listener")
assert _is_agent_installed("master_driver")
assert _is_agent_installed("platform_historian")
assert _is_agent_installed("vcp")
assert not _is_agent_installed("vc ")
assert not is_volttron_running(vhome)
def test_zmq_case_web_no_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
config_path = os.path.join(vhome, "config")
message_bus = "zmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "Y"
web_protocol = "https"
web_port = "8443"
gen_web_cert = "Y"
new_root_ca = "Y"
ca_country = "US"
ca_state = "test-state"
ca_location = "test-location"
ca_organization = "test-org"
ca_org_unit = "test-org-unit"
is_vc = "N"
is_vcp = "N"
install_historian = "N"
install_driver = "N"
install_listener = "N"
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_protocol,
web_port,
gen_web_cert,
new_root_ca,
ca_country,
ca_state,
ca_location,
ca_organization,
ca_org_unit,
is_vc,
is_vcp,
install_historian,
install_driver,
install_listener
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "zmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "volttron1"
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443")
assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt")
assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem")
assert not _is_agent_installed("listener")
assert not _is_agent_installed("master_driver")
assert not _is_agent_installed("platform_historian")
assert not _is_agent_installed("vc ")
assert not _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
def test_zmq_case_web_with_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
config_path = os.path.join(vhome, "config")
message_bus = "zmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "Y"
web_protocol = "https"
web_port = "8443"
gen_web_cert = "Y"
new_root_ca = "Y"
ca_country = "US"
ca_state = "test-state"
ca_location = "test-location"
ca_organization = "test-org"
ca_org_unit = "test-org-unit"
is_vc = "N"
is_vcp = "Y"
instance_name = "test_zmq"
vc_hostname = "{}{}".format("https://", get_hostname())
vc_port = "8443"
install_historian = "Y"
install_driver = "Y"
install_fake_device = "Y"
install_listener = "Y"
agent_autostart = "N"
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_protocol,
web_port,
gen_web_cert,
new_root_ca,
ca_country,
ca_state,
ca_location,
ca_organization,
ca_org_unit,
is_vc,
is_vcp,
instance_name,
vc_hostname,
vc_port,
agent_autostart,
install_historian,
agent_autostart,
install_driver,
install_fake_device,
agent_autostart,
install_listener,
agent_autostart
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "zmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_zmq"
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443")
assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt")
assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem")
assert _is_agent_installed("listener")
assert _is_agent_installed("master_driver")
assert _is_agent_installed("platform_historian")
assert not _is_agent_installed("vc ")
assert _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
def test_zmq_case_web_vc(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
config_path = os.path.join(vhome, "config")
message_bus = "zmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "Y"
web_protocol = "https"
web_port = "8443"
gen_web_cert = "Y"
new_root_ca = "Y"
ca_country = "US"
ca_state = "test-state"
ca_location = "test-location"
ca_organization = "test-org"
ca_org_unit = "test-org-unit"
is_vc = "Y"
is_vcp = "Y"
instance_name = "test_zmq"
install_historian = "N"
install_driver = "N"
install_listener = "N"
agent_autostart = "N"
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_protocol,
web_port,
gen_web_cert,
new_root_ca,
ca_country,
ca_state,
ca_location,
ca_organization,
ca_org_unit,
is_vc,
agent_autostart,
is_vcp,
instance_name,
agent_autostart,
install_historian,
install_driver,
install_listener
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "zmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_zmq"
assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443")
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443")
assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt")
assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem")
assert not _is_agent_installed("listener")
assert not _is_agent_installed("master_driver")
assert not _is_agent_installed("platform_historian")
assert _is_agent_installed("vc ")
assert _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
def test_zmq_case_web_vc_with_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
config_path = os.path.join(vhome, "config")
message_bus = "zmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "Y"
web_protocol = "https"
web_port = "8443"
gen_web_cert = "Y"
new_root_ca = "Y"
ca_country = "US"
ca_state = "test-state"
ca_location = "test-location"
ca_organization = "test-org"
ca_org_unit = "test-org-unit"
is_vc = "Y"
is_vcp = "Y"
instance_name = "test_zmq"
install_historian = "Y"
install_driver = "Y"
install_fake_device = "Y"
install_listener = "Y"
agent_autostart = "N"
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_protocol,
web_port,
gen_web_cert,
new_root_ca,
ca_country,
ca_state,
ca_location,
ca_organization,
ca_org_unit,
is_vc,
agent_autostart,
is_vcp,
instance_name,
agent_autostart,
install_historian,
agent_autostart,
install_driver,
install_fake_device,
agent_autostart,
install_listener,
agent_autostart
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "zmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_zmq"
assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443")
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443")
assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt")
assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem")
assert _is_agent_installed("listener")
assert _is_agent_installed("master_driver")
assert _is_agent_installed("platform_historian")
assert _is_agent_installed("vc ")
assert _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
@pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.mark.timeout(360)
def test_rmq_case_no_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
monkeypatch.setenv("RABBITMQ_CONF_ENV_FILE", "")
config_path = os.path.join(vhome, "config")
message_bus = "rmq"
instance_name = "test_rmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "N"
is_vcp = "N"
install_historian = "N"
install_driver = "N"
install_listener = "N"
create_rmq_volttron_setup(vhome=vhome, env=os.environ, ssl_auth=True, instance_name=instance_name)
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
is_vcp,
install_historian,
install_driver,
install_listener
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "rmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_rmq"
assert not _is_agent_installed("listener")
assert not _is_agent_installed("master_driver")
assert not _is_agent_installed("platform_historian")
assert not _is_agent_installed("vc ")
assert not _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
@pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.mark.timeout(360)
def test_rmq_case_with_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
monkeypatch.setenv("RABBITMQ_CONF_ENV_FILE", "")
config_path = os.path.join(vhome, "config")
message_bus = "rmq"
instance_name = "test_rmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "N"
is_vcp = "Y"
vc_hostname = "{}{}".format("https://", get_hostname())
vc_port = "8443"
install_historian = "Y"
install_driver = "Y"
install_fake_device = "Y"
install_listener = "Y"
agent_autostart = "N"
create_rmq_volttron_setup(vhome=vhome, env=os.environ, ssl_auth=True, instance_name=instance_name)
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
is_vcp,
instance_name,
vc_hostname,
vc_port,
agent_autostart,
install_historian,
agent_autostart,
install_driver,
install_fake_device,
agent_autostart,
install_listener,
agent_autostart
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "rmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_rmq"
assert _is_agent_installed("listener")
assert _is_agent_installed("master_driver")
assert _is_agent_installed("platform_historian")
assert _is_agent_installed("vcp")
assert not _is_agent_installed("vc ")
assert not is_volttron_running(vhome)
@pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.mark.timeout(360)
def test_rmq_case_web_no_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
monkeypatch.setenv("RABBITMQ_CONF_ENV_FILE", "")
config_path = os.path.join(vhome, "config")
message_bus = "rmq"
instance_name = "test_rmq"
is_web_enabled = "Y"
web_port = "8443"
is_vc = "N"
is_vcp = "N"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
install_historian = "N"
install_driver = "N"
install_listener = "N"
create_rmq_volttron_setup(vhome=vhome, env=os.environ, ssl_auth=True, instance_name=instance_name)
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_port,
is_vc,
is_vcp,
install_historian,
install_driver,
install_listener
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "rmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_rmq"
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443")
assert not _is_agent_installed("listener")
assert not _is_agent_installed("master_driver")
assert not _is_agent_installed("platform_historian")
assert not _is_agent_installed("vc ")
assert not _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
@pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.mark.timeout(360)
def test_rmq_case_web_with_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
monkeypatch.setenv("RABBITMQ_CONF_ENV_FILE", "")
config_path = os.path.join(vhome, "config")
message_bus = "rmq"
instance_name = "test_rmq"
is_web_enabled = "Y"
web_port = "8443"
is_vc = "N"
is_vcp = "Y"
vc_hostname = "{}{}".format("https://", get_hostname())
vc_port = "8443"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
install_historian = "Y"
install_driver = "Y"
install_fake_device = "Y"
install_listener = "Y"
agent_autostart = "N"
create_rmq_volttron_setup(vhome=vhome, env=os.environ, ssl_auth=True, instance_name=instance_name)
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_port,
is_vc,
is_vcp,
instance_name,
vc_hostname,
vc_port,
agent_autostart,
install_historian,
agent_autostart,
install_driver,
install_fake_device,
agent_autostart,
install_listener,
agent_autostart
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "rmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_rmq"
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443")
assert _is_agent_installed("listener")
assert _is_agent_installed("master_driver")
assert _is_agent_installed("platform_historian")
assert not _is_agent_installed("vc ")
assert _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
@pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.mark.timeout(360)
def test_rmq_case_web_vc(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
monkeypatch.setenv("RABBITMQ_CONF_ENV_FILE", "")
config_path = os.path.join(vhome, "config")
message_bus = "rmq"
instance_name = "test_rmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "Y"
web_port = "8443"
is_vc = "Y"
vc_admin_name = "test"
vc_admin_password = "test"
is_vcp = "Y"
install_historian = "N"
install_driver = "N"
install_listener = "N"
agent_autostart = "N"
create_rmq_volttron_setup(vhome=vhome, env=os.environ, ssl_auth=True, instance_name=instance_name)
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_port,
is_vc,
agent_autostart,
is_vcp,
instance_name,
agent_autostart,
install_historian,
install_driver,
install_listener
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "rmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_rmq"
assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname(), ":8443")
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443")
assert not _is_agent_installed("listener")
assert not _is_agent_installed("master_driver")
assert not _is_agent_installed("platform_historian")
assert _is_agent_installed("vc ")
assert _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
@pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.mark.timeout(360)
def test_rmq_case_web_vc_with_agents(monkeypatch):
with create_vcfg_vhome() as vhome:
monkeypatch.setenv("VOLTTRON_HOME", vhome)
monkeypatch.setenv("RABBITMQ_CONF_ENV_FILE", "")
config_path = os.path.join(vhome, "config")
message_bus = "rmq"
instance_name = "test_rmq"
vip_address = "tcp://127.0.0.15"
vip_port = "22916"
is_web_enabled = "Y"
web_port = "8443"
is_vc = "Y"
is_vcp = "Y"
install_historian = "Y"
install_driver = "Y"
install_fake_device = "Y"
install_listener = "Y"
agent_autostart = "N"
create_rmq_volttron_setup(vhome=vhome, env=os.environ, ssl_auth=True, instance_name=instance_name)
vcfg_args = "\n".join([message_bus,
vip_address,
vip_port,
is_web_enabled,
web_port,
is_vc,
agent_autostart,
is_vcp,
instance_name,
agent_autostart,
install_historian,
agent_autostart,
install_driver,
install_fake_device,
agent_autostart,
install_listener,
agent_autostart
])
with subprocess.Popen(["vcfg", "--vhome", vhome],
env=os.environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
) as vcfg:
out, err = vcfg.communicate(vcfg_args)
assert os.path.exists(config_path)
config = ConfigParser()
config.read(config_path)
assert config.get('volttron', 'message-bus') == "rmq"
assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916"
assert config.get('volttron', 'instance-name') == "test_rmq"
assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname(), ":8443")
assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443")
assert _is_agent_installed("listener")
assert _is_agent_installed("master_driver")
assert _is_agent_installed("platform_historian")
assert _is_agent_installed("vc ")
assert _is_agent_installed("vcp")
assert not is_volttron_running(vhome)
| 40.56106
| 129
| 0.50524
|
af1fc9110573b046fa817312ab97d8a265126663
| 65,665
|
py
|
Python
|
nova/tests/unit/test_utils.py
|
LinShuicheng/stx-nova
|
0b03ed64e2c3aa32eb07bd7e315ca1248d9c451c
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/test_utils.py
|
LinShuicheng/stx-nova
|
0b03ed64e2c3aa32eb07bd7e315ca1248d9c451c
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/test_utils.py
|
LinShuicheng/stx-nova
|
0b03ed64e2c3aa32eb07bd7e315ca1248d9c451c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import os.path
import tempfile
import eventlet
from keystoneauth1 import adapter as ks_adapter
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1.identity import base as ks_identity
from keystoneauth1 import session as ks_session
import mock
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_context import fixture as context_fixture
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import units
import six
from nova import context
from nova import exception
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.objects import test_objects
from nova.tests.unit import utils as test_utils
from nova import utils
CONF = cfg.CONF
class GenericUtilsTestCase(test.NoDBTestCase):
def test_parse_server_string(self):
result = utils.parse_server_string('::1')
self.assertEqual(('::1', ''), result)
result = utils.parse_server_string('[::1]:8773')
self.assertEqual(('::1', '8773'), result)
result = utils.parse_server_string('2001:db8::192.168.1.1')
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
result = utils.parse_server_string('192.168.1.1')
self.assertEqual(('192.168.1.1', ''), result)
result = utils.parse_server_string('192.168.1.2:8773')
self.assertEqual(('192.168.1.2', '8773'), result)
result = utils.parse_server_string('192.168.1.3')
self.assertEqual(('192.168.1.3', ''), result)
result = utils.parse_server_string('www.example.com:8443')
self.assertEqual(('www.example.com', '8443'), result)
result = utils.parse_server_string('www.example.com')
self.assertEqual(('www.example.com', ''), result)
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
result = utils.parse_server_string('')
self.assertEqual(('', ''), result)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_hostname_has_default(self):
hostname = u"\u7684hello"
defaultname = "Server-1"
self.assertEqual("hello", utils.sanitize_hostname(hostname,
defaultname))
def test_hostname_empty_has_default(self):
hostname = u"\u7684"
defaultname = "Server-1"
self.assertEqual(defaultname, utils.sanitize_hostname(hostname,
defaultname))
def test_hostname_empty_has_default_too_long(self):
hostname = u"\u7684"
defaultname = "a" * 64
self.assertEqual("a" * 63, utils.sanitize_hostname(hostname,
defaultname))
def test_hostname_empty_no_default(self):
hostname = u"\u7684"
self.assertEqual("", utils.sanitize_hostname(hostname))
def test_hostname_empty_minus_period(self):
hostname = "---..."
self.assertEqual("", utils.sanitize_hostname(hostname))
def test_hostname_with_space(self):
hostname = " a b c "
self.assertEqual("a-b-c", utils.sanitize_hostname(hostname))
def test_hostname_too_long(self):
hostname = "a" * 64
self.assertEqual(63, len(utils.sanitize_hostname(hostname)))
def test_hostname_truncated_no_hyphen(self):
hostname = "a" * 62
hostname = hostname + '-' + 'a'
res = utils.sanitize_hostname(hostname)
# we trim to 63 and then trim the trailing dash
self.assertEqual(62, len(res))
self.assertFalse(res.endswith('-'), 'The hostname ends with a -')
def test_generate_password(self):
password = utils.generate_password()
self.assertTrue([c for c in password if c in '0123456789'])
self.assertTrue([c for c in password
if c in 'abcdefghijklmnopqrstuvwxyz'])
self.assertTrue([c for c in password
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
@mock.patch('nova.privsep.path.chown')
def test_temporary_chown(self, mock_chown):
with tempfile.NamedTemporaryFile() as f:
with utils.temporary_chown(f.name, owner_uid=2):
mock_chown.assert_called_once_with(f.name, uid=2)
mock_chown.reset_mock()
mock_chown.assert_called_once_with(f.name, uid=os.getuid())
def test_get_shortened_ipv6(self):
self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
utils.get_shortened_ipv6(
"abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
self.assertEqual("::1", utils.get_shortened_ipv6(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertEqual("caca::caca:0:babe:201:102",
utils.get_shortened_ipv6(
"caca:0000:0000:caca:0000:babe:0201:0102"))
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"127.0.0.1")
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"failure")
def test_get_shortened_ipv6_cidr(self):
self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
"2600:0000:0000:0000:0000:0000:0000:0000/64"))
self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
"2600::1/64"))
self.assertRaises(netaddr.AddrFormatError,
utils.get_shortened_ipv6_cidr,
"127.0.0.1")
self.assertRaises(netaddr.AddrFormatError,
utils.get_shortened_ipv6_cidr,
"failure")
def test_safe_ip_format(self):
self.assertEqual("[::1]", utils.safe_ip_format("::1"))
self.assertEqual("127.0.0.1", utils.safe_ip_format("127.0.0.1"))
self.assertEqual("[::ffff:127.0.0.1]", utils.safe_ip_format(
"::ffff:127.0.0.1"))
self.assertEqual("localhost", utils.safe_ip_format("localhost"))
def test_format_remote_path(self):
self.assertEqual("[::1]:/foo/bar",
utils.format_remote_path("::1", "/foo/bar"))
self.assertEqual("127.0.0.1:/foo/bar",
utils.format_remote_path("127.0.0.1", "/foo/bar"))
self.assertEqual("[::ffff:127.0.0.1]:/foo/bar",
utils.format_remote_path("::ffff:127.0.0.1",
"/foo/bar"))
self.assertEqual("localhost:/foo/bar",
utils.format_remote_path("localhost", "/foo/bar"))
self.assertEqual("/foo/bar", utils.format_remote_path(None,
"/foo/bar"))
def test_get_hash_str(self):
base_str = b"foo"
base_unicode = u"foo"
value = hashlib.md5(base_str).hexdigest()
self.assertEqual(
value, utils.get_hash_str(base_str))
self.assertEqual(
value, utils.get_hash_str(base_unicode))
def test_get_obj_repr_unicode(self):
instance = instance_obj.Instance()
instance.display_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
        # under Python 2, repr() returns a byte string before conversion
self.assertIs(str, type(repr(instance)))
self.assertIs(six.text_type,
type(utils.get_obj_repr_unicode(instance)))
def test_use_rootwrap(self):
self.flags(disable_rootwrap=False, group='workarounds')
self.flags(rootwrap_config='foo')
cmd = utils.get_root_helper()
self.assertEqual('sudo nova-rootwrap foo', cmd)
def test_use_sudo(self):
self.flags(disable_rootwrap=True, group='workarounds')
cmd = utils.get_root_helper()
self.assertEqual('sudo', cmd)
def test_ssh_execute(self):
expected_args = ('ssh', '-o', 'BatchMode=yes',
'remotehost', 'ls', '-l')
with mock.patch('nova.utils.execute') as mock_method:
utils.ssh_execute('remotehost', 'ls', '-l')
mock_method.assert_called_once_with(*expected_args)
def test_generate_hostid(self):
host = 'host'
project_id = '9b9e3c847e904b0686e8ffb20e4c6381'
hostId = 'fa123c6f74efd4aad95f84096f9e187caa0625925a9e7837b2b46792'
self.assertEqual(hostId, utils.generate_hostid(host, project_id))
def test_generate_hostid_with_none_host(self):
project_id = '9b9e3c847e904b0686e8ffb20e4c6381'
self.assertEqual('', utils.generate_hostid(None, project_id))
class TestCachedFile(test.NoDBTestCase):
@mock.patch('os.path.getmtime', return_value=1)
def test_read_cached_file(self, getmtime):
utils._FILE_CACHE = {
'/this/is/a/fake': {"data": 1123, "mtime": 1}
}
fresh, data = utils.read_cached_file("/this/is/a/fake")
fdata = utils._FILE_CACHE['/this/is/a/fake']["data"]
self.assertEqual(fdata, data)
@mock.patch('os.path.getmtime', return_value=2)
def test_read_modified_cached_file(self, getmtime):
utils._FILE_CACHE = {
'/this/is/a/fake': {"data": 1123, "mtime": 1}
}
fake_contents = "lorem ipsum"
with mock.patch('six.moves.builtins.open',
mock.mock_open(read_data=fake_contents)):
fresh, data = utils.read_cached_file("/this/is/a/fake")
self.assertEqual(data, fake_contents)
self.assertTrue(fresh)
def test_delete_cached_file(self):
filename = '/this/is/a/fake/deletion/of/cached/file'
utils._FILE_CACHE = {
filename: {"data": 1123, "mtime": 1}
}
self.assertIn(filename, utils._FILE_CACHE)
utils.delete_cached_file(filename)
self.assertNotIn(filename, utils._FILE_CACHE)
def test_delete_cached_file_not_exist(self):
        # We expect that if the cached file does not exist, no exception is raised.
filename = '/this/is/a/fake/deletion/attempt/of/not/cached/file'
self.assertNotIn(filename, utils._FILE_CACHE)
utils.delete_cached_file(filename)
self.assertNotIn(filename, utils._FILE_CACHE)
class RootwrapDaemonTestCase(test.NoDBTestCase):
@mock.patch('oslo_rootwrap.client.Client')
def test_get_client(self, mock_client):
mock_conf = mock.MagicMock()
utils.RootwrapDaemonHelper(mock_conf)
mock_client.assert_called_once_with(
["sudo", "nova-rootwrap-daemon", mock_conf])
@mock.patch('nova.utils.LOG.info')
def test_execute(self, mock_info):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(0, None, None))
daemon.execute('a', 1, foo='bar', run_as_root=True)
daemon.client.execute.assert_called_once_with(['a', '1'], None)
mock_info.assert_has_calls([mock.call(
u'Executing RootwrapDaemonHelper.execute cmd=[%(cmd)r] '
u'kwargs=[%(kwargs)r]',
{'cmd': u'a 1', 'kwargs': {'run_as_root': True, 'foo': 'bar'}})])
def test_execute_with_kwargs(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(0, None, None))
daemon.execute('a', 1, foo='bar', run_as_root=True, process_input=True)
daemon.client.execute.assert_called_once_with(['a', '1'], True)
def test_execute_fail(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
self.assertRaises(processutils.ProcessExecutionError,
daemon.execute, 'b', 2)
def test_execute_pass_with_check_exit_code(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
daemon.execute('b', 2, check_exit_code=[-2])
@mock.patch('time.sleep', new=mock.Mock())
def test_execute_fail_with_retry(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
self.assertRaises(processutils.ProcessExecutionError,
daemon.execute, 'b', 2, attempts=2)
daemon.client.execute.assert_has_calls(
[mock.call(['b', '2'], None),
mock.call(['b', '2'], None)])
@mock.patch('time.sleep', new=mock.Mock())
@mock.patch('nova.utils.LOG.log')
def test_execute_fail_and_logging(self, mock_log):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
self.assertRaises(processutils.ProcessExecutionError,
daemon.execute, 'b', 2,
attempts=2,
loglevel=logging.CRITICAL,
log_errors=processutils.LOG_ALL_ERRORS)
mock_log.assert_has_calls(
[
mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s',
u'b 2'),
mock.call(logging.CRITICAL,
'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
'in %(end_time)0.3fs',
{'sanitized_cmd': u'b 2', 'return_code': -2,
'end_time': mock.ANY}),
mock.call(logging.CRITICAL,
u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r'
u'\nstdout: %(stdout)r\nstderr: %(stderr)r',
{'code': -2, 'cmd': u'b 2', 'stdout': u'None',
'stderr': u'None', 'desc': None}),
mock.call(logging.CRITICAL, u'%r failed. Retrying.', u'b 2'),
mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s',
u'b 2'),
mock.call(logging.CRITICAL,
'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
'in %(end_time)0.3fs',
{'sanitized_cmd': u'b 2', 'return_code': -2,
'end_time': mock.ANY}),
mock.call(logging.CRITICAL,
u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r'
u'\nstdout: %(stdout)r\nstderr: %(stderr)r',
{'code': -2, 'cmd': u'b 2', 'stdout': u'None',
'stderr': u'None', 'desc': None}),
mock.call(logging.CRITICAL, u'%r failed. Not Retrying.',
u'b 2')]
)
def test_trycmd(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(0, None, None))
daemon.trycmd('a', 1, foo='bar', run_as_root=True)
daemon.client.execute.assert_called_once_with(['a', '1'], None)
def test_trycmd_with_kwargs(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.execute = mock.Mock(return_value=('out', 'err'))
daemon.trycmd('a', 1, foo='bar', run_as_root=True,
loglevel=logging.WARN,
log_errors=True,
process_input=True,
delay_on_retry=False,
attempts=5,
check_exit_code=[200])
daemon.execute.assert_called_once_with('a', 1, attempts=5,
check_exit_code=[200],
delay_on_retry=False, foo='bar',
log_errors=True, loglevel=30,
process_input=True,
run_as_root=True)
def test_trycmd_fail(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
expected_err = six.text_type('''\
Unexpected error while running command.
Command: a 1
Exit code: -2''')
out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True)
daemon.client.execute.assert_called_once_with(['a', '1'], None)
self.assertIn(expected_err, err)
@mock.patch('time.sleep', new=mock.Mock())
def test_trycmd_fail_with_retry(self):
mock_conf = mock.MagicMock()
daemon = utils.RootwrapDaemonHelper(mock_conf)
daemon.client = mock.MagicMock()
daemon.client.execute = mock.Mock(return_value=(-2, None, None))
expected_err = six.text_type('''\
Unexpected error while running command.
Command: a 1
Exit code: -2''')
out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True,
attempts=3)
self.assertIn(expected_err, err)
daemon.client.execute.assert_has_calls(
[mock.call(['a', '1'], None),
mock.call(['a', '1'], None),
mock.call(['a', '1'], None)])
class AuditPeriodTest(test.NoDBTestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
# a fairly random time to test with
self.useFixture(utils_fixture.TimeFixture(
datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)))
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEqual(begin, datetime.datetime(
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEqual(begin, datetime.datetime(
minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEqual(begin, datetime.datetime(
minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEqual(begin, datetime.datetime(
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEqual(begin, datetime.datetime(
hour=6,
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEqual(begin, datetime.datetime(
hour=10,
day=3,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(begin, datetime.datetime(
day=1,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(
day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEqual(begin, datetime.datetime(
day=2,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(
day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEqual(begin, datetime.datetime(
day=15,
month=1,
year=2012))
self.assertEqual(end, datetime.datetime(
day=15,
month=2,
year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEqual(begin, datetime.datetime(
day=1,
month=1,
year=2011))
self.assertEqual(end, datetime.datetime(
day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEqual(begin, datetime.datetime(
day=1,
month=2,
year=2011))
self.assertEqual(end, datetime.datetime(
day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEqual(begin, datetime.datetime(
day=1,
month=6,
year=2010))
self.assertEqual(end, datetime.datetime(
day=1,
month=6,
year=2011))
class MetadataToDictTestCase(test.NoDBTestCase):
def test_metadata_to_dict(self):
self.assertEqual(utils.metadata_to_dict(
[{'key': 'foo1', 'value': 'bar'},
{'key': 'foo2', 'value': 'baz'}]),
{'foo1': 'bar', 'foo2': 'baz'})
def test_metadata_to_dict_with_include_deleted(self):
metadata = [{'key': 'foo1', 'value': 'bar', 'deleted': 1442875429,
'other': 'stuff'},
{'key': 'foo2', 'value': 'baz', 'deleted': 0,
'other': 'stuff2'}]
self.assertEqual({'foo1': 'bar', 'foo2': 'baz'},
utils.metadata_to_dict(metadata,
include_deleted=True))
self.assertEqual({'foo2': 'baz'},
utils.metadata_to_dict(metadata,
include_deleted=False))
# verify correct default behavior
self.assertEqual(utils.metadata_to_dict(metadata),
utils.metadata_to_dict(metadata,
include_deleted=False))
def test_metadata_to_dict_empty(self):
self.assertEqual({}, utils.metadata_to_dict([]))
self.assertEqual({}, utils.metadata_to_dict([], include_deleted=True))
self.assertEqual({}, utils.metadata_to_dict([], include_deleted=False))
def test_dict_to_metadata(self):
def sort_key(adict):
return sorted(adict.items())
metadata = utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2'))
expected = [{'key': 'foo1', 'value': 'bar1'},
{'key': 'foo2', 'value': 'bar2'}]
self.assertEqual(sorted(metadata, key=sort_key),
sorted(expected, key=sort_key))
def test_dict_to_metadata_empty(self):
self.assertEqual(utils.dict_to_metadata({}), [])
class ExpectedArgsTestCase(test.NoDBTestCase):
def test_passes(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
@dec
def func(foo, bar, baz="lol"):
pass
# Call to ensure nothing errors
func(None, None)
def test_raises(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
def func(bar, baz):
pass
self.assertRaises(TypeError, dec, func)
def test_var_no_of_args(self):
@utils.expects_func_args('foo')
def dec(f):
return f
@dec
def func(bar, *args, **kwargs):
pass
# Call to ensure nothing errors
func(None)
def test_more_layers(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
def dec_2(f):
def inner_f(*a, **k):
return f()
return inner_f
@dec_2
def func(bar, baz):
pass
self.assertRaises(TypeError, dec, func)
class StringLengthTestCase(test.NoDBTestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, 'name', max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', 'name', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
def test_check_string_length_noname(self):
self.assertIsNone(utils.check_string_length(
'test', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, max_length=255)
class ValidateIntegerTestCase(test.NoDBTestCase):
def test_exception_converted(self):
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
"im-not-an-int", "not-an-int")
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
3.14, "Pie")
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
"299", "Sparta no-show",
min_value=300, max_value=300)
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
55, "doing 55 in a 54",
max_value=54)
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
six.unichr(129), "UnicodeError",
max_value=1000)
class ValidateNeutronConfiguration(test.NoDBTestCase):
def test_nova_network(self):
self.flags(use_neutron=False)
self.assertFalse(utils.is_neutron())
def test_neutron(self):
self.flags(use_neutron=True)
self.assertTrue(utils.is_neutron())
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
def test_is_auto_disk_config_disabled(self):
self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))
def test_is_auto_disk_config_disabled_none(self):
self.assertFalse(utils.is_auto_disk_config_disabled(None))
def test_is_auto_disk_config_disabled_false(self):
self.assertFalse(utils.is_auto_disk_config_disabled("false"))
class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
def get_image(self):
image_meta = {
"id": "fake-image",
"name": "fake-name",
"min_ram": 1,
"min_disk": 1,
"disk_format": "raw",
"container_format": "bare",
}
return image_meta
def get_flavor(self):
flavor = {
"id": "fake.flavor",
"root_gb": 10,
}
return flavor
def test_base_image_properties(self):
image = self.get_image()
# Verify that we inherit all the needed keys
sys_meta = utils.get_system_metadata_from_image(image)
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image[key], sys_meta.get(sys_key))
# Verify that everything else is ignored
self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))
def test_inherit_image_properties(self):
image = self.get_image()
image["properties"] = {"foo1": "bar", "foo2": "baz"}
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that we inherit all the image properties
for key, expected in image["properties"].items():
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(sys_meta[sys_key], expected)
def test_skip_image_properties(self):
image = self.get_image()
image["properties"] = {
"foo1": "bar", "foo2": "baz",
"mappings": "wizz", "img_block_device_mapping": "eek",
}
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that we inherit all the image properties
for key, expected in image["properties"].items():
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
if key in utils.SM_SKIP_KEYS:
self.assertNotIn(sys_key, sys_meta)
else:
self.assertEqual(sys_meta[sys_key], expected)
def test_vhd_min_disk_image(self):
image = self.get_image()
flavor = self.get_flavor()
image["disk_format"] = "vhd"
sys_meta = utils.get_system_metadata_from_image(image, flavor)
# Verify that the min_disk property is taken from
# flavor's root_gb when using vhd disk format
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
self.assertEqual(sys_meta[sys_key], flavor["root_gb"])
def test_dont_inherit_empty_values(self):
image = self.get_image()
for key in utils.SM_INHERITABLE_KEYS:
image[key] = None
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that the empty properties have not been inherited
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertNotIn(sys_key, sys_meta)
class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
def get_system_metadata(self):
sys_meta = {
"image_min_ram": 1,
"image_min_disk": 1,
"image_disk_format": "raw",
"image_container_format": "bare",
}
return sys_meta
def test_image_from_system_metadata(self):
sys_meta = self.get_system_metadata()
sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
sys_meta["%simg_block_device_mapping" %
utils.SM_IMAGE_PROP_PREFIX] = "eek"
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that we inherit all the needed keys
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image[key], sys_meta.get(sys_key))
# Verify that we inherit the rest of metadata as properties
self.assertIn("properties", image)
for key in image["properties"]:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image["properties"][key], sys_meta[sys_key])
self.assertNotIn("img_block_device_mapping", image["properties"])
def test_dont_inherit_empty_values(self):
sys_meta = self.get_system_metadata()
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
sys_meta[sys_key] = None
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that the empty properties have not been inherited
for key in utils.SM_INHERITABLE_KEYS:
self.assertNotIn(key, image)
class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase):
def test_inherit_image_properties(self):
properties = {"fake_prop": "fake_value"}
volume = {"volume_image_metadata": properties}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual(properties, image_meta["properties"])
def test_image_size(self):
volume = {"size": 10}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual(10 * units.Gi, image_meta["size"])
def test_image_status(self):
volume = {}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual("active", image_meta["status"])
def test_values_conversion(self):
properties = {"min_ram": "5", "min_disk": "7"}
volume = {"volume_image_metadata": properties}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual(5, image_meta["min_ram"])
self.assertEqual(7, image_meta["min_disk"])
def test_suppress_not_image_properties(self):
properties = {"min_ram": "256", "min_disk": "128",
"image_id": "fake_id", "image_name": "fake_name",
"container_format": "ami", "disk_format": "ami",
"size": "1234", "checksum": "fake_checksum"}
volume = {"volume_image_metadata": properties}
image_meta = utils.get_image_metadata_from_volume(volume)
self.assertEqual({}, image_meta["properties"])
self.assertEqual(0, image_meta["size"])
# volume's properties should not be touched
self.assertNotEqual({}, properties)
class ResourceFilterTestCase(test.NoDBTestCase):
def _assert_filtering(self, res_list, filts, expected_tags):
actual_tags = utils.filter_and_format_resource_metadata('instance',
res_list, filts, 'metadata')
self.assertJsonEqual(expected_tags, actual_tags)
def test_filter_and_format_resource_metadata(self):
# Create some tags
# One overlapping pair, and one different key value pair
# i1 : foo=bar, bax=wibble
# i2 : foo=bar, baz=quux
# resources
i1 = {
'uuid': '1',
'metadata': {'foo': 'bar', 'bax': 'wibble'},
}
i2 = {
'uuid': '2',
'metadata': {'foo': 'bar', 'baz': 'quux'},
}
# Resources list
rl = [i1, i2]
# tags
i11 = {'instance_id': '1', 'key': 'foo', 'value': 'bar'}
i12 = {'instance_id': '1', 'key': 'bax', 'value': 'wibble'}
i21 = {'instance_id': '2', 'key': 'foo', 'value': 'bar'}
i22 = {'instance_id': '2', 'key': 'baz', 'value': 'quux'}
# No filter
self._assert_filtering(rl, [], [i11, i12, i21, i22])
self._assert_filtering(rl, {}, [i11, i12, i21, i22])
# Key search
# Both should have tags with key 'foo' and value 'bar'
self._assert_filtering(rl, {'key': 'foo', 'value': 'bar'}, [i11, i21])
# Both should have tags with key 'foo'
self._assert_filtering(rl, {'key': 'foo'}, [i11, i21])
# Only i2 should have tags with key 'baz' and value 'quux'
self._assert_filtering(rl, {'key': 'baz', 'value': 'quux'}, [i22])
# Only i2 should have tags with value 'quux'
self._assert_filtering(rl, {'value': 'quux'}, [i22])
# Empty list should be returned when no tags match
self._assert_filtering(rl, {'key': 'split', 'value': 'banana'}, [])
# Multiple values
# Only i2 should have tags with key 'baz' and values in the set
# ['quux', 'wibble']
self._assert_filtering(rl, {'key': 'baz', 'value': ['quux', 'wibble']},
[i22])
# But when specified as two different filters, no tags should be
        # returned. This is because the filter will mean "return tags which
        # have (key=baz AND value=quux) AND (key=baz AND value=wibble)".
self._assert_filtering(rl, [{'key': 'baz', 'value': 'quux'},
{'key': 'baz', 'value': 'wibble'}], [])
# Test for regex
self._assert_filtering(rl, {'value': '(?s)\\Aqu..*\\Z'}, [i22])
# Make sure bug #1365887 is fixed
i1['metadata']['key3'] = 'a'
self._assert_filtering(rl, {'value': 'banana'}, [])
class SafeTruncateTestCase(test.NoDBTestCase):
def test_exception_to_dict_with_long_message_3_bytes(self):
# Generate Chinese byte string whose length is 300. This Chinese UTF-8
# character occupies 3 bytes. After truncating, the byte string length
# should be 255.
msg = u'\u8d75' * 100
truncated_msg = utils.safe_truncate(msg, 255)
byte_message = encodeutils.safe_encode(truncated_msg)
self.assertEqual(255, len(byte_message))
def test_exception_to_dict_with_long_message_2_bytes(self):
# Generate Russian byte string whose length is 300. This Russian UTF-8
# character occupies 2 bytes. After truncating, the byte string length
# should be 254.
msg = encodeutils.safe_decode('\xd0\x92' * 150)
truncated_msg = utils.safe_truncate(msg, 255)
byte_message = encodeutils.safe_encode(truncated_msg)
self.assertEqual(254, len(byte_message))
class SpawnNTestCase(test.NoDBTestCase):
def setUp(self):
super(SpawnNTestCase, self).setUp()
self.useFixture(context_fixture.ClearRequestContext())
self.spawn_name = 'spawn_n'
def test_spawn_n_no_context(self):
self.assertIsNone(common_context.get_current())
def _fake_spawn(func, *args, **kwargs):
# call the method to ensure no error is raised
func(*args, **kwargs)
self.assertEqual('test', args[0])
def fake(arg):
pass
with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
getattr(utils, self.spawn_name)(fake, 'test')
self.assertIsNone(common_context.get_current())
def test_spawn_n_context(self):
self.assertIsNone(common_context.get_current())
ctxt = context.RequestContext('user', 'project')
def _fake_spawn(func, *args, **kwargs):
# call the method to ensure no error is raised
func(*args, **kwargs)
self.assertEqual(ctxt, args[0])
self.assertEqual('test', kwargs['kwarg1'])
def fake(context, kwarg1=None):
pass
with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
getattr(utils, self.spawn_name)(fake, ctxt, kwarg1='test')
self.assertEqual(ctxt, common_context.get_current())
def test_spawn_n_context_different_from_passed(self):
self.assertIsNone(common_context.get_current())
ctxt = context.RequestContext('user', 'project')
ctxt_passed = context.RequestContext('user', 'project',
overwrite=False)
self.assertEqual(ctxt, common_context.get_current())
def _fake_spawn(func, *args, **kwargs):
# call the method to ensure no error is raised
func(*args, **kwargs)
self.assertEqual(ctxt_passed, args[0])
self.assertEqual('test', kwargs['kwarg1'])
def fake(context, kwarg1=None):
pass
with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
getattr(utils, self.spawn_name)(fake, ctxt_passed, kwarg1='test')
self.assertEqual(ctxt, common_context.get_current())
class SpawnTestCase(SpawnNTestCase):
def setUp(self):
super(SpawnTestCase, self).setUp()
self.spawn_name = 'spawn'
class UT8TestCase(test.NoDBTestCase):
def test_none_value(self):
self.assertIsInstance(utils.utf8(None), type(None))
def test_bytes_value(self):
some_value = b"fake data"
return_value = utils.utf8(some_value)
# check that type of returned value doesn't changed
self.assertIsInstance(return_value, type(some_value))
self.assertEqual(some_value, return_value)
def test_not_text_type(self):
return_value = utils.utf8(1)
self.assertEqual(b"1", return_value)
self.assertIsInstance(return_value, six.binary_type)
def test_text_type_with_encoding(self):
some_value = 'test\u2026config'
self.assertEqual(some_value, utils.utf8(some_value).decode("utf-8"))
class TestObjectCallHelpers(test.NoDBTestCase):
def test_with_primitives(self):
tester = mock.Mock()
tester.foo(1, 'two', three='four')
self.assertTrue(
test_utils.obj_called_with(tester.foo, 1, 'two', three='four'))
self.assertFalse(
test_utils.obj_called_with(tester.foo, 42, 'two', three='four'))
def test_with_object(self):
obj_base.NovaObjectRegistry.register(test_objects.MyObj)
obj = test_objects.MyObj(foo=1, bar='baz')
tester = mock.Mock()
tester.foo(1, obj)
self.assertTrue(
test_utils.obj_called_with(
tester.foo, 1,
test_objects.MyObj(foo=1, bar='baz')))
self.assertFalse(
test_utils.obj_called_with(
tester.foo, 1,
test_objects.MyObj(foo=2, bar='baz')))
def test_with_object_multiple(self):
obj_base.NovaObjectRegistry.register(test_objects.MyObj)
obj1 = test_objects.MyObj(foo=1, bar='baz')
obj2 = test_objects.MyObj(foo=3, bar='baz')
tester = mock.Mock()
tester.foo(1, obj1)
tester.foo(1, obj1)
tester.foo(3, obj2)
# Called at all
self.assertTrue(
test_utils.obj_called_with(
tester.foo, 1,
test_objects.MyObj(foo=1, bar='baz')))
# Called once (not true)
self.assertFalse(
test_utils.obj_called_once_with(
tester.foo, 1,
test_objects.MyObj(foo=1, bar='baz')))
# Not called with obj.foo=2
self.assertFalse(
test_utils.obj_called_with(
tester.foo, 1,
test_objects.MyObj(foo=2, bar='baz')))
        # Called with obj.foo=3
self.assertTrue(
test_utils.obj_called_with(
tester.foo, 3,
test_objects.MyObj(foo=3, bar='baz')))
        # Called once with obj.foo=3
self.assertTrue(
test_utils.obj_called_once_with(
tester.foo, 3,
test_objects.MyObj(foo=3, bar='baz')))
class GetKSAAdapterTestCase(test.NoDBTestCase):
"""Tests for nova.utils.get_endpoint_data()."""
def setUp(self):
super(GetKSAAdapterTestCase, self).setUp()
self.sess = mock.create_autospec(ks_session.Session, instance=True)
self.auth = mock.create_autospec(ks_identity.BaseIdentityPlugin,
instance=True)
load_adap_p = mock.patch(
'keystoneauth1.loading.load_adapter_from_conf_options')
self.addCleanup(load_adap_p.stop)
self.load_adap = load_adap_p.start()
ksa_fixture = self.useFixture(nova_fixtures.KSAFixture())
self.mock_ksa_load_auth = ksa_fixture.mock_load_auth
self.mock_ksa_load_sess = ksa_fixture.mock_load_sess
self.mock_ksa_session = ksa_fixture.mock_session
self.mock_ksa_load_auth.return_value = self.auth
self.mock_ksa_load_sess.return_value = self.sess
def test_bogus_service_type(self):
self.assertRaises(exception.ConfGroupForServiceTypeNotFound,
utils.get_ksa_adapter, 'bogus')
self.mock_ksa_load_auth.assert_not_called()
self.mock_ksa_load_sess.assert_not_called()
self.load_adap.assert_not_called()
def test_all_params(self):
ret = utils.get_ksa_adapter(
'image', ksa_auth='auth', ksa_session='sess',
min_version='min', max_version='max')
# Returned the result of load_adapter_from_conf_options
self.assertEqual(self.load_adap.return_value, ret)
# Because we supplied ksa_auth, load_auth* not called
self.mock_ksa_load_auth.assert_not_called()
# Ditto ksa_session/load_session*
self.mock_ksa_load_sess.assert_not_called()
# load_adapter* called with what we passed in (and the right group)
self.load_adap.assert_called_once_with(
utils.CONF, 'glance', session='sess', auth='auth',
min_version='min', max_version='max', raise_exc=False)
def test_auth_from_session(self):
self.sess.auth = 'auth'
ret = utils.get_ksa_adapter('baremetal', ksa_session=self.sess)
# Returned the result of load_adapter_from_conf_options
self.assertEqual(self.load_adap.return_value, ret)
# Because ksa_auth found in ksa_session, load_auth* not called
self.mock_ksa_load_auth.assert_not_called()
# Because we supplied ksa_session, load_session* not called
self.mock_ksa_load_sess.assert_not_called()
# load_adapter* called with the auth from the session
self.load_adap.assert_called_once_with(
utils.CONF, 'ironic', session=self.sess, auth='auth',
min_version=None, max_version=None, raise_exc=False)
def test_load_auth_and_session(self):
ret = utils.get_ksa_adapter('volumev3')
# Returned the result of load_adapter_from_conf_options
self.assertEqual(self.load_adap.return_value, ret)
# Had to load the auth
self.mock_ksa_load_auth.assert_called_once_with(utils.CONF, 'cinder')
# Had to load the session, passed in the loaded auth
self.mock_ksa_load_sess.assert_called_once_with(utils.CONF, 'cinder',
auth=self.auth)
# load_adapter* called with the loaded auth & session
self.load_adap.assert_called_once_with(
utils.CONF, 'cinder', session=self.sess, auth=self.auth,
min_version=None, max_version=None, raise_exc=False)
class GetEndpointTestCase(test.NoDBTestCase):
def setUp(self):
super(GetEndpointTestCase, self).setUp()
self.adap = mock.create_autospec(ks_adapter.Adapter, instance=True)
self.adap.endpoint_override = None
self.adap.service_type = 'stype'
self.adap.interface = ['admin', 'public']
def test_endpoint_override(self):
self.adap.endpoint_override = 'foo'
self.assertEqual('foo', utils.get_endpoint(self.adap))
self.adap.get_endpoint_data.assert_not_called()
self.adap.get_endpoint.assert_not_called()
def test_image_good(self):
self.adap.service_type = 'image'
self.adap.get_endpoint_data.return_value.catalog_url = 'url'
self.assertEqual('url', utils.get_endpoint(self.adap))
self.adap.get_endpoint_data.assert_called_once_with()
self.adap.get_endpoint.assert_not_called()
def test_image_bad(self):
self.adap.service_type = 'image'
self.adap.get_endpoint_data.side_effect = AttributeError
self.adap.get_endpoint.return_value = 'url'
self.assertEqual('url', utils.get_endpoint(self.adap))
self.adap.get_endpoint_data.assert_called_once_with()
self.adap.get_endpoint.assert_called_once_with()
def test_nonimage_good(self):
self.adap.get_endpoint.return_value = 'url'
self.assertEqual('url', utils.get_endpoint(self.adap))
self.adap.get_endpoint_data.assert_not_called()
self.adap.get_endpoint.assert_called_once_with()
def test_nonimage_try_interfaces(self):
self.adap.get_endpoint.side_effect = (ks_exc.EndpointNotFound, 'url')
self.assertEqual('url', utils.get_endpoint(self.adap))
self.adap.get_endpoint_data.assert_not_called()
self.assertEqual(2, self.adap.get_endpoint.call_count)
self.assertEqual('admin', self.adap.interface)
def test_nonimage_try_interfaces_fail(self):
self.adap.get_endpoint.side_effect = ks_exc.EndpointNotFound
self.assertRaises(ks_exc.EndpointNotFound,
utils.get_endpoint, self.adap)
self.adap.get_endpoint_data.assert_not_called()
self.assertEqual(3, self.adap.get_endpoint.call_count)
self.assertEqual('public', self.adap.interface)
class RunOnceTests(test.NoDBTestCase):
fake_logger = mock.MagicMock()
@utils.run_once("already ran once", fake_logger)
def dummy_test_func(self, fail=False):
if fail:
raise ValueError()
return True
def setUp(self):
super(RunOnceTests, self).setUp()
self.dummy_test_func.reset()
RunOnceTests.fake_logger.reset_mock()
    def test_wrapped_functions_called_once(self):
self.assertFalse(self.dummy_test_func.called)
result = self.dummy_test_func()
self.assertTrue(result)
self.assertTrue(self.dummy_test_func.called)
# assert that on second invocation no result
# is returned and that the logger is invoked.
result = self.dummy_test_func()
RunOnceTests.fake_logger.assert_called_once()
self.assertIsNone(result)
    def test_wrapped_functions_called_once_raises(self):
self.assertFalse(self.dummy_test_func.called)
self.assertRaises(ValueError, self.dummy_test_func, fail=True)
self.assertTrue(self.dummy_test_func.called)
# assert that on second invocation no result
# is returned and that the logger is invoked.
result = self.dummy_test_func()
RunOnceTests.fake_logger.assert_called_once()
self.assertIsNone(result)
    def test_wrapped_functions_can_be_reset(self):
# assert we start with a clean state
self.assertFalse(self.dummy_test_func.called)
result = self.dummy_test_func()
self.assertTrue(result)
self.dummy_test_func.reset()
# assert we restored a clean state
self.assertFalse(self.dummy_test_func.called)
result = self.dummy_test_func()
self.assertTrue(result)
# assert that we never called the logger
RunOnceTests.fake_logger.assert_not_called()
def test_reset_calls_cleanup(self):
mock_clean = mock.Mock()
@utils.run_once("already ran once", self.fake_logger,
cleanup=mock_clean)
def f():
pass
f()
self.assertTrue(f.called)
f.reset()
self.assertFalse(f.called)
mock_clean.assert_called_once_with()
def test_clean_is_not_called_at_reset_if_wrapped_not_called(self):
mock_clean = mock.Mock()
@utils.run_once("already ran once", self.fake_logger,
cleanup=mock_clean)
def f():
pass
self.assertFalse(f.called)
f.reset()
self.assertFalse(f.called)
self.assertFalse(mock_clean.called)
def test_reset_works_even_if_cleanup_raises(self):
mock_clean = mock.Mock(side_effect=ValueError())
@utils.run_once("already ran once", self.fake_logger,
cleanup=mock_clean)
def f():
pass
f()
self.assertTrue(f.called)
self.assertRaises(ValueError, f.reset)
self.assertFalse(f.called)
mock_clean.assert_called_once_with()
class TestResourceClassNormalize(test.NoDBTestCase):
def test_normalize_name(self):
values = [
("foo", "CUSTOM_FOO"),
("VCPU", "CUSTOM_VCPU"),
("CUSTOM_BOB", "CUSTOM_CUSTOM_BOB"),
("CUSTM_BOB", "CUSTOM_CUSTM_BOB"),
]
for test_value, expected in values:
result = utils.normalize_rc_name(test_value)
self.assertEqual(expected, result)
def test_normalize_name_bug_1762789(self):
"""The .upper() builtin treats sharp S (\xdf) differently in py2 vs.
py3. Make sure normalize_name handles it properly.
"""
name = u'Fu\xdfball'
self.assertEqual(u'CUSTOM_FU_BALL', utils.normalize_rc_name(name))
class TestGetConfGroup(test.NoDBTestCase):
"""Tests for nova.utils._get_conf_group"""
@mock.patch('nova.utils.CONF')
@mock.patch('nova.utils._SERVICE_TYPES.get_project_name')
def test__get_conf_group(self, mock_get_project_name, mock_conf):
test_conf_grp = 'test_confgrp'
test_service_type = 'test_service_type'
mock_get_project_name.return_value = test_conf_grp
# happy path
mock_conf.test_confgrp = None
actual_conf_grp = utils._get_conf_group(test_service_type)
self.assertEqual(test_conf_grp, actual_conf_grp)
mock_get_project_name.assert_called_once_with(test_service_type)
# service type as the conf group
del mock_conf.test_confgrp
mock_conf.test_service_type = None
actual_conf_grp = utils._get_conf_group(test_service_type)
self.assertEqual(test_service_type, actual_conf_grp)
@mock.patch('nova.utils._SERVICE_TYPES.get_project_name')
def test__get_conf_group_fail(self, mock_get_project_name):
test_service_type = 'test_service_type'
# not confgrp
mock_get_project_name.return_value = None
self.assertRaises(exception.ConfGroupForServiceTypeNotFound,
utils._get_conf_group, None)
# not hasattr
mock_get_project_name.return_value = 'test_fail'
self.assertRaises(exception.ConfGroupForServiceTypeNotFound,
utils._get_conf_group, test_service_type)
class TestGetAuthAndSession(test.NoDBTestCase):
"""Tests for nova.utils._get_auth_and_session"""
def setUp(self):
super(TestGetAuthAndSession, self).setUp()
self.test_auth = 'test_auth'
self.test_session = 'test_session'
self.test_session_auth = 'test_session_auth'
self.test_confgrp = 'test_confgrp'
self.mock_session = mock.Mock()
self.mock_session.auth = self.test_session_auth
@mock.patch('nova.utils.ks_loading.load_auth_from_conf_options')
@mock.patch('nova.utils.ks_loading.load_session_from_conf_options')
def test_auth_and_session(self, mock_load_session, mock_load_auth):
# yes auth, yes session
actual = utils._get_auth_and_session(self.test_confgrp,
ksa_auth=self.test_auth,
ksa_session=self.test_session)
self.assertEqual(actual, (self.test_auth, self.test_session))
mock_load_session.assert_not_called()
mock_load_auth.assert_not_called()
@mock.patch('nova.utils.ks_loading.load_auth_from_conf_options')
@mock.patch('nova.utils.ks_loading.load_session_from_conf_options')
@mock.patch('nova.utils.CONF')
def test_no_session(self, mock_CONF, mock_load_session, mock_load_auth):
# yes auth, no session
mock_load_session.return_value = self.test_session
actual = utils._get_auth_and_session(self.test_confgrp,
ksa_auth=self.test_auth,
ksa_session=None)
self.assertEqual(actual, (self.test_auth, self.test_session))
mock_load_session.assert_called_once_with(mock_CONF, self.test_confgrp,
auth=self.test_auth)
mock_load_auth.assert_not_called()
@mock.patch('nova.utils.ks_loading.load_auth_from_conf_options')
@mock.patch('nova.utils.ks_loading.load_session_from_conf_options')
def test_no_auth(self, mock_load_session, mock_load_auth):
# no auth, yes session, yes session.auth
actual = utils._get_auth_and_session(self.test_confgrp, ksa_auth=None,
ksa_session=self.mock_session)
self.assertEqual(actual, (self.test_session_auth, self.mock_session))
mock_load_session.assert_not_called()
mock_load_auth.assert_not_called()
@mock.patch('nova.utils.ks_loading.load_auth_from_conf_options')
@mock.patch('nova.utils.ks_loading.load_session_from_conf_options')
@mock.patch('nova.utils.CONF')
def test_no_auth_no_sauth(self, mock_CONF, mock_load_session,
mock_load_auth):
# no auth, yes session, no session.auth
mock_load_auth.return_value = self.test_auth
self.mock_session.auth = None
actual = utils._get_auth_and_session(self.test_confgrp, ksa_auth=None,
ksa_session=self.mock_session)
self.assertEqual(actual, (self.test_auth, self.mock_session))
mock_load_session.assert_not_called()
mock_load_auth.assert_called_once_with(mock_CONF, self.test_confgrp)
@mock.patch('nova.utils.ks_loading.load_auth_from_conf_options')
@mock.patch('nova.utils.ks_loading.load_session_from_conf_options')
@mock.patch('nova.utils.CONF')
def test__get_auth_and_session(self, mock_CONF, mock_load_session,
mock_load_auth):
# no auth, no session
mock_load_auth.return_value = self.test_auth
mock_load_session.return_value = self.test_session
actual = utils._get_auth_and_session(self.test_confgrp, ksa_auth=None,
ksa_session=None)
self.assertEqual(actual, (self.test_auth, self.test_session))
mock_load_session.assert_called_once_with(mock_CONF, self.test_confgrp,
auth=self.test_auth)
mock_load_auth.assert_called_once_with(mock_CONF, self.test_confgrp)
class TestGetSDKAdapter(test.NoDBTestCase):
"""Tests for nova.utils.get_sdk_adapter"""
@mock.patch('nova.utils._get_conf_group')
@mock.patch('nova.utils._get_auth_and_session')
@mock.patch('nova.utils.connection.Connection')
@mock.patch('nova.utils.CONF')
def test_get_sdk_adapter(self, mock_conf, mock_connection,
mock_get_auth_sess, mock_get_confgrp):
service_type = 'test_service'
mock_conn = mock.Mock()
mock_proxy = mock.Mock()
setattr(mock_conn, service_type, mock_proxy)
mock_connection.return_value = mock_conn
mock_session = mock.Mock()
mock_get_auth_sess.return_value = (None, mock_session)
mock_get_confgrp.return_value = mock_confgrp = mock.Mock()
actual = utils.get_sdk_adapter(service_type)
self.assertEqual(actual, mock_proxy)
mock_get_confgrp.assert_called_once_with(service_type)
mock_get_auth_sess.assert_called_once_with(mock_confgrp)
mock_connection.assert_called_once_with(session=mock_session,
oslo_conf=mock_conf)
@mock.patch('nova.utils._get_conf_group')
@mock.patch('nova.utils._get_auth_and_session')
@mock.patch('nova.utils.connection.Connection')
def test_get_sdk_adapter_fail(self, mock_connection, mock_get_auth_sess,
mock_get_confgrp):
service_type = 'test_service'
mock_get_confgrp.side_effect = \
exception.ConfGroupForServiceTypeNotFound(stype=service_type)
self.assertRaises(exception.ConfGroupForServiceTypeNotFound,
utils.get_sdk_adapter, service_type)
mock_get_confgrp.assert_called_once_with(service_type)
mock_connection.assert_not_called()
mock_get_auth_sess.assert_not_called()
| 41.066291
| 79
| 0.588502
|
0e4698d5bcbe84fa02becca4cdf6209496a99aa7
| 3,373
|
py
|
Python
|
clear_nb_cache.py
|
nigelnquande/clear-netbeans-cache
|
27a65c8ea7d187d9ff8508c6ceeaa47f623497ff
|
[
"MIT"
] | null | null | null |
clear_nb_cache.py
|
nigelnquande/clear-netbeans-cache
|
27a65c8ea7d187d9ff8508c6ceeaa47f623497ff
|
[
"MIT"
] | null | null | null |
clear_nb_cache.py
|
nigelnquande/clear-netbeans-cache
|
27a65c8ea7d187d9ff8508c6ceeaa47f623497ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Program to clear the Netbeans Cache
# TODO: Do not delete anything that starts with *jython* (including subdirectories and directory contents).
import os
from sys import stderr, argv
from shutil import rmtree
def getVersion ():
return "0.00.01"
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def clean_nb_cache(cache_dir, verbose):
"""
Given a directory containing the Netbeans cache structure, check that it has
subdirectories and ask which one to clean, then delete all contents in 'index'
"""
total_dirs = 0
subdirs = get_immediate_subdirectories(cache_dir)
if len(subdirs) > 0:
ask_dir = "For which version of NetBeans do you wish to clear caches?\nType the directory at the prompt (or leave blank for none), followed by <Enter>."
for a_dir in subdirs:
ask_dir = ask_dir + "\n\t" + a_dir
ask_dir = ask_dir + "\n\t? "
clean_dir = input(ask_dir).rstrip()
if len(clean_dir) > 0:
clean_full = os.path.join(cache_dir, clean_dir, "index")
print("Cleaning " + clean_full + " ...")
            subdirs = get_immediate_subdirectories(clean_full) # We only want to delete the subdirectories of 'index', not 'index' itself
for a_dir in subdirs:
if str(a_dir).startswith("jython"):
continue # ignore jython directories
                if (verbose):
                    print("Cleaning " + os.path.join(clean_full, a_dir) + " ...")
                rmtree(os.path.join(clean_full, a_dir), ignore_errors=True) # Will delete the dir itself as well
                total_dirs += 1
else:
print("No cache directory supplied!", file=stderr)
else:
print("Cannot find a netbeans version in " + cache_dir, file=stderr)
print("Cleaning cache completed. (%s directories cleaned.)" % total_dirs)
def main(verbose):
print("NetBeans Cache Cleaner [" + getVersion() + "]")
print("\nPlease exit NetBeans if you haven't already ...\nSupply -v or --verbose for verbose output. Supply -V or --version for version information.\n")
home_dir = os.getenv("HOME", "/home") # Get the location of $HOME (UNIX)
cache_dir = os.path.join(home_dir, ".cache", "netbeans")
if os.path.isdir(cache_dir): # Get a list of directories in $HOME/.cache/netbeans/, assuming it exists
clean_nb_cache(cache_dir, verbose)
elif os.path.isdir(os.path.join(home_dir, ".netbeans", "cache")): # Alternate cache location
clean_nb_cache(os.path.join(home_dir, ".netbeans", "cache"), verbose)
elif os.getenv("OS", "?").startswith("Windows") and len(os.getenv("USERPROFILE", "")) > 0: # NetBeans puts its cache in different location in home dir on Windows
home_dir = os.getenv("USERPROFILE", "") # Windows Equivalent of HOME
cache_dir = os.path.join(home_dir, "AppData", "Local", "NetBeans", "Cache")
if os.path.isdir(cache_dir):
clean_nb_cache(cache_dir, verbose)
else:
print("Cannot find NetBeans cache in " + cache_dir, file=stderr)
else:
print("Cannot find NetBeans cache in " + cache_dir, file=stderr)
# Ask the user to choose which version of Netbeans to clear, based on directories
# Delete all files and directories (recursive) within the 'index' dir
if __name__ == "__main__":
if ("-V" in argv) or ("--version") in argv:
print(getVersion())
else:
verbose = ("-v" in argv) or ("--verbose" in argv)
main(verbose)
| 42.696203
| 162
| 0.702342
|
eec401f04cfc542bf92fc67fa0d722b0d1b44b01
| 1,169
|
py
|
Python
|
tests/unit/test_mpath.py
|
darkwyrm/pymensago
|
add93a6a87def3b6909666d23b8d885cfbdee5c4
|
[
"MIT"
] | 1
|
2021-10-01T01:35:19.000Z
|
2021-10-01T01:35:19.000Z
|
tests/unit/test_mpath.py
|
darkwyrm/pymensago
|
add93a6a87def3b6909666d23b8d885cfbdee5c4
|
[
"MIT"
] | null | null | null |
tests/unit/test_mpath.py
|
darkwyrm/pymensago
|
add93a6a87def3b6909666d23b8d885cfbdee5c4
|
[
"MIT"
] | null | null | null |
import pymensago.mpath as mpath
def test_basename():
'''Tests mpath.basename()'''
assert mpath.basename('/ foo bar baz') == 'baz', 'basename test failed'
def test_parent():
'''Tests mpath.parent()'''
assert mpath.parent('/ foo bar baz') == '/ foo bar', 'parent test failed'
def test_split():
'''Tests mpath.split()'''
testdata = '/ foo bar baz / spam eggs / 123 456 789'
expected_data = ['/ foo bar baz', '/ spam eggs', '/ 123 456 789']
assert mpath.split(testdata) == expected_data, 'split test failed'
def test_validate_server_path():
'''Tests mpath.validate_server_path()'''
testpaths = [
'/',
'/ tmp 11111111-1111-1111-1111-111111111111 1234.1234.22222222-2222-2222-2222-222222222222',
'/ out 11111111-1111-1111-1111-111111111111 33333333-3333-3333-3333-333333333333',
'/ wsp 11111111-1111-1111-1111-111111111111 33333333-3333-3333-3333-333333333333 new '
'1234.1234.22222222-2222-2222-2222-222222222222',
]
for testpath in testpaths:
assert mpath.validate_server_path(testpath), 'validate_server_path test failed: ' + testpath
if __name__ == '__main__':
test_basename()
test_parent()
test_split()
test_validate_server_path()
| 29.974359
| 94
| 0.715141
|
accf3c3458d1f3eb2fa3379b9ec04a5b0571fc0b
| 9,171
|
py
|
Python
|
src/Examples/VideoUploaderClient.py
|
eacg91/WhosApp
|
c822b2aa61ab29e47ebf590570b15bb9558275c6
|
[
"MIT"
] | null | null | null |
src/Examples/VideoUploaderClient.py
|
eacg91/WhosApp
|
c822b2aa61ab29e47ebf590570b15bb9558275c6
|
[
"MIT"
] | null | null | null |
src/Examples/VideoUploaderClient.py
|
eacg91/WhosApp
|
c822b2aa61ab29e47ebf590570b15bb9558275c6
|
[
"MIT"
] | 1
|
2020-02-16T20:43:39.000Z
|
2020-02-16T20:43:39.000Z
|
'''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from Yowsup.connectionmanager import YowsupConnectionManager
import time, datetime, sys
from Yowsup.Media.downloader import MediaDownloader
from Yowsup.Media.uploader import MediaUploader
from sys import stdout
import os
import hashlib
import base64
from PIL import Image
import StringIO
size = 100,100
if sys.version_info >= (3, 0):
raw_input = input
class VideoUploaderClient:
def __init__(self, phoneNumber, imagePath, keepAlive = False, sendReceipts = False):
self.sendReceipts = sendReceipts
self.phoneNumber = phoneNumber
self.imagePath = imagePath
if '-' in phoneNumber:
self.jid = "%s@g.us" % phoneNumber
else:
self.jid = "%s@s.whatsapp.net" % phoneNumber
self.sentCache = {}
connectionManager = YowsupConnectionManager()
connectionManager.setAutoPong(keepAlive)
self.signalsInterface = connectionManager.getSignalsInterface()
self.methodsInterface = connectionManager.getMethodsInterface()
self.signalsInterface.registerListener("auth_success", self.onAuthSuccess)
self.signalsInterface.registerListener("auth_fail", self.onAuthFailed)
self.signalsInterface.registerListener("message_received", self.onMessageReceived)
self.signalsInterface.registerListener("receipt_messageSent", self.onMessageSent)
self.signalsInterface.registerListener("presence_updated", self.onPresenceUpdated)
self.signalsInterface.registerListener("disconnected", self.onDisconnected)
self.signalsInterface.registerListener("media_uploadRequestSuccess", self.onmedia_uploadRequestSuccess)
self.signalsInterface.registerListener("media_uploadRequestFailed", self.onmedia_uploadRequestFailed)
self.signalsInterface.registerListener("media_uploadRequestDuplicate", self.onmedia_uploadRequestDuplicate)
self.path = ""
self.gotMediaReceipt = False
self.done = False
self.commandMappings = {"lastseen":lambda: self.methodsInterface.call("presence_request", ( self.jid,)),
"available": lambda: self.methodsInterface.call("presence_sendAvailable"),
"unavailable": lambda: self.methodsInterface.call("presence_sendUnavailable")
}
self.done = False
#signalsInterface.registerListener("receipt_messageDelivered", lambda jid, messageId: methodsInterface.call("delivered_ack", (jid, messageId)))
def login(self, username, password):
self.username = username
self.methodsInterface.call("auth_login", (username, password))
while not self.done:
time.sleep(0.5)
def onAuthSuccess(self, username):
print("Authed %s" % username)
self.methodsInterface.call("ready")
self.runCommand("/pic "+self.imagePath)
def onAuthFailed(self, username, err):
print("Auth Failed!")
def onDisconnected(self, reason):
print("Disconnected because %s" %reason)
def onPresenceUpdated(self, jid, lastSeen):
formattedDate = datetime.datetime.fromtimestamp(long(time.time()) - lastSeen).strftime('%d-%m-%Y %H:%M')
self.onMessageReceived(0, jid, "LAST SEEN RESULT: %s"%formattedDate, long(time.time()), False, None, False)
def onMessageSent(self, jid, messageId):
formattedDate = datetime.datetime.fromtimestamp(self.sentCache[messageId][0]).strftime('%d-%m-%Y %H:%M')
print("%s [%s]:%s"%(self.username, formattedDate, self.sentCache[messageId][1]))
print(self.getPrompt())
def runCommand(self, command):
splitstr = command.split(' ')
if splitstr[0] == "/pic" and len(splitstr) == 2:
self.path = splitstr[1]
if not os.path.isfile(splitstr[1]):
print("File %s does not exists" % splitstr[1])
return 1
statinfo = os.stat(self.path)
name=os.path.basename(self.path)
print("Sending picture %s of size %s with name %s" %(self.path, statinfo.st_size, name))
mtype = "image"
sha1 = hashlib.sha256()
fp = open(self.path, 'rb')
try:
sha1.update(fp.read())
hsh = base64.b64encode(sha1.digest())
print("Sending media_requestUpload")
self.methodsInterface.call("media_requestUpload", (hsh, mtype, os.path.getsize(self.path)))
finally:
fp.close()
timeout = 100
t = 0;
while t < timeout and not self.gotMediaReceipt:
time.sleep(0.5)
t+=1
if not self.gotMediaReceipt:
print("MediaReceipt print timedout!")
else:
print("Got request MediaReceipt")
# added by sirpoot
self.done = True
return 1
elif command[0] == "/":
command = command[1:].split(' ')
try:
self.commandMappings[command[0]]()
return 1
except KeyError:
return 0
return 0
def onMessageReceived(self, messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):
if jid[:jid.index('@')] != self.phoneNumber:
return
formattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y %H:%M')
print("%s [%s]:%s"%(jid, formattedDate, messageContent))
if wantsReceipt and self.sendReceipts:
self.methodsInterface.call("message_ack", (jid, messageId))
print(self.getPrompt())
def goInteractive(self, jid):
print("Starting Interactive chat with %s" % jid)
jid = "%s@s.whatsapp.net" % jid
print(self.getPrompt())
while True:
message = raw_input()
message = message.strip()
if not len(message):
continue
if not self.runCommand(message.strip()):
msgId = self.methodsInterface.call("message_send", (jid, message))
self.sentCache[msgId] = [int(time.time()), message]
self.done = True
def getPrompt(self):
return "Enter Message or command: (/%s)" % ", /".join(self.commandMappings)
def onImageReceived(self, messageId, jid, preview, url, size, wantsReceipt, isBroadcast):
print("Image received: Id:%s Jid:%s Url:%s size:%s" %(messageId, jid, url, size))
downloader = MediaDownloader(self.onDlsuccess, self.onDlerror, self.onDlprogress)
downloader.download(url)
if wantsReceipt and self.sendReceipts:
self.methodsInterface.call("message_ack", (jid, messageId))
timeout = 10
t = 0;
while t < timeout:
time.sleep(0.5)
t+=1
def onDlsuccess(self, path):
stdout.write("\n")
stdout.flush()
print("Image downloded to %s"%path)
print(self.getPrompt())
def onDlerror(self):
stdout.write("\n")
stdout.flush()
print("Download Error")
print(self.getPrompt())
def onDlprogress(self, progress):
stdout.write("\r Progress: %s" % progress)
stdout.flush()
def onmedia_uploadRequestSuccess(self,_hash, url, resumeFrom):
print("Request Succ: hash: %s url: %s resume: %s"%(_hash, url, resumeFrom))
self.uploadImage(url)
self.gotMediaReceipt = True
def onmedia_uploadRequestFailed(self,_hash):
print("Request Fail: hash: %s"%(_hash))
        self.gotMediaReceipt = True
def onmedia_uploadRequestDuplicate(self,_hash, url):
print("Request Dublicate: hash: %s url: %s "%(_hash, url))
self.doSendImage(url)
self.gotMediaReceipt = True
def uploadImage(self, url):
uploader = MediaUploader(self.jid, self.username, self.onUploadSuccess, self.onError, self.onProgressUpdated)
uploader.upload(self.path,url)
def onUploadSuccess(self, url):
stdout.write("\n")
stdout.flush()
print("Upload Succ: url: %s "%( url))
self.doSendImage(url)
def onError(self):
stdout.write("\n")
stdout.flush()
print("Upload Fail:")
def onProgressUpdated(self, progress):
stdout.write("\r Progress: %s" % progress)
stdout.flush()
def doSendImage(self, url):
print("Sending message_video")
statinfo = os.stat(self.path)
name=os.path.basename(self.path)
#im = Image.open("c:\\users\\poot\\desktop\\icon.png")
#im.thumbnail(size, Image.ANTIALIAS)
#msgId = self.methodsInterface.call("message_videoSend", (self.jid, url, name,str(statinfo.st_size), self.createThumb()))
msgId = self.methodsInterface.call("message_videoSend", (self.jid, url, name,str(statinfo.st_size), None))
self.sentCache[msgId] = [int(time.time()), self.path]
def createThumb(self):
THUMBNAIL_SIZE = 64, 64
thumbnailFile = "thumb.jpg"
im = Image.open(self.path)
im.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)
im.save(thumbnailFile, "JPEG")
with open(thumbnailFile, "rb") as imageFile:
raw = base64.b64encode(imageFile.read())
return raw;
| 33.841328
| 145
| 0.726638
|
827acea0f9e04b1c2291a731ce67b919a277b139
| 1,569
|
py
|
Python
|
xdl/test/python/unit_test/ps_ops/test_ps_sparse_assign_op.py
|
hitflame/x-deeplearning
|
c8029396c6ae6dbf397a34a1801ceadc824e4f8d
|
[
"Apache-2.0"
] | 2
|
2019-11-11T09:51:56.000Z
|
2021-08-04T04:02:29.000Z
|
xdl/test/python/unit_test/ps_ops/test_ps_sparse_assign_op.py
|
hitflame/x-deeplearning
|
c8029396c6ae6dbf397a34a1801ceadc824e4f8d
|
[
"Apache-2.0"
] | 1
|
2019-11-29T14:52:53.000Z
|
2019-11-29T14:52:53.000Z
|
xdl/test/python/unit_test/ps_ops/test_ps_sparse_assign_op.py
|
hitflame/x-deeplearning
|
c8029396c6ae6dbf397a34a1801ceadc824e4f8d
|
[
"Apache-2.0"
] | 1
|
2019-08-03T05:22:19.000Z
|
2019-08-03T05:22:19.000Z
|
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import xdl
import unittest
import numpy as np
from xdl.python.lib.datatype import *
from xdl.python.lib.graph import execute
class TestPsSparseAssignOp(unittest.TestCase):
def test_all(self):
var = xdl.Variable(name="w", dtype=DataType.float, shape=[4,2], initializer=xdl.Zeros())
execute(xdl.variable_registers())
execute(xdl.global_initializers())
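        # sparse assign: write rows 1 and 2 of the 4x2 zero-initialized variable;
        # rows 0 and 3 are expected to stay zero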
op = xdl.ps_sparse_assign_op(
var_name="w", var_type="index",
ids=np.array([1,2], dtype=np.int32),
values=np.array([[1,2],[3,4]], dtype=np.float32))
execute(op)
ret = execute(var.value)
self.assertTrue((ret == np.array([[0,0],[1,2],[3,4],[0,0]], dtype=np.float32)).all())
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestPsSparseAssignOp)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| 39.225
| 96
| 0.660293
|
96da19a11b7f019b832f836904ff25fd5ee42918
| 398
|
py
|
Python
|
kp_report.py
|
NaomiatLibrary/OpenNMT-kpg-release
|
1da3468d7dad22529a77f3526abf9b373bd3dc4c
|
[
"MIT"
] | 152
|
2019-10-07T03:15:53.000Z
|
2022-03-24T16:26:26.000Z
|
kp_report.py
|
NaomiatLibrary/OpenNMT-kpg-release
|
1da3468d7dad22529a77f3526abf9b373bd3dc4c
|
[
"MIT"
] | 46
|
2019-11-04T09:51:51.000Z
|
2022-03-06T18:40:13.000Z
|
kp_report.py
|
NaomiatLibrary/OpenNMT-kpg-release
|
1da3468d7dad22529a77f3526abf9b373bd3dc4c
|
[
"MIT"
] | 28
|
2019-11-04T02:02:23.000Z
|
2021-12-29T06:10:04.000Z
|
# -*- coding: utf-8 -*-
"""
Python File Template
"""
import os
import kp_evaluate
__author__ = "Rui Meng"
__email__ = "rui.meng@pitt.edu"
if __name__ == '__main__':
eval_dir = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2one/meng17-one2one-kp20k-v2/'
dataset_scores_dict = kp_evaluate.gather_eval_results(eval_root_dir=eval_dir)
print(dataset_scores_dict)
| 22.111111
| 113
| 0.738693
|
a7377dad9309074ca0a9ede9d231125a41cae3e7
| 831
|
py
|
Python
|
blog/migrations/0009_auto_20190129_0021.py
|
ufotable/DjangoBlog
|
64ac857d2d105b39200984816187e5a7bf6be43a
|
[
"MIT"
] | 21
|
2019-02-12T09:21:03.000Z
|
2019-06-05T03:36:16.000Z
|
blog/migrations/0009_auto_20190129_0021.py
|
ufotable/DjangoBlog
|
64ac857d2d105b39200984816187e5a7bf6be43a
|
[
"MIT"
] | 25
|
2019-01-25T02:31:30.000Z
|
2019-03-09T13:52:12.000Z
|
blog/migrations/0009_auto_20190129_0021.py
|
ufotable/DjangoBlog
|
64ac857d2d105b39200984816187e5a7bf6be43a
|
[
"MIT"
] | 10
|
2019-02-28T10:30:50.000Z
|
2019-05-16T00:11:13.000Z
|
# Generated by Django 2.1.4 on 2019-01-29 00:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20190129_0019'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='published_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 1, 29, 0, 21, 39, 296873)),
),
migrations.AlterField(
model_name='comment',
name='reply_to',
field=models.IntegerField(blank=True),
),
migrations.AlterField(
model_name='post',
name='published_date',
field=models.DateTimeField(default=datetime.datetime(2019, 1, 29, 0, 21, 39, 296279)),
),
]
| 27.7
| 110
| 0.590854
|
232b9a28aba486a88730936490c4c2961cbe2dce
| 549
|
py
|
Python
|
BlueprintsApp/main/gui/buttons/apply_button.py
|
PiCodingClub/BlueprintsEdu
|
f65ad2c3bf6f01acb26660505f6ceded0bee888f
|
[
"Apache-2.0"
] | null | null | null |
BlueprintsApp/main/gui/buttons/apply_button.py
|
PiCodingClub/BlueprintsEdu
|
f65ad2c3bf6f01acb26660505f6ceded0bee888f
|
[
"Apache-2.0"
] | null | null | null |
BlueprintsApp/main/gui/buttons/apply_button.py
|
PiCodingClub/BlueprintsEdu
|
f65ad2c3bf6f01acb26660505f6ceded0bee888f
|
[
"Apache-2.0"
] | null | null | null |
from gui.buttons.button import Button
from utils.string_utils import StringUtils
from utils import scene_utils
from utils.gui_utils import Themes
class ApplyButton(Button):
def __init__(self, pos=0):
Button.__init__(self, StringUtils.get_string("ID_APPLY"), pos)
def on_click(self, board, form=None):
board.set_scene(scene_utils.WELCOME_SCENE)
super().on_click(board)
def update_button(self, color=Themes.DEFAULT_THEME.get("button")):
super().update_button(StringUtils.get_string("ID_APPLY"), color)
| 30.5
| 72
| 0.739526
|
b37d859710b6bcfc64412cb827233ee4cdf18fdc
| 1,232
|
py
|
Python
|
src/posts/serializers/post.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | null | null | null |
src/posts/serializers/post.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | 1
|
2020-06-05T22:09:06.000Z
|
2020-06-05T22:09:06.000Z
|
src/posts/serializers/post.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | null | null | null |
# posts/serializers/post.py
from rest_framework.serializers import HyperlinkedModelSerializer
from posts.models import Post
from posts.serializers import PostCommentSerializer, PostImageSerializer, UserSerializer
class AdminPostSerializer(HyperlinkedModelSerializer):
parent_lookup_kwargs = {
'user_pk': 'user__pk',
}
user = UserSerializer(read_only=True)
comments = PostCommentSerializer(many=True, read_only=True)
images = PostImageSerializer(many=True, read_only=True)
class Meta:
model = Post
fields = (
'url',
'id',
'user',
'title',
'description',
'short_description',
'approved',
'likes',
'dislikes',
'datetime_created',
'datetime_modified',
'comments',
'images',
)
read_only_fields = (
'likes',
'dislikes',
)
depth = 1
class PostSerializer(AdminPostSerializer):
class Meta(AdminPostSerializer.Meta):
read_only_fields = AdminPostSerializer.Meta.read_only_fields + (
'approved',
)
| 26.212766
| 89
| 0.569805
|
d59c46d26abcd04390984a019be11e79a32a9f65
| 1,419
|
py
|
Python
|
test_python/layers/test_softmax.py
|
moskomule/TensorComprehensions
|
c215ae2bbe1cf0424a65b1848dec486aa2d2eafa
|
[
"Apache-2.0"
] | null | null | null |
test_python/layers/test_softmax.py
|
moskomule/TensorComprehensions
|
c215ae2bbe1cf0424a65b1848dec486aa2d2eafa
|
[
"Apache-2.0"
] | null | null | null |
test_python/layers/test_softmax.py
|
moskomule/TensorComprehensions
|
c215ae2bbe1cf0424a65b1848dec486aa2d2eafa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import tensor_comprehensions as tc
import torch
import torch.cuda
import unittest
class TestSoftmax(unittest.TestCase):
# one can't reuse names on the LHS once they've been used in another tensor
# otherwise the size of the tensors is ill-defined
def test_softmax(self):
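        # Subtracting the per-row max before exponentiation keeps the softmax
        # numerically stable.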
LANG = """
def softmax(float(N, D) I) -> (O, maxVal, expDistance, expSum) {
maxVal(n) max= I(n, d)
expDistance(n, d) = exp(I(n, d) - maxVal(n))
expSum(n) +=! expDistance(n, d)
O(n, d) = expDistance(n, d) / expSum(n)
}
"""
softmax = tc.define(LANG, name="softmax")
inp = torch.randn(32, 16).cuda()
out = softmax(inp)
if __name__ == '__main__':
unittest.main()
| 33
| 79
| 0.627202
|
d7dd37609fbf0bff444d9fdee2adc3db2e58fb88
| 3,742
|
py
|
Python
|
ddqn.py
|
tonegas/deep-q-learning
|
b375acb0ae64340706ea6dd03702d1dde5e932c2
|
[
"MIT"
] | 1
|
2020-12-03T15:46:19.000Z
|
2020-12-03T15:46:19.000Z
|
ddqn.py
|
AlexKaravaev/deep-q-learning
|
eba2519d34d97620584810427c1331d37a2dcf1d
|
[
"MIT"
] | null | null | null |
ddqn.py
|
AlexKaravaev/deep-q-learning
|
eba2519d34d97620584810427c1331d37a2dcf1d
|
[
"MIT"
] | 2
|
2019-04-13T08:36:58.000Z
|
2021-05-08T08:20:39.000Z
|
# -*- coding: utf-8 -*-
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras import backend as K
EPISODES = 5000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.99
self.learning_rate = 0.001
self.model = self._build_model()
self.target_model = self._build_model()
self.update_target_model()
def _huber_loss(self, target, prediction):
        # pseudo-Huber loss: sqrt(1 + error^2) - 1, a smooth approximation of |error|
error = prediction - target
return K.mean(K.sqrt(1+K.square(error))-1, axis=-1)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss=self._huber_loss,
optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
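        # epsilon-greedy: explore with probability epsilon, otherwise act greedily on Q-values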
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = self.model.predict(state)
if done:
target[0][action] = reward
else:
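                # Double DQN: the online model selects the best next action,
                # while the target model provides its value estimate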
a = self.model.predict(next_state)[0]
t = self.target_model.predict(next_state)[0]
target[0][action] = reward + self.gamma * t[np.argmax(a)]
self.model.fit(state, target, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size)
# agent.load("./save/cartpole-ddqn.h5")
done = False
batch_size = 32
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, [1, state_size])
for time in range(500):
# env.render()
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
reward = reward if not done else -10
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
agent.update_target_model()
print("episode: {}/{}, score: {}, e: {:.2}"
.format(e, EPISODES, time, agent.epsilon))
break
if len(agent.memory) > batch_size:
agent.replay(batch_size)
# if e % 10 == 0:
# agent.save("./save/cartpole-ddqn.h5")
| 35.301887
| 74
| 0.607964
|
d2d1a179393059673b7e3fc4856e33e17ceba783
| 171
|
py
|
Python
|
examples/nested_directory_tests_fixture/tests/some_top_directory_test.py
|
Avvir/pyne
|
864885a8fb632b72c00af164f150b1daa38a346f
|
[
"MIT"
] | 4
|
2018-08-10T20:05:10.000Z
|
2019-07-24T15:29:32.000Z
|
examples/nested_directory_tests_fixture/tests/some_top_directory_test.py
|
Avvir/pyne
|
864885a8fb632b72c00af164f150b1daa38a346f
|
[
"MIT"
] | 6
|
2018-09-25T20:15:51.000Z
|
2021-12-22T17:09:52.000Z
|
examples/nested_directory_tests_fixture/tests/some_top_directory_test.py
|
Avvir/pyne
|
864885a8fb632b72c00af164f150b1daa38a346f
|
[
"MIT"
] | null | null | null |
from pynetest.pyne_test_collector import it
from pynetest.pyne_tester import pyne
@pyne
def some_top_directory_test():
@it("can pass")
def _(self):
pass
| 17.1
| 43
| 0.719298
|
8ea6884ac306051b5594f9511900716e12154c8d
| 665
|
py
|
Python
|
ml-agents/tests/yamato/sample_curation.py
|
Musubee/ml-agents
|
22a45cc14788cbbe0b3b874f510f0a048b494675
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/tests/yamato/sample_curation.py
|
Musubee/ml-agents
|
22a45cc14788cbbe0b3b874f510f0a048b494675
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/tests/yamato/sample_curation.py
|
Musubee/ml-agents
|
22a45cc14788cbbe0b3b874f510f0a048b494675
|
[
"Apache-2.0"
] | null | null | null |
import sys
import argparse
from .yamato_utils import get_base_path, create_samples
def main(scenes):
base_path = get_base_path()
print(f"Running in base path {base_path}")
returncode = create_samples(
scenes,
base_path,
log_output_path=None, # Log to stdout so we get timestamps on the logs
)
if returncode == 0:
print("Test run SUCCEEDED!")
else:
print("Test run FAILED!")
sys.exit(returncode)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scene", nargs="+", default=None, required=True)
args = parser.parse_args()
main(args.scene)
| 22.166667
| 79
| 0.657143
|
e0fb8589e71c6fd2749f9f68c1198b65e2b75795
| 525
|
py
|
Python
|
DQMServices/Core/python/DQMStore_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
DQMServices/Core/python/DQMStore_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
DQMServices/Core/python/DQMStore_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
DQMStore = cms.Service("DQMStore",
verbose = cms.untracked.int32(0),
# similar to LSBasedMode but for offline. Explicitly sets LumiFLag on all
# MEs/modules that allow it (canSaveByLumi)
saveByLumi = cms.untracked.bool(False),
trackME = cms.untracked.string(""),
# UNUSED: historical HLT configs expect this option to be present, so it
# remains here, even though the DQMStore does not use it any more.
enableMultiThread = cms.untracked.bool(True)
)
| 37.5
| 77
| 0.725714
|
d1b5526b2c01ae7998ffec7b3276d7596f42c545
| 2,217
|
py
|
Python
|
src/cogs/event.py
|
thuanGIT/cinnamon
|
f06166f7ccd417b4ec9963a02753423cc1bb3f21
|
[
"MIT"
] | null | null | null |
src/cogs/event.py
|
thuanGIT/cinnamon
|
f06166f7ccd417b4ec9963a02753423cc1bb3f21
|
[
"MIT"
] | null | null | null |
src/cogs/event.py
|
thuanGIT/cinnamon
|
f06166f7ccd417b4ec9963a02753423cc1bb3f21
|
[
"MIT"
] | 1
|
2021-03-18T01:00:50.000Z
|
2021-03-18T01:00:50.000Z
|
from discord.ext import commands
import sys
class Event(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f'{self.bot.user} has connected to Discord!')
@commands.Cog.listener()
async def on_message(self, message):
content = message.content
# Check if the sender is the bot itself
if message.author == self.bot.user:
return
elif not content.startswith('#'):
await message.channel.send("Let me notify Jamie! Please wait a little...", delete_after = 0.5)
# Register commands to work with on_message
# Only use this in @bot.event. If placed in a listener, do not manually call this.
# await self.bot.process_commands(message)
# Overriding the default error handler to save the error logs
# Won't take any effect as said by the documentation
# @commands.Cog.listener()
# async def on_error(self, event, *args, **kwargs):
# with open('error.log', 'a') as f:
# # Since on_message take only 1 argument (message) so args[0] should be expected to be that argument.
# f.write(f'Unhandled message: {args[0]}\n')
#Overwrite on_command_error
@commands.Cog.listener()
async def on_command_error(self, context, error):
type_help = '\nType \'#help\' to see available commands and instructions.'
if isinstance(error, commands.CommandNotFound):
await context.send("No such command found!" + type_help)
elif isinstance(error, commands.BadArgument):
await context.send("Syntax Error!" + type_help)
elif isinstance(error, commands.NotOwner):
await context.send("You are not authorized for deleting a question!" + type_help)
else:
print(error)
await context.send("Opps! Something happens!" + type_help)
@commands.Cog.listener()
async def on_disconnect(self):
print("Cinnamon disconnected for some reason.")
@commands.Cog.listener()
async def on_resumed(self):
print("Cinnamon is back online.")
def setup(bot):
bot.add_cog(Event(bot))
| 39.589286
| 114
| 0.641858
|